Column: ngram (list of strings); list lengths range from 0 to 82k.
[ "else self.my_string) oprot.writeFieldEnd() if self.my_enum is not None: oprot.writeFieldBegin('my_enum', TType.I32,", "_val13 iprot.readMapEnd() else: iprot.skip(ftype) elif fid == 11: if ftype", "None: oprot.writeFieldBegin('my_string_enum_map', TType.MAP, 10) oprot.writeMapBegin(TType.STRING, TType.I32, len(self.my_string_enum_map)) for kiter92, viter93", "), # 8 (9, TType.MAP, 'my_string_string_map', (TType.STRING, 'UTF8', TType.STRING, 'UTF8',", "set() (_etype87, _size84) = iprot.readSetBegin() for _i88 in range(_size84): _elem89", "oprot.writeListEnd() oprot.writeFieldEnd() if self.my_enumlist is not None: oprot.writeFieldBegin('my_enumlist', TType.LIST, 17)", "2 else kiter90) oprot.writeString(viter91.encode('utf-8') if sys.version_info[0] == 2 else viter91)", "if sys.version_info[0] == 2 else kiter90) oprot.writeString(viter91.encode('utf-8') if sys.version_info[0] ==", "), # 7 (8, TType.STRING, 'my_binary', 'BINARY', None, ), #", "viter97.write(oprot) oprot.writeMapEnd() oprot.writeFieldEnd() if self.my_enum_stringlist_map is not None: oprot.writeFieldBegin('my_enum_stringlist_map', TType.MAP,", "TType.MAP: self.my_string_enum_map = {} (_ktype8, _vtype9, _size7) = iprot.readMapBegin() for", "= iprot.readI32() _val34 = [] (_etype38, _size35) = iprot.readListBegin() for", "my_binary self.my_string_string_map = my_string_string_map self.my_string_enum_map = my_string_enum_map self.my_enum_string_map = my_enum_string_map", "set() (_etype75, _size72) = iprot.readSetBegin() for _i76 in range(_size72): _elem77", "self.my_string) oprot.writeFieldEnd() if self.my_binary is not None: oprot.writeFieldBegin('my_binary', TType.STRING, 8)", "return not (self == other) class MegaStruct(object): \"\"\" Attributes: -", "2 else iprot.readString() self.my_enum_string_map[_key19] = _val20 iprot.readMapEnd() else: iprot.skip(ftype) elif", "TType.LIST: self.my_stringlist = [] (_etype57, _size54) = iprot.readListBegin() for _i58", "my_structset def read(self, iprot): if iprot._fast_decode is not None and", "__init__(self, my_string=None, my_enum=None,): self.my_string = my_string self.my_enum = my_enum def", "- my_enumset - my_structset \"\"\" def __init__(self, my_bool=None, my_byte=None, my_16bit_int=None,", "TType.DOUBLE, 6) oprot.writeDouble(self.my_double) oprot.writeFieldEnd() if self.my_string is not None: oprot.writeFieldBegin('my_string',", "len(self.my_enum_structlist_map)) for kiter101, viter102 in self.my_enum_structlist_map.items(): oprot.writeI32(kiter101) oprot.writeListBegin(TType.STRUCT, len(viter102)) for", "oprot.writeString(kiter92.encode('utf-8') if sys.version_info[0] == 2 else kiter92) oprot.writeI32(viter93) oprot.writeMapEnd() oprot.writeFieldEnd()", "if self.my_string_enum_map is not None: oprot.writeFieldBegin('my_string_enum_map', TType.MAP, 10) oprot.writeMapBegin(TType.STRING, TType.I32,", "fid == 4: if ftype == TType.I32: self.my_32bit_int = iprot.readI32()", "None, ), # 5 (6, TType.DOUBLE, 'my_double', None, None, ),", "self.my_stringset = my_stringset self.my_enumset = my_enumset self.my_structset = my_structset def", "= iprot.readI32() else: iprot.skip(ftype) elif fid == 5: if ftype", "self.thrift_spec is not None: oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec])) return oprot.writeStructBegin('MegaStruct') if", "MiniStruct() _elem53.read(iprot) _val47.append(_elem53) iprot.readListEnd() self.my_enum_structlist_map[_key46] = _val47 iprot.readMapEnd() else: iprot.skip(ftype)", "self.thrift_spec is not None: iprot._fast_decode(self, iprot, 
[self.__class__, self.thrift_spec]) return iprot.readStructBegin()", "), # 2 (3, TType.I16, 'my_16bit_int', None, None, ), #", "= iprot.readI32() _val27 = MiniStruct() _val27.read(iprot) self.my_enum_struct_map[_key26] = _val27 iprot.readMapEnd()", "{} (_ktype29, _vtype30, _size28) = iprot.readMapBegin() for _i32 in range(_size28):", "2) oprot.writeByte(self.my_byte) oprot.writeFieldEnd() if self.my_16bit_int is not None: oprot.writeFieldBegin('my_16bit_int', TType.I16,", "viter102: iter103.write(oprot) oprot.writeListEnd() oprot.writeMapEnd() oprot.writeFieldEnd() if self.my_stringlist is not None:", "TTransport.CReadableTransport) and self.thrift_spec is not None: iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec])", "{} (_ktype8, _vtype9, _size7) = iprot.readMapBegin() for _i11 in range(_size7):", "# 18 (19, TType.SET, 'my_enumset', (TType.I32, None, False), None, ),", "None, False), None, ), # 17 (18, TType.SET, 'my_stringset', (TType.STRING,", "for iter106 in self.my_enumlist: oprot.writeI32(iter106) oprot.writeListEnd() oprot.writeFieldEnd() if self.my_stringset is", "8: if ftype == TType.STRING: self.my_binary = iprot.readBinary() else: iprot.skip(ftype)", "(_etype81, _size78) = iprot.readSetBegin() for _i82 in range(_size78): _elem83 =", "in range(_size60): _elem65 = MiniStruct() _elem65.read(iprot) self.my_structlist.append(_elem65) iprot.readListEnd() else: iprot.skip(ftype)", "self.my_enum = iprot.readI32() else: iprot.skip(ftype) else: iprot.skip(ftype) iprot.readFieldEnd() iprot.readStructEnd() def", "_val6 = iprot.readString().decode('utf-8') if sys.version_info[0] == 2 else iprot.readString() self.my_string_string_map[_key5]", "iprot.readI32() _val34 = [] (_etype38, _size35) = iprot.readListBegin() for _i39", "'my_structlist', (TType.STRUCT, [MiniStruct, None], False), None, ), # 16 (17,", "is not None: oprot.writeFieldBegin('my_32bit_int', TType.I32, 4) oprot.writeI32(self.my_32bit_int) oprot.writeFieldEnd() if self.my_64bit_int", "== 2 else iprot.readString() self.my_enum_string_map[_key19] = _val20 iprot.readMapEnd() else: iprot.skip(ftype)", "# 0 (1, TType.BOOL, 'my_bool', None, None, ), # 1", "and self.thrift_spec is not None: oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec])) return oprot.writeStructBegin('MegaStruct')", "== 17: if ftype == TType.LIST: self.my_enumlist = [] (_etype69,", "else iter100) oprot.writeListEnd() oprot.writeMapEnd() oprot.writeFieldEnd() if self.my_enum_structlist_map is not None:", "_key12 = iprot.readString().decode('utf-8') if sys.version_info[0] == 2 else iprot.readString() _val13", "my_string=None, my_binary=None, my_string_string_map=None, my_string_enum_map=None, my_enum_string_map=None, my_enum_struct_map=None, my_enum_stringlist_map=None, my_enum_structlist_map=None, my_stringlist=None, my_structlist=None,", "_val27.read(iprot) self.my_enum_struct_map[_key26] = _val27 iprot.readMapEnd() else: iprot.skip(ftype) elif fid ==", "19) oprot.writeSetBegin(TType.I32, len(self.my_enumset)) for iter108 in self.my_enumset: oprot.writeI32(iter108) oprot.writeSetEnd() oprot.writeFieldEnd()", "= iprot.readFieldBegin() if ftype == TType.STOP: break if fid ==", "self.my_16bit_int = iprot.readI16() else: iprot.skip(ftype) elif fid == 4: if", "17) oprot.writeListBegin(TType.I32, len(self.my_enumlist)) for iter106 in self.my_enumlist: oprot.writeI32(iter106) oprot.writeListEnd() oprot.writeFieldEnd()", "iprot.readString().decode('utf-8') if sys.version_info[0] == 2 else iprot.readString() _val13 = 
iprot.readI32()", "{} (_ktype1, _vtype2, _size0) = iprot.readMapBegin() for _i4 in range(_size0):", "else kiter90) oprot.writeString(viter91.encode('utf-8') if sys.version_info[0] == 2 else viter91) oprot.writeMapEnd()", "else: iprot.skip(ftype) elif fid == 3: if ftype == TType.I16:", "= _val20 iprot.readMapEnd() else: iprot.skip(ftype) elif fid == 12: if", "None], False), False), None, ), # 14 (15, TType.LIST, 'my_stringlist',", "9) oprot.writeMapBegin(TType.STRING, TType.STRING, len(self.my_string_string_map)) for kiter90, viter91 in self.my_string_string_map.items(): oprot.writeString(kiter90.encode('utf-8')", "# 2 (3, TType.I16, 'my_16bit_int', None, None, ), # 3", "if ftype == TType.DOUBLE: self.my_double = iprot.readDouble() else: iprot.skip(ftype) elif", "oprot.writeFieldBegin('my_byte', TType.BYTE, 2) oprot.writeByte(self.my_byte) oprot.writeFieldEnd() if self.my_16bit_int is not None:", "self.my_32bit_int = my_32bit_int self.my_64bit_int = my_64bit_int self.my_double = my_double self.my_string", "None: oprot.writeFieldBegin('my_binary', TType.STRING, 8) oprot.writeBinary(self.my_binary) oprot.writeFieldEnd() if self.my_string_string_map is not", "oprot.writeI32(kiter101) oprot.writeListBegin(TType.STRUCT, len(viter102)) for iter103 in viter102: iter103.write(oprot) oprot.writeListEnd() oprot.writeMapEnd()", "False), False), None, ), # 13 (14, TType.MAP, 'my_enum_structlist_map', (TType.I32,", "self.my_enum_string_map is not None: oprot.writeFieldBegin('my_enum_string_map', TType.MAP, 11) oprot.writeMapBegin(TType.I32, TType.STRING, len(self.my_enum_string_map))", "# Autogenerated by Thrift Compiler (0.13.0) # # DO NOT", "1 (2, TType.I32, 'my_enum', None, None, ), # 2 )", "None: iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec]) return iprot.readStructBegin() while True: (fname,", "return def __repr__(self): L = ['%s=%r' % (key, value) for", "is not None: oprot.writeFieldBegin('my_64bit_int', TType.I64, 5) oprot.writeI64(self.my_64bit_int) oprot.writeFieldEnd() if self.my_double", "_size7) = iprot.readMapBegin() for _i11 in range(_size7): _key12 = iprot.readString().decode('utf-8')", "(_ktype15, _vtype16, _size14) = iprot.readMapBegin() for _i18 in range(_size14): _key19", "- my_32bit_int - my_64bit_int - my_double - my_string - my_binary", "break if fid == 1: if ftype == TType.STRING: self.my_string", "if ftype == TType.STOP: break if fid == 1: if", "self.my_structlist is not None: oprot.writeFieldBegin('my_structlist', TType.LIST, 16) oprot.writeListBegin(TType.STRUCT, len(self.my_structlist)) for", "my_byte self.my_16bit_int = my_16bit_int self.my_32bit_int = my_32bit_int self.my_64bit_int = my_64bit_int", "TType.BYTE: self.my_byte = iprot.readByte() else: iprot.skip(ftype) elif fid == 3:", "elif fid == 15: if ftype == TType.LIST: self.my_stringlist =", "), # 2 ) all_structs.append(MegaStruct) MegaStruct.thrift_spec = ( None, #", "iter107) oprot.writeSetEnd() oprot.writeFieldEnd() if self.my_enumset is not None: oprot.writeFieldBegin('my_enumset', TType.SET,", "range(_size78): _elem83 = iprot.readI32() self.my_enumset.add(_elem83) iprot.readSetEnd() else: iprot.skip(ftype) elif fid", "None], False), None, ), # 20 ) fix_spec(all_structs) del all_structs", "iprot.readBinary() else: iprot.skip(ftype) elif fid == 9: if ftype ==", "(_etype63, _size60) = iprot.readListBegin() for _i64 in range(_size60): _elem65 =", "13 (14, TType.MAP, 'my_enum_structlist_map', (TType.I32, None, TType.LIST, (TType.STRUCT, [MiniStruct, None],", "ftype == TType.MAP: 
self.my_enum_stringlist_map = {} (_ktype29, _vtype30, _size28) =", "my_structset=None,): self.my_bool = my_bool self.my_byte = my_byte self.my_16bit_int = my_16bit_int", "self.my_string is not None: oprot.writeFieldBegin('my_string', TType.STRING, 7) oprot.writeString(self.my_string.encode('utf-8') if sys.version_info[0]", "oprot.writeFieldEnd() if self.my_enum_struct_map is not None: oprot.writeFieldBegin('my_enum_struct_map', TType.MAP, 12) oprot.writeMapBegin(TType.I32,", "iprot.readMapBegin() for _i45 in range(_size41): _key46 = iprot.readI32() _val47 =", "# 4 (5, TType.I64, 'my_64bit_int', None, None, ), # 5", "elif fid == 18: if ftype == TType.SET: self.my_stringset =", "is not None: oprot.writeFieldBegin('my_enum_structlist_map', TType.MAP, 14) oprot.writeMapBegin(TType.I32, TType.LIST, len(self.my_enum_structlist_map)) for", "# DO NOT EDIT UNLESS YOU ARE SURE THAT YOU", "iprot.readSetEnd() else: iprot.skip(ftype) elif fid == 20: if ftype ==", "None, TType.STRUCT, [MiniStruct, None], False), None, ), # 12 (13,", "1) oprot.writeString(self.my_string.encode('utf-8') if sys.version_info[0] == 2 else self.my_string) oprot.writeFieldEnd() if", "_size72) = iprot.readSetBegin() for _i76 in range(_size72): _elem77 = iprot.readString().decode('utf-8')", "iprot.skip(ftype) elif fid == 5: if ftype == TType.I64: self.my_64bit_int", "None: oprot.writeFieldBegin('my_32bit_int', TType.I32, 4) oprot.writeI32(self.my_32bit_int) oprot.writeFieldEnd() if self.my_64bit_int is not", "TType.LIST: self.my_structlist = [] (_etype63, _size60) = iprot.readListBegin() for _i64", "(_etype38, _size35) = iprot.readListBegin() for _i39 in range(_size35): _elem40 =", "1: \"LLAMA\", 2: \"ALPACA\", } _NAMES_TO_VALUES = { \"LLAMA\": 1,", "not None: oprot.writeFieldBegin('my_enum_struct_map', TType.MAP, 12) oprot.writeMapBegin(TType.I32, TType.STRUCT, len(self.my_enum_struct_map)) for kiter96,", "# from thrift.Thrift import TType, TMessageType, TFrozenDict, TException, TApplicationException from", "ftype == TType.I32: self.my_enum = iprot.readI32() else: iprot.skip(ftype) else: iprot.skip(ftype)", "iprot.readString().decode('utf-8') if sys.version_info[0] == 2 else iprot.readString() _val34.append(_elem40) iprot.readListEnd() self.my_enum_stringlist_map[_key33]", "elif fid == 17: if ftype == TType.LIST: self.my_enumlist =", "'UTF8', False), False), None, ), # 13 (14, TType.MAP, 'my_enum_structlist_map',", "_elem83 = iprot.readI32() self.my_enumset.add(_elem83) iprot.readSetEnd() else: iprot.skip(ftype) elif fid ==", "= iprot.readMapBegin() for _i32 in range(_size28): _key33 = iprot.readI32() _val34", "MiniStruct() _elem89.read(iprot) self.my_structset.add(_elem89) iprot.readSetEnd() else: iprot.skip(ftype) else: iprot.skip(ftype) iprot.readFieldEnd() iprot.readStructEnd()", "oprot.writeSetBegin(TType.STRUCT, len(self.my_structset)) for iter109 in self.my_structset: iter109.write(oprot) oprot.writeSetEnd() oprot.writeFieldEnd() oprot.writeFieldStop()", "len(self.my_enum_struct_map)) for kiter96, viter97 in self.my_enum_struct_map.items(): oprot.writeI32(kiter96) viter97.write(oprot) oprot.writeMapEnd() oprot.writeFieldEnd()", "ftype == TType.SET: self.my_enumset = set() (_etype81, _size78) = iprot.readSetBegin()", "iprot.readString().decode('utf-8') if sys.version_info[0] == 2 else iprot.readString() self.my_string_string_map[_key5] = _val6", "TType, TMessageType, TFrozenDict, TException, TApplicationException from thrift.protocol.TProtocol import TProtocolException from", "'my_string', 'UTF8', None, ), # 1 (2, 
TType.I32, 'my_enum', None,", "oprot._fast_encode is not None and self.thrift_spec is not None: oprot.trans.write(oprot._fast_encode(self,", "if ftype == TType.I16: self.my_16bit_int = iprot.readI16() else: iprot.skip(ftype) elif", "== 2: if ftype == TType.BYTE: self.my_byte = iprot.readByte() else:", "TType.I32: self.my_32bit_int = iprot.readI32() else: iprot.skip(ftype) elif fid == 5:", "else: iprot.skip(ftype) elif fid == 2: if ftype == TType.I32:", "return iprot.readStructBegin() while True: (fname, ftype, fid) = iprot.readFieldBegin() if", "11) oprot.writeMapBegin(TType.I32, TType.STRING, len(self.my_enum_string_map)) for kiter94, viter95 in self.my_enum_string_map.items(): oprot.writeI32(kiter94)", "iter103 in viter102: iter103.write(oprot) oprot.writeListEnd() oprot.writeMapEnd() oprot.writeFieldEnd() if self.my_stringlist is", "None, None, ), # 2 ) all_structs.append(MegaStruct) MegaStruct.thrift_spec = (", "MegaStruct.thrift_spec = ( None, # 0 (1, TType.BOOL, 'my_bool', None,", "in range(_size72): _elem77 = iprot.readString().decode('utf-8') if sys.version_info[0] == 2 else", "2 else iprot.readString() _val34.append(_elem40) iprot.readListEnd() self.my_enum_stringlist_map[_key33] = _val34 iprot.readMapEnd() else:", "iprot.readListBegin() for _i64 in range(_size60): _elem65 = MiniStruct() _elem65.read(iprot) self.my_structlist.append(_elem65)", "TType.SET, 'my_enumset', (TType.I32, None, False), None, ), # 19 (20,", "not None: oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec])) return oprot.writeStructBegin('MegaStruct') if self.my_bool is", "None: oprot.writeFieldBegin('my_structset', TType.SET, 20) oprot.writeSetBegin(TType.STRUCT, len(self.my_structset)) for iter109 in self.my_structset:", "TType.MAP, 'my_enum_struct_map', (TType.I32, None, TType.STRUCT, [MiniStruct, None], False), None, ),", "= ( None, # 0 (1, TType.BOOL, 'my_bool', None, None,", "= my_16bit_int self.my_32bit_int = my_32bit_int self.my_64bit_int = my_64bit_int self.my_double =", "11: if ftype == TType.MAP: self.my_enum_string_map = {} (_ktype15, _vtype16,", "TType.BOOL, 1) oprot.writeBool(self.my_bool) oprot.writeFieldEnd() if self.my_byte is not None: oprot.writeFieldBegin('my_byte',", "class MegaStruct(object): \"\"\" Attributes: - my_bool - my_byte - my_16bit_int", "len(viter99)) for iter100 in viter99: oprot.writeString(iter100.encode('utf-8') if sys.version_info[0] == 2", "iprot.skip(ftype) elif fid == 7: if ftype == TType.STRING: self.my_string", "oprot.writeString(self.my_string.encode('utf-8') if sys.version_info[0] == 2 else self.my_string) oprot.writeFieldEnd() if self.my_binary", "oprot.writeMapEnd() oprot.writeFieldEnd() if self.my_enum_string_map is not None: oprot.writeFieldBegin('my_enum_string_map', TType.MAP, 11)", "2 else viter95) oprot.writeMapEnd() oprot.writeFieldEnd() if self.my_enum_struct_map is not None:", "\"\"\" def __init__(self, my_bool=None, my_byte=None, my_16bit_int=None, my_32bit_int=None, my_64bit_int=None, my_double=None, my_string=None,", "TType.STRING: self.my_string = iprot.readString().decode('utf-8') if sys.version_info[0] == 2 else iprot.readString()", "2 (3, TType.I16, 'my_16bit_int', None, None, ), # 3 (4,", "oprot.writeString(iter104.encode('utf-8') if sys.version_info[0] == 2 else iter104) oprot.writeListEnd() oprot.writeFieldEnd() if", "fid == 14: if ftype == TType.MAP: self.my_enum_structlist_map = {}", "6: if ftype == TType.DOUBLE: self.my_double = iprot.readDouble() else: iprot.skip(ftype)", "oprot.writeFieldEnd() if self.my_double is 
not None: oprot.writeFieldBegin('my_double', TType.DOUBLE, 6) oprot.writeDouble(self.my_double)", "not None: oprot.writeFieldBegin('my_stringset', TType.SET, 18) oprot.writeSetBegin(TType.STRING, len(self.my_stringset)) for iter107 in", "# 17 (18, TType.SET, 'my_stringset', (TType.STRING, 'UTF8', False), None, ),", "else: iprot.skip(ftype) elif fid == 5: if ftype == TType.I64:", "if ftype == TType.STRING: self.my_string = iprot.readString().decode('utf-8') if sys.version_info[0] ==", "else self.my_string) oprot.writeFieldEnd() if self.my_binary is not None: oprot.writeFieldBegin('my_binary', TType.STRING,", "iter105.write(oprot) oprot.writeListEnd() oprot.writeFieldEnd() if self.my_enumlist is not None: oprot.writeFieldBegin('my_enumlist', TType.LIST,", "fid == 1: if ftype == TType.BOOL: self.my_bool = iprot.readBool()", "- my_string_enum_map - my_enum_string_map - my_enum_struct_map - my_enum_stringlist_map - my_enum_structlist_map", "_i58 in range(_size54): _elem59 = iprot.readString().decode('utf-8') if sys.version_info[0] == 2", "{ 1: \"LLAMA\", 2: \"ALPACA\", } _NAMES_TO_VALUES = { \"LLAMA\":", "= my_enumlist self.my_stringset = my_stringset self.my_enumset = my_enumset self.my_structset =", "'my_enum_string_map', (TType.I32, None, TType.STRING, 'UTF8', False), None, ), # 11", "(self == other) class MegaStruct(object): \"\"\" Attributes: - my_bool -", "my_16bit_int - my_32bit_int - my_64bit_int - my_double - my_string -", "None: oprot.writeFieldBegin('my_enum', TType.I32, 2) oprot.writeI32(self.my_enum) oprot.writeFieldEnd() oprot.writeFieldStop() oprot.writeStructEnd() def validate(self):", "None, ), # 18 (19, TType.SET, 'my_enumset', (TType.I32, None, False),", "== 4: if ftype == TType.I32: self.my_32bit_int = iprot.readI32() else:", "else: iprot.skip(ftype) else: iprot.skip(ftype) iprot.readFieldEnd() iprot.readStructEnd() def write(self, oprot): if", "sys.version_info[0] == 2 else viter91) oprot.writeMapEnd() oprot.writeFieldEnd() if self.my_string_enum_map is", "ftype == TType.MAP: self.my_enum_structlist_map = {} (_ktype42, _vtype43, _size41) =", "for iter108 in self.my_enumset: oprot.writeI32(iter108) oprot.writeSetEnd() oprot.writeFieldEnd() if self.my_structset is", "= iprot.readMapBegin() for _i45 in range(_size41): _key46 = iprot.readI32() _val47", "if self.my_32bit_int is not None: oprot.writeFieldBegin('my_32bit_int', TType.I32, 4) oprot.writeI32(self.my_32bit_int) oprot.writeFieldEnd()", "20: if ftype == TType.SET: self.my_structset = set() (_etype87, _size84)", "# 15 (16, TType.LIST, 'my_structlist', (TType.STRUCT, [MiniStruct, None], False), None,", "DOING # # options string: py # from thrift.Thrift import", "elif fid == 8: if ftype == TType.STRING: self.my_binary =", "TType.STRING, len(self.my_string_string_map)) for kiter90, viter91 in self.my_string_string_map.items(): oprot.writeString(kiter90.encode('utf-8') if sys.version_info[0]", "TType.BOOL: self.my_bool = iprot.readBool() else: iprot.skip(ftype) elif fid == 2:", "break if fid == 1: if ftype == TType.BOOL: self.my_bool", "not None: oprot.writeFieldBegin('my_string_string_map', TType.MAP, 9) oprot.writeMapBegin(TType.STRING, TType.STRING, len(self.my_string_string_map)) for kiter90,", "TType.STRUCT, len(self.my_enum_struct_map)) for kiter96, viter97 in self.my_enum_struct_map.items(): oprot.writeI32(kiter96) viter97.write(oprot) oprot.writeMapEnd()", "None, ), # 11 (12, TType.MAP, 'my_enum_struct_map', (TType.I32, None, TType.STRUCT,", "is not None: oprot.writeFieldBegin('my_enum_string_map', TType.MAP, 11) 
oprot.writeMapBegin(TType.I32, TType.STRING, len(self.my_enum_string_map)) for", "= iprot.readByte() else: iprot.skip(ftype) elif fid == 3: if ftype", "else: iprot.skip(ftype) elif fid == 7: if ftype == TType.STRING:", "oprot.writeFieldBegin('my_structlist', TType.LIST, 16) oprot.writeListBegin(TType.STRUCT, len(self.my_structlist)) for iter105 in self.my_structlist: iter105.write(oprot)", "self.my_string = my_string self.my_enum = my_enum def read(self, iprot): if", "TType.STOP: break if fid == 1: if ftype == TType.STRING:", "ftype == TType.LIST: self.my_enumlist = [] (_etype69, _size66) = iprot.readListBegin()", "is not None: oprot.writeFieldBegin('my_enumlist', TType.LIST, 17) oprot.writeListBegin(TType.I32, len(self.my_enumlist)) for iter106", "None: oprot.writeFieldBegin('my_enum_structlist_map', TType.MAP, 14) oprot.writeMapBegin(TType.I32, TType.LIST, len(self.my_enum_structlist_map)) for kiter101, viter102", "oprot.writeFieldEnd() if self.my_byte is not None: oprot.writeFieldBegin('my_byte', TType.BYTE, 2) oprot.writeByte(self.my_byte)", "_i25 in range(_size21): _key26 = iprot.readI32() _val27 = MiniStruct() _val27.read(iprot)", "not None: oprot.writeFieldBegin('my_enum_structlist_map', TType.MAP, 14) oprot.writeMapBegin(TType.I32, TType.LIST, len(self.my_enum_structlist_map)) for kiter101,", "'%s(%s)' % (self.__class__.__name__, ', '.join(L)) def __eq__(self, other): return isinstance(other,", "\"\"\" Attributes: - my_string - my_enum \"\"\" def __init__(self, my_string=None,", "TType.MAP: self.my_string_string_map = {} (_ktype1, _vtype2, _size0) = iprot.readMapBegin() for", "TType.SET: self.my_stringset = set() (_etype75, _size72) = iprot.readSetBegin() for _i76", "TType.SET, 'my_stringset', (TType.STRING, 'UTF8', False), None, ), # 18 (19,", "ALPACA = 2 _VALUES_TO_NAMES = { 1: \"LLAMA\", 2: \"ALPACA\",", "MegaStruct(object): \"\"\" Attributes: - my_bool - my_byte - my_16bit_int -", "iprot.readMapEnd() else: iprot.skip(ftype) elif fid == 13: if ftype ==", "sys.version_info[0] == 2 else iprot.readString() else: iprot.skip(ftype) elif fid ==", "for _i32 in range(_size28): _key33 = iprot.readI32() _val34 = []", "all_structs.append(MiniStruct) MiniStruct.thrift_spec = ( None, # 0 (1, TType.STRING, 'my_string',", "TType.DOUBLE: self.my_double = iprot.readDouble() else: iprot.skip(ftype) elif fid == 7:", "oprot.writeI32(iter106) oprot.writeListEnd() oprot.writeFieldEnd() if self.my_stringset is not None: oprot.writeFieldBegin('my_stringset', TType.SET,", "in range(_size41): _key46 = iprot.readI32() _val47 = [] (_etype51, _size48)", "range(_size7): _key12 = iprot.readString().decode('utf-8') if sys.version_info[0] == 2 else iprot.readString()", "(9, TType.MAP, 'my_string_string_map', (TType.STRING, 'UTF8', TType.STRING, 'UTF8', False), None, ),", "False), None, ), # 17 (18, TType.SET, 'my_stringset', (TType.STRING, 'UTF8',", "self.my_64bit_int = my_64bit_int self.my_double = my_double self.my_string = my_string self.my_binary", "iprot.readListEnd() self.my_enum_stringlist_map[_key33] = _val34 iprot.readMapEnd() else: iprot.skip(ftype) elif fid ==", "L = ['%s=%r' % (key, value) for key, value in", "TType.SET, 19) oprot.writeSetBegin(TType.I32, len(self.my_enumset)) for iter108 in self.my_enumset: oprot.writeI32(iter108) oprot.writeSetEnd()", "my_string_enum_map=None, my_enum_string_map=None, my_enum_struct_map=None, my_enum_stringlist_map=None, my_enum_structlist_map=None, my_stringlist=None, my_structlist=None, my_enumlist=None, my_stringset=None, my_enumset=None,", 
"self.my_string_string_map.items(): oprot.writeString(kiter90.encode('utf-8') if sys.version_info[0] == 2 else kiter90) oprot.writeString(viter91.encode('utf-8') if", "is not None and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not", "else: iprot.skip(ftype) elif fid == 18: if ftype == TType.SET:", "None: oprot.writeFieldBegin('my_16bit_int', TType.I16, 3) oprot.writeI16(self.my_16bit_int) oprot.writeFieldEnd() if self.my_32bit_int is not", "'my_64bit_int', None, None, ), # 5 (6, TType.DOUBLE, 'my_double', None,", "fid == 17: if ftype == TType.LIST: self.my_enumlist = []", "_val34 iprot.readMapEnd() else: iprot.skip(ftype) elif fid == 14: if ftype", "== 7: if ftype == TType.STRING: self.my_string = iprot.readString().decode('utf-8') if", "== 2 else iprot.readString() _val34.append(_elem40) iprot.readListEnd() self.my_enum_stringlist_map[_key33] = _val34 iprot.readMapEnd()", "if ftype == TType.LIST: self.my_stringlist = [] (_etype57, _size54) =", "None, ), # 12 (13, TType.MAP, 'my_enum_stringlist_map', (TType.I32, None, TType.LIST,", "if self.my_bool is not None: oprot.writeFieldBegin('my_bool', TType.BOOL, 1) oprot.writeBool(self.my_bool) oprot.writeFieldEnd()", "range(_size41): _key46 = iprot.readI32() _val47 = [] (_etype51, _size48) =", "iprot.readStructEnd() def write(self, oprot): if oprot._fast_encode is not None and", "self.my_enum_string_map.items(): oprot.writeI32(kiter94) oprot.writeString(viter95.encode('utf-8') if sys.version_info[0] == 2 else viter95) oprot.writeMapEnd()", "self.my_structlist = [] (_etype63, _size60) = iprot.readListBegin() for _i64 in", "iprot.readString() self.my_string_string_map[_key5] = _val6 iprot.readMapEnd() else: iprot.skip(ftype) elif fid ==", "other): return not (self == other) class MegaStruct(object): \"\"\" Attributes:", "iprot.skip(ftype) elif fid == 14: if ftype == TType.MAP: self.my_enum_structlist_map", "for _i39 in range(_size35): _elem40 = iprot.readString().decode('utf-8') if sys.version_info[0] ==", "read(self, iprot): if iprot._fast_decode is not None and isinstance(iprot.trans, TTransport.CReadableTransport)", "if ftype == TType.I64: self.my_64bit_int = iprot.readI64() else: iprot.skip(ftype) elif", "2 else iprot.readString() self.my_string_string_map[_key5] = _val6 iprot.readMapEnd() else: iprot.skip(ftype) elif", "'my_stringlist', (TType.STRING, 'UTF8', False), None, ), # 15 (16, TType.LIST,", "TType.STRING, 'my_string', 'UTF8', None, ), # 7 (8, TType.STRING, 'my_binary',", "= _val27 iprot.readMapEnd() else: iprot.skip(ftype) elif fid == 13: if", "_elem89 = MiniStruct() _elem89.read(iprot) self.my_structset.add(_elem89) iprot.readSetEnd() else: iprot.skip(ftype) else: iprot.skip(ftype)", "(_etype75, _size72) = iprot.readSetBegin() for _i76 in range(_size72): _elem77 =", "oprot.writeFieldBegin('my_double', TType.DOUBLE, 6) oprot.writeDouble(self.my_double) oprot.writeFieldEnd() if self.my_string is not None:", "kiter92) oprot.writeI32(viter93) oprot.writeMapEnd() oprot.writeFieldEnd() if self.my_enum_string_map is not None: oprot.writeFieldBegin('my_enum_string_map',", "SURE THAT YOU KNOW WHAT YOU ARE DOING # #", "[] (_etype63, _size60) = iprot.readListBegin() for _i64 in range(_size60): _elem65", "TType.LIST, (TType.STRING, 'UTF8', False), False), None, ), # 13 (14,", "self.thrift_spec is not None: oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec])) return oprot.writeStructBegin('MiniStruct') if", "ftype == TType.SET: self.my_structset = set() (_etype87, _size84) = 
iprot.readSetBegin()", "viter91) oprot.writeMapEnd() oprot.writeFieldEnd() if self.my_string_enum_map is not None: oprot.writeFieldBegin('my_string_enum_map', TType.MAP,", "self.my_64bit_int is not None: oprot.writeFieldBegin('my_64bit_int', TType.I64, 5) oprot.writeI64(self.my_64bit_int) oprot.writeFieldEnd() if", "TType.LIST, len(self.my_enum_stringlist_map)) for kiter98, viter99 in self.my_enum_stringlist_map.items(): oprot.writeI32(kiter98) oprot.writeListBegin(TType.STRING, len(viter99))", "== 2 else self.my_string) oprot.writeFieldEnd() if self.my_enum is not None:", "my_double - my_string - my_binary - my_string_string_map - my_string_enum_map -", "my_enumlist self.my_stringset = my_stringset self.my_enumset = my_enumset self.my_structset = my_structset", "TType.SET, 'my_structset', (TType.STRUCT, [MiniStruct, None], False), None, ), # 20", "= my_stringlist self.my_structlist = my_structlist self.my_enumlist = my_enumlist self.my_stringset =", "= iprot.readSetBegin() for _i82 in range(_size78): _elem83 = iprot.readI32() self.my_enumset.add(_elem83)", "TException, TApplicationException from thrift.protocol.TProtocol import TProtocolException from thrift.TRecursive import fix_spec", "oprot.writeSetBegin(TType.I32, len(self.my_enumset)) for iter108 in self.my_enumset: oprot.writeI32(iter108) oprot.writeSetEnd() oprot.writeFieldEnd() if", "iprot.readListEnd() else: iprot.skip(ftype) elif fid == 18: if ftype ==", "TType.BYTE, 2) oprot.writeByte(self.my_byte) oprot.writeFieldEnd() if self.my_16bit_int is not None: oprot.writeFieldBegin('my_16bit_int',", "'my_enum', None, None, ), # 2 ) all_structs.append(MegaStruct) MegaStruct.thrift_spec =", "my_enum_structlist_map - my_stringlist - my_structlist - my_enumlist - my_stringset -", "_val47 iprot.readMapEnd() else: iprot.skip(ftype) elif fid == 15: if ftype", "Autogenerated by Thrift Compiler (0.13.0) # # DO NOT EDIT", "= iprot.readString().decode('utf-8') if sys.version_info[0] == 2 else iprot.readString() self.my_stringlist.append(_elem59) iprot.readListEnd()", "(_ktype1, _vtype2, _size0) = iprot.readMapBegin() for _i4 in range(_size0): _key5", "_i39 in range(_size35): _elem40 = iprot.readString().decode('utf-8') if sys.version_info[0] == 2", "TType.LIST: self.my_enumlist = [] (_etype69, _size66) = iprot.readListBegin() for _i70", "sys.version_info[0] == 2 else iter107) oprot.writeSetEnd() oprot.writeFieldEnd() if self.my_enumset is", "None], False), None, ), # 16 (17, TType.LIST, 'my_enumlist', (TType.I32,", "_vtype9, _size7) = iprot.readMapBegin() for _i11 in range(_size7): _key12 =", "2: if ftype == TType.BYTE: self.my_byte = iprot.readByte() else: iprot.skip(ftype)", "oprot.writeMapBegin(TType.I32, TType.LIST, len(self.my_enum_structlist_map)) for kiter101, viter102 in self.my_enum_structlist_map.items(): oprot.writeI32(kiter101) oprot.writeListBegin(TType.STRUCT,", "my_enumlist=None, my_stringset=None, my_enumset=None, my_structset=None,): self.my_bool = my_bool self.my_byte = my_byte", "ftype == TType.STRING: self.my_binary = iprot.readBinary() else: iprot.skip(ftype) elif fid", "oprot.writeDouble(self.my_double) oprot.writeFieldEnd() if self.my_string is not None: oprot.writeFieldBegin('my_string', TType.STRING, 7)", "7: if ftype == TType.STRING: self.my_string = iprot.readString().decode('utf-8') if sys.version_info[0]", "- my_stringlist - my_structlist - my_enumlist - my_stringset - my_enumset", "oprot.writeListEnd() oprot.writeMapEnd() oprot.writeFieldEnd() if self.my_stringlist is not None: oprot.writeFieldBegin('my_stringlist', 
TType.LIST,", "iprot.readMapBegin() for _i11 in range(_size7): _key12 = iprot.readString().decode('utf-8') if sys.version_info[0]", "'UTF8', None, ), # 7 (8, TType.STRING, 'my_binary', 'BINARY', None,", "(_ktype22, _vtype23, _size21) = iprot.readMapBegin() for _i25 in range(_size21): _key26", "def __eq__(self, other): return isinstance(other, self.__class__) and self.__dict__ == other.__dict__", "in range(_size54): _elem59 = iprot.readString().decode('utf-8') if sys.version_info[0] == 2 else", "= iprot.readI64() else: iprot.skip(ftype) elif fid == 6: if ftype", "= _val13 iprot.readMapEnd() else: iprot.skip(ftype) elif fid == 11: if", "# 12 (13, TType.MAP, 'my_enum_stringlist_map', (TType.I32, None, TType.LIST, (TType.STRING, 'UTF8',", "ARE SURE THAT YOU KNOW WHAT YOU ARE DOING #", "= 1 ALPACA = 2 _VALUES_TO_NAMES = { 1: \"LLAMA\",", "TType.LIST, 15) oprot.writeListBegin(TType.STRING, len(self.my_stringlist)) for iter104 in self.my_stringlist: oprot.writeString(iter104.encode('utf-8') if", "_elem59 = iprot.readString().decode('utf-8') if sys.version_info[0] == 2 else iprot.readString() self.my_stringlist.append(_elem59)", "_elem65.read(iprot) self.my_structlist.append(_elem65) iprot.readListEnd() else: iprot.skip(ftype) elif fid == 17: if", "iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec]) return iprot.readStructBegin() while True: (fname, ftype,", "TProtocolException from thrift.TRecursive import fix_spec import sys from thrift.transport import", "value in self.__dict__.items()] return '%s(%s)' % (self.__class__.__name__, ', '.join(L)) def", "2 else iprot.readString() else: iprot.skip(ftype) elif fid == 2: if", "= [] (_etype51, _size48) = iprot.readListBegin() for _i52 in range(_size48):", "not None: oprot.writeFieldBegin('my_stringlist', TType.LIST, 15) oprot.writeListBegin(TType.STRING, len(self.my_stringlist)) for iter104 in", "else: iprot.skip(ftype) elif fid == 14: if ftype == TType.MAP:", "self.my_stringset = set() (_etype75, _size72) = iprot.readSetBegin() for _i76 in", "False), None, ), # 16 (17, TType.LIST, 'my_enumlist', (TType.I32, None,", "_vtype2, _size0) = iprot.readMapBegin() for _i4 in range(_size0): _key5 =", "if sys.version_info[0] == 2 else iprot.readString() else: iprot.skip(ftype) elif fid", "2 else iprot.readString() self.my_stringset.add(_elem77) iprot.readSetEnd() else: iprot.skip(ftype) elif fid ==", "Attributes: - my_bool - my_byte - my_16bit_int - my_32bit_int -", "- my_enum_struct_map - my_enum_stringlist_map - my_enum_structlist_map - my_stringlist - my_structlist", "_val34 = [] (_etype38, _size35) = iprot.readListBegin() for _i39 in", "TType.MAP, 'my_string_string_map', (TType.STRING, 'UTF8', TType.STRING, 'UTF8', False), None, ), #", "_size78) = iprot.readSetBegin() for _i82 in range(_size78): _elem83 = iprot.readI32()", "return not (self == other) all_structs.append(MiniStruct) MiniStruct.thrift_spec = ( None,", "None, # 0 (1, TType.STRING, 'my_string', 'UTF8', None, ), #", "0 (1, TType.BOOL, 'my_bool', None, None, ), # 1 (2,", "10: if ftype == TType.MAP: self.my_string_enum_map = {} (_ktype8, _vtype9,", "range(_size72): _elem77 = iprot.readString().decode('utf-8') if sys.version_info[0] == 2 else iprot.readString()", "TApplicationException from thrift.protocol.TProtocol import TProtocolException from thrift.TRecursive import fix_spec import", "self.my_bool = my_bool self.my_byte = my_byte self.my_16bit_int = my_16bit_int self.my_32bit_int", "else iprot.readString() else: iprot.skip(ftype) elif fid == 8: if ftype", "None, TType.LIST, 
(TType.STRING, 'UTF8', False), False), None, ), # 13", "== TType.I32: self.my_32bit_int = iprot.readI32() else: iprot.skip(ftype) elif fid ==", "oprot.writeFieldBegin('my_enum_struct_map', TType.MAP, 12) oprot.writeMapBegin(TType.I32, TType.STRUCT, len(self.my_enum_struct_map)) for kiter96, viter97 in", "oprot.writeListBegin(TType.STRING, len(viter99)) for iter100 in viter99: oprot.writeString(iter100.encode('utf-8') if sys.version_info[0] ==", "for _i58 in range(_size54): _elem59 = iprot.readString().decode('utf-8') if sys.version_info[0] ==", "in self.my_enum_struct_map.items(): oprot.writeI32(kiter96) viter97.write(oprot) oprot.writeMapEnd() oprot.writeFieldEnd() if self.my_enum_stringlist_map is not", "== TType.MAP: self.my_string_enum_map = {} (_ktype8, _vtype9, _size7) = iprot.readMapBegin()", "THAT YOU KNOW WHAT YOU ARE DOING # # options", "1: if ftype == TType.STRING: self.my_string = iprot.readString().decode('utf-8') if sys.version_info[0]", "_size0) = iprot.readMapBegin() for _i4 in range(_size0): _key5 = iprot.readString().decode('utf-8')", "for _i25 in range(_size21): _key26 = iprot.readI32() _val27 = MiniStruct()", "self.my_double = my_double self.my_string = my_string self.my_binary = my_binary self.my_string_string_map", "else: iprot.skip(ftype) elif fid == 19: if ftype == TType.SET:", "my_bool - my_byte - my_16bit_int - my_32bit_int - my_64bit_int -", "(16, TType.LIST, 'my_structlist', (TType.STRUCT, [MiniStruct, None], False), None, ), #", "# options string: py # from thrift.Thrift import TType, TMessageType,", "oprot.writeListBegin(TType.STRUCT, len(self.my_structlist)) for iter105 in self.my_structlist: iter105.write(oprot) oprot.writeListEnd() oprot.writeFieldEnd() if", "None: oprot.writeFieldBegin('my_enumlist', TType.LIST, 17) oprot.writeListBegin(TType.I32, len(self.my_enumlist)) for iter106 in self.my_enumlist:", "not None: oprot.writeFieldBegin('my_bool', TType.BOOL, 1) oprot.writeBool(self.my_bool) oprot.writeFieldEnd() if self.my_byte is", "TType.STRING, 'my_binary', 'BINARY', None, ), # 8 (9, TType.MAP, 'my_string_string_map',", "Thrift Compiler (0.13.0) # # DO NOT EDIT UNLESS YOU", "else iprot.readString() _val34.append(_elem40) iprot.readListEnd() self.my_enum_stringlist_map[_key33] = _val34 iprot.readMapEnd() else: iprot.skip(ftype)", "7) oprot.writeString(self.my_string.encode('utf-8') if sys.version_info[0] == 2 else self.my_string) oprot.writeFieldEnd() if", "len(self.my_enumlist)) for iter106 in self.my_enumlist: oprot.writeI32(iter106) oprot.writeListEnd() oprot.writeFieldEnd() if self.my_stringset", "_size21) = iprot.readMapBegin() for _i25 in range(_size21): _key26 = iprot.readI32()", "for _i64 in range(_size60): _elem65 = MiniStruct() _elem65.read(iprot) self.my_structlist.append(_elem65) iprot.readListEnd()", "self.my_string_string_map[_key5] = _val6 iprot.readMapEnd() else: iprot.skip(ftype) elif fid == 10:", "TType.MAP, 10) oprot.writeMapBegin(TType.STRING, TType.I32, len(self.my_string_enum_map)) for kiter92, viter93 in self.my_string_enum_map.items():", "else: iprot.skip(ftype) elif fid == 17: if ftype == TType.LIST:", "6 (7, TType.STRING, 'my_string', 'UTF8', None, ), # 7 (8,", "my_64bit_int - my_double - my_string - my_binary - my_string_string_map -", "thrift.protocol.TProtocol import TProtocolException from thrift.TRecursive import fix_spec import sys from", "13) oprot.writeMapBegin(TType.I32, TType.LIST, len(self.my_enum_stringlist_map)) for kiter98, viter99 in self.my_enum_stringlist_map.items(): oprot.writeI32(kiter98)", "None: 
oprot.writeFieldBegin('my_enum_stringlist_map', TType.MAP, 13) oprot.writeMapBegin(TType.I32, TType.LIST, len(self.my_enum_stringlist_map)) for kiter98, viter99", "self.my_enumlist = my_enumlist self.my_stringset = my_stringset self.my_enumset = my_enumset self.my_structset", "is not None: oprot.writeFieldBegin('my_structlist', TType.LIST, 16) oprot.writeListBegin(TType.STRUCT, len(self.my_structlist)) for iter105", "[] (_etype51, _size48) = iprot.readListBegin() for _i52 in range(_size48): _elem53", "while True: (fname, ftype, fid) = iprot.readFieldBegin() if ftype ==", "elif fid == 14: if ftype == TType.MAP: self.my_enum_structlist_map =", "iprot.readString() self.my_enum_string_map[_key19] = _val20 iprot.readMapEnd() else: iprot.skip(ftype) elif fid ==", "not None: oprot.writeFieldBegin('my_structset', TType.SET, 20) oprot.writeSetBegin(TType.STRUCT, len(self.my_structset)) for iter109 in", "thrift.TRecursive import fix_spec import sys from thrift.transport import TTransport all_structs", "oprot.writeStructBegin('MiniStruct') if self.my_string is not None: oprot.writeFieldBegin('my_string', TType.STRING, 1) oprot.writeString(self.my_string.encode('utf-8')", "= { \"LLAMA\": 1, \"ALPACA\": 2, } class MiniStruct(object): \"\"\"", "oprot.writeFieldBegin('my_enumlist', TType.LIST, 17) oprot.writeListBegin(TType.I32, len(self.my_enumlist)) for iter106 in self.my_enumlist: oprot.writeI32(iter106)", "18: if ftype == TType.SET: self.my_stringset = set() (_etype75, _size72)", "iprot.readString() _val34.append(_elem40) iprot.readListEnd() self.my_enum_stringlist_map[_key33] = _val34 iprot.readMapEnd() else: iprot.skip(ftype) elif", "'my_structset', (TType.STRUCT, [MiniStruct, None], False), None, ), # 20 )", "True: (fname, ftype, fid) = iprot.readFieldBegin() if ftype == TType.STOP:", "(key, value) for key, value in self.__dict__.items()] return '%s(%s)' %", "in self.my_enumlist: oprot.writeI32(iter106) oprot.writeListEnd() oprot.writeFieldEnd() if self.my_stringset is not None:", "elif fid == 13: if ftype == TType.MAP: self.my_enum_stringlist_map =", "None, None, ), # 4 (5, TType.I64, 'my_64bit_int', None, None,", "else iter104) oprot.writeListEnd() oprot.writeFieldEnd() if self.my_structlist is not None: oprot.writeFieldBegin('my_structlist',", "(3, TType.I16, 'my_16bit_int', None, None, ), # 3 (4, TType.I32,", "my_bool=None, my_byte=None, my_16bit_int=None, my_32bit_int=None, my_64bit_int=None, my_double=None, my_string=None, my_binary=None, my_string_string_map=None, my_string_enum_map=None,", "= my_32bit_int self.my_64bit_int = my_64bit_int self.my_double = my_double self.my_string =", "_VALUES_TO_NAMES = { 1: \"LLAMA\", 2: \"ALPACA\", } _NAMES_TO_VALUES =", "fid == 20: if ftype == TType.SET: self.my_structset = set()", "- my_64bit_int - my_double - my_string - my_binary - my_string_string_map", "'UTF8', False), None, ), # 9 (10, TType.MAP, 'my_string_enum_map', (TType.STRING,", "( None, # 0 (1, TType.BOOL, 'my_bool', None, None, ),", "(_ktype42, _vtype43, _size41) = iprot.readMapBegin() for _i45 in range(_size41): _key46", "== 13: if ftype == TType.MAP: self.my_enum_stringlist_map = {} (_ktype29,", "TType.STRING, len(self.my_enum_string_map)) for kiter94, viter95 in self.my_enum_string_map.items(): oprot.writeI32(kiter94) oprot.writeString(viter95.encode('utf-8') if", "is not None: oprot.writeFieldBegin('my_string', TType.STRING, 1) oprot.writeString(self.my_string.encode('utf-8') if sys.version_info[0] ==", "else: iprot.skip(ftype) iprot.readFieldEnd() iprot.readStructEnd() def write(self, 
oprot): if oprot._fast_encode is", "_i88 in range(_size84): _elem89 = MiniStruct() _elem89.read(iprot) self.my_structset.add(_elem89) iprot.readSetEnd() else:", "else: iprot.skip(ftype) elif fid == 10: if ftype == TType.MAP:", "my_string - my_enum \"\"\" def __init__(self, my_string=None, my_enum=None,): self.my_string =", "# 13 (14, TType.MAP, 'my_enum_structlist_map', (TType.I32, None, TType.LIST, (TType.STRUCT, [MiniStruct,", "sys.version_info[0] == 2 else iter104) oprot.writeListEnd() oprot.writeFieldEnd() if self.my_structlist is", "'UTF8', TType.I32, None, False), None, ), # 10 (11, TType.MAP,", "== TType.SET: self.my_stringset = set() (_etype75, _size72) = iprot.readSetBegin() for", "kiter101, viter102 in self.my_enum_structlist_map.items(): oprot.writeI32(kiter101) oprot.writeListBegin(TType.STRUCT, len(viter102)) for iter103 in", "[MiniStruct, None], False), False), None, ), # 14 (15, TType.LIST,", "my_64bit_int=None, my_double=None, my_string=None, my_binary=None, my_string_string_map=None, my_string_enum_map=None, my_enum_string_map=None, my_enum_struct_map=None, my_enum_stringlist_map=None, my_enum_structlist_map=None,", "if self.my_structlist is not None: oprot.writeFieldBegin('my_structlist', TType.LIST, 16) oprot.writeListBegin(TType.STRUCT, len(self.my_structlist))", "self.my_16bit_int = my_16bit_int self.my_32bit_int = my_32bit_int self.my_64bit_int = my_64bit_int self.my_double", "import TType, TMessageType, TFrozenDict, TException, TApplicationException from thrift.protocol.TProtocol import TProtocolException", "TMessageType, TFrozenDict, TException, TApplicationException from thrift.protocol.TProtocol import TProtocolException from thrift.TRecursive", "self.my_enum_struct_map is not None: oprot.writeFieldBegin('my_enum_struct_map', TType.MAP, 12) oprot.writeMapBegin(TType.I32, TType.STRUCT, len(self.my_enum_struct_map))", "if self.my_string is not None: oprot.writeFieldBegin('my_string', TType.STRING, 7) oprot.writeString(self.my_string.encode('utf-8') if", "= my_binary self.my_string_string_map = my_string_string_map self.my_string_enum_map = my_string_enum_map self.my_enum_string_map =", "oprot.writeMapEnd() oprot.writeFieldEnd() if self.my_string_enum_map is not None: oprot.writeFieldBegin('my_string_enum_map', TType.MAP, 10)", "= _val47 iprot.readMapEnd() else: iprot.skip(ftype) elif fid == 15: if", "19 (20, TType.SET, 'my_structset', (TType.STRUCT, [MiniStruct, None], False), None, ),", "for kiter98, viter99 in self.my_enum_stringlist_map.items(): oprot.writeI32(kiter98) oprot.writeListBegin(TType.STRING, len(viter99)) for iter100", "= MiniStruct() _elem65.read(iprot) self.my_structlist.append(_elem65) iprot.readListEnd() else: iprot.skip(ftype) elif fid ==", "oprot.writeFieldBegin('my_stringset', TType.SET, 18) oprot.writeSetBegin(TType.STRING, len(self.my_stringset)) for iter107 in self.my_stringset: oprot.writeString(iter107.encode('utf-8')", "_i32 in range(_size28): _key33 = iprot.readI32() _val34 = [] (_etype38,", "oprot.writeFieldBegin('my_string_string_map', TType.MAP, 9) oprot.writeMapBegin(TType.STRING, TType.STRING, len(self.my_string_string_map)) for kiter90, viter91 in", "oprot.writeString(iter107.encode('utf-8') if sys.version_info[0] == 2 else iter107) oprot.writeSetEnd() oprot.writeFieldEnd() if", "in range(_size14): _key19 = iprot.readI32() _val20 = iprot.readString().decode('utf-8') if sys.version_info[0]", "2) oprot.writeI32(self.my_enum) oprot.writeFieldEnd() oprot.writeFieldStop() oprot.writeStructEnd() def validate(self): return def 
__repr__(self):", "None, ), # 2 ) all_structs.append(MegaStruct) MegaStruct.thrift_spec = ( None,", "oprot.writeBool(self.my_bool) oprot.writeFieldEnd() if self.my_byte is not None: oprot.writeFieldBegin('my_byte', TType.BYTE, 2)", "17: if ftype == TType.LIST: self.my_enumlist = [] (_etype69, _size66)", "if ftype == TType.LIST: self.my_enumlist = [] (_etype69, _size66) =", "0 (1, TType.STRING, 'my_string', 'UTF8', None, ), # 1 (2,", "self.my_enumset: oprot.writeI32(iter108) oprot.writeSetEnd() oprot.writeFieldEnd() if self.my_structset is not None: oprot.writeFieldBegin('my_structset',", "fid == 12: if ftype == TType.MAP: self.my_enum_struct_map = {}", "def __init__(self, my_bool=None, my_byte=None, my_16bit_int=None, my_32bit_int=None, my_64bit_int=None, my_double=None, my_string=None, my_binary=None,", "else iprot.readString() self.my_enum_string_map[_key19] = _val20 iprot.readMapEnd() else: iprot.skip(ftype) elif fid", "if self.my_string is not None: oprot.writeFieldBegin('my_string', TType.STRING, 1) oprot.writeString(self.my_string.encode('utf-8') if", "17 (18, TType.SET, 'my_stringset', (TType.STRING, 'UTF8', False), None, ), #", "= my_enum_structlist_map self.my_stringlist = my_stringlist self.my_structlist = my_structlist self.my_enumlist =", "self.my_enumlist.append(_elem71) iprot.readListEnd() else: iprot.skip(ftype) elif fid == 18: if ftype", "self.my_enum_string_map = my_enum_string_map self.my_enum_struct_map = my_enum_struct_map self.my_enum_stringlist_map = my_enum_stringlist_map self.my_enum_structlist_map", "my_bool self.my_byte = my_byte self.my_16bit_int = my_16bit_int self.my_32bit_int = my_32bit_int", "= iprot.readBinary() else: iprot.skip(ftype) elif fid == 9: if ftype", "), # 1 (2, TType.I32, 'my_enum', None, None, ), #", "None, ), # 1 (2, TType.I32, 'my_enum', None, None, ),", "for iter109 in self.my_structset: iter109.write(oprot) oprot.writeSetEnd() oprot.writeFieldEnd() oprot.writeFieldStop() oprot.writeStructEnd() def", "self.my_string_string_map = {} (_ktype1, _vtype2, _size0) = iprot.readMapBegin() for _i4", "= my_structlist self.my_enumlist = my_enumlist self.my_stringset = my_stringset self.my_enumset =", "ftype == TType.I64: self.my_64bit_int = iprot.readI64() else: iprot.skip(ftype) elif fid", "my_64bit_int self.my_double = my_double self.my_string = my_string self.my_binary = my_binary", "TType.MAP: self.my_enum_string_map = {} (_ktype15, _vtype16, _size14) = iprot.readMapBegin() for", "self.my_string_enum_map = my_string_enum_map self.my_enum_string_map = my_enum_string_map self.my_enum_struct_map = my_enum_struct_map self.my_enum_stringlist_map", "(TType.I32, None, False), None, ), # 19 (20, TType.SET, 'my_structset',", "_vtype23, _size21) = iprot.readMapBegin() for _i25 in range(_size21): _key26 =", "== 2 else iprot.readString() _val13 = iprot.readI32() self.my_string_enum_map[_key12] = _val13", "oprot.writeFieldEnd() if self.my_enum_string_map is not None: oprot.writeFieldBegin('my_enum_string_map', TType.MAP, 11) oprot.writeMapBegin(TType.I32,", "self.my_structset = my_structset def read(self, iprot): if iprot._fast_decode is not", "oprot.writeListBegin(TType.STRING, len(self.my_stringlist)) for iter104 in self.my_stringlist: oprot.writeString(iter104.encode('utf-8') if sys.version_info[0] ==", "), # 16 (17, TType.LIST, 'my_enumlist', (TType.I32, None, False), None,", "iprot): if iprot._fast_decode is not None and isinstance(iprot.trans, TTransport.CReadableTransport) and", "ftype == TType.STRING: self.my_string = iprot.readString().decode('utf-8') 
if sys.version_info[0] == 2", "not None: oprot.writeFieldBegin('my_byte', TType.BYTE, 2) oprot.writeByte(self.my_byte) oprot.writeFieldEnd() if self.my_16bit_int is", "thrift.Thrift import TType, TMessageType, TFrozenDict, TException, TApplicationException from thrift.protocol.TProtocol import", "== 11: if ftype == TType.MAP: self.my_enum_string_map = {} (_ktype15,", "in range(_size78): _elem83 = iprot.readI32() self.my_enumset.add(_elem83) iprot.readSetEnd() else: iprot.skip(ftype) elif", "self.my_structset: iter109.write(oprot) oprot.writeSetEnd() oprot.writeFieldEnd() oprot.writeFieldStop() oprot.writeStructEnd() def validate(self): return def", "my_enum_struct_map - my_enum_stringlist_map - my_enum_structlist_map - my_stringlist - my_structlist -", "[] (_etype57, _size54) = iprot.readListBegin() for _i58 in range(_size54): _elem59", "iprot.readListEnd() else: iprot.skip(ftype) elif fid == 16: if ftype ==", "self.my_enumlist is not None: oprot.writeFieldBegin('my_enumlist', TType.LIST, 17) oprot.writeListBegin(TType.I32, len(self.my_enumlist)) for", "= {} (_ktype15, _vtype16, _size14) = iprot.readMapBegin() for _i18 in", "_vtype16, _size14) = iprot.readMapBegin() for _i18 in range(_size14): _key19 =", "oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec])) return oprot.writeStructBegin('MiniStruct') if self.my_string is not None:", "iprot.skip(ftype) elif fid == 13: if ftype == TType.MAP: self.my_enum_stringlist_map", "# 2 ) all_structs.append(MegaStruct) MegaStruct.thrift_spec = ( None, # 0", "in self.my_structlist: iter105.write(oprot) oprot.writeListEnd() oprot.writeFieldEnd() if self.my_enumlist is not None:", "oprot.writeListEnd() oprot.writeFieldEnd() if self.my_stringset is not None: oprot.writeFieldBegin('my_stringset', TType.SET, 18)", "elif fid == 4: if ftype == TType.I32: self.my_32bit_int =", "if self.my_16bit_int is not None: oprot.writeFieldBegin('my_16bit_int', TType.I16, 3) oprot.writeI16(self.my_16bit_int) oprot.writeFieldEnd()", "_val47.append(_elem53) iprot.readListEnd() self.my_enum_structlist_map[_key46] = _val47 iprot.readMapEnd() else: iprot.skip(ftype) elif fid", "self.my_stringset is not None: oprot.writeFieldBegin('my_stringset', TType.SET, 18) oprot.writeSetBegin(TType.STRING, len(self.my_stringset)) for", "my_string self.my_binary = my_binary self.my_string_string_map = my_string_string_map self.my_string_enum_map = my_string_enum_map", "\"ALPACA\", } _NAMES_TO_VALUES = { \"LLAMA\": 1, \"ALPACA\": 2, }", "isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None: iprot._fast_decode(self, iprot, [self.__class__,", "11 (12, TType.MAP, 'my_enum_struct_map', (TType.I32, None, TType.STRUCT, [MiniStruct, None], False),", "'UTF8', False), None, ), # 15 (16, TType.LIST, 'my_structlist', (TType.STRUCT,", "oprot.writeBinary(self.my_binary) oprot.writeFieldEnd() if self.my_string_string_map is not None: oprot.writeFieldBegin('my_string_string_map', TType.MAP, 9)", "'my_enum_stringlist_map', (TType.I32, None, TType.LIST, (TType.STRING, 'UTF8', False), False), None, ),", "== TType.LIST: self.my_stringlist = [] (_etype57, _size54) = iprot.readListBegin() for", "elif fid == 16: if ftype == TType.LIST: self.my_structlist =", "ARE DOING # # options string: py # from thrift.Thrift", "\"LLAMA\", 2: \"ALPACA\", } _NAMES_TO_VALUES = { \"LLAMA\": 1, \"ALPACA\":", "is not None: oprot.writeFieldBegin('my_binary', TType.STRING, 8) oprot.writeBinary(self.my_binary) oprot.writeFieldEnd() if self.my_string_string_map", "\"LLAMA\": 
# serde/src/gen/thrift/gen-py/megastruct/ttypes.py
#
# Autogenerated by Thrift Compiler (0.13.0)
#
# DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING
#
#  options string: py
#

from thrift.Thrift import TType, TMessageType, TFrozenDict, TException, TApplicationException
from thrift.protocol.TProtocol import TProtocolException
from thrift.TRecursive import fix_spec
import sys

from thrift.transport import TTransport
all_structs = []


class MyEnum(object):
    LLAMA = 1
    ALPACA = 2

    _VALUES_TO_NAMES = {
        1: "LLAMA",
        2: "ALPACA",
    }

    _NAMES_TO_VALUES = {
        "LLAMA": 1,
        "ALPACA": 2,
    }


class MiniStruct(object):
    """
    Attributes:
     - my_string
     - my_enum

    """


    def __init__(self, my_string=None, my_enum=None,):
        self.my_string = my_string
        self.my_enum = my_enum

    def read(self, iprot):
        if iprot._fast_decode is not None and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None:
            iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec])
            return
        iprot.readStructBegin()
        while True:
            (fname, ftype, fid) = iprot.readFieldBegin()
            if ftype == TType.STOP:
                break
            if fid == 1:
                if ftype == TType.STRING:
                    self.my_string = iprot.readString().decode('utf-8') if sys.version_info[0] == 2 else iprot.readString()
                else:
                    iprot.skip(ftype)
            elif fid == 2:
                if ftype == TType.I32:
                    self.my_enum = iprot.readI32()
                else:
                    iprot.skip(ftype)
            else:
                iprot.skip(ftype)
            iprot.readFieldEnd()
        iprot.readStructEnd()

    def write(self, oprot):
        if oprot._fast_encode is not None and self.thrift_spec is not None:
            oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec]))
            return
        oprot.writeStructBegin('MiniStruct')
        if self.my_string is not None:
            oprot.writeFieldBegin('my_string', TType.STRING, 1)
            oprot.writeString(self.my_string.encode('utf-8') if sys.version_info[0] == 2 else self.my_string)
            oprot.writeFieldEnd()
        if self.my_enum is not None:
            oprot.writeFieldBegin('my_enum', TType.I32, 2)
            oprot.writeI32(self.my_enum)
            oprot.writeFieldEnd()
        oprot.writeFieldStop()
        oprot.writeStructEnd()

    def validate(self):
        return

    def __repr__(self):
        L = ['%s=%r' % (key, value)
             for key, value in self.__dict__.items()]
        return '%s(%s)' % (self.__class__.__name__, ', '.join(L))

    def __eq__(self, other):
        return isinstance(other, self.__class__) and self.__dict__ == other.__dict__

    def __ne__(self, other):
        return not (self == other)


class MegaStruct(object):
    """
    Attributes:
     - my_bool
     - my_byte
     - my_16bit_int
     - my_32bit_int
     - my_64bit_int
     - my_double
     - my_string
     - my_binary
     - my_string_string_map
     - my_string_enum_map
     - my_enum_string_map
     - my_enum_struct_map
     - my_enum_stringlist_map
     - my_enum_structlist_map
     - my_stringlist
     - my_structlist
     - my_enumlist
     - my_stringset
     - my_enumset
     - my_structset

    """


    def __init__(self, my_bool=None, my_byte=None, my_16bit_int=None, my_32bit_int=None, my_64bit_int=None, my_double=None, my_string=None, my_binary=None, my_string_string_map=None, my_string_enum_map=None, my_enum_string_map=None, my_enum_struct_map=None, my_enum_stringlist_map=None, my_enum_structlist_map=None, my_stringlist=None, my_structlist=None, my_enumlist=None, my_stringset=None, my_enumset=None, my_structset=None,):
        self.my_bool = my_bool
        self.my_byte = my_byte
        self.my_16bit_int = my_16bit_int
        self.my_32bit_int = my_32bit_int
        self.my_64bit_int = my_64bit_int
        self.my_double = my_double
        self.my_string = my_string
        self.my_binary = my_binary
        self.my_string_string_map = my_string_string_map
        self.my_string_enum_map = my_string_enum_map
        self.my_enum_string_map = my_enum_string_map
        self.my_enum_struct_map = my_enum_struct_map
        self.my_enum_stringlist_map = my_enum_stringlist_map
        self.my_enum_structlist_map = my_enum_structlist_map
        self.my_stringlist = my_stringlist
        self.my_structlist = my_structlist
        self.my_enumlist = my_enumlist
        self.my_stringset = my_stringset
        self.my_enumset = my_enumset
        self.my_structset = my_structset

    # read(self, iprot) and write(self, oprot) are generated in the same
    # per-field pattern as MiniStruct.read()/write() above: a fast path via
    # iprot._fast_decode / oprot._fast_encode, otherwise one branch per field
    # id 1-20 with nested readMapBegin/readListBegin/readSetBegin (and the
    # matching write*) loops for the container fields. Their full bodies are
    # not reproduced here.

    def validate(self):
        return

    def __repr__(self):
        L = ['%s=%r' % (key, value)
             for key, value in self.__dict__.items()]
        return '%s(%s)' % (self.__class__.__name__, ', '.join(L))

    def __eq__(self, other):
        return isinstance(other, self.__class__) and self.__dict__ == other.__dict__

    def __ne__(self, other):
        return not (self == other)


all_structs.append(MiniStruct)
MiniStruct.thrift_spec = (
    None,  # 0
    (1, TType.STRING, 'my_string', 'UTF8', None, ),  # 1
    (2, TType.I32, 'my_enum', None, None, ),  # 2
)
all_structs.append(MegaStruct)
MegaStruct.thrift_spec = (
    None,  # 0
    (1, TType.BOOL, 'my_bool', None, None, ),  # 1
    (2, TType.BYTE, 'my_byte', None, None, ),  # 2
    (3, TType.I16, 'my_16bit_int', None, None, ),  # 3
    (4, TType.I32, 'my_32bit_int', None, None, ),  # 4
    (5, TType.I64, 'my_64bit_int', None, None, ),  # 5
    (6, TType.DOUBLE, 'my_double', None, None, ),  # 6
    (7, TType.STRING, 'my_string', 'UTF8', None, ),  # 7
    (8, TType.STRING, 'my_binary', 'BINARY', None, ),  # 8
    (9, TType.MAP, 'my_string_string_map', (TType.STRING, 'UTF8', TType.STRING, 'UTF8', False), None, ),  # 9
    (10, TType.MAP, 'my_string_enum_map', (TType.STRING, 'UTF8', TType.I32, None, False), None, ),  # 10
    (11, TType.MAP, 'my_enum_string_map', (TType.I32, None, TType.STRING, 'UTF8', False), None, ),  # 11
    (12, TType.MAP, 'my_enum_struct_map', (TType.I32, None, TType.STRUCT, [MiniStruct, None], False), None, ),  # 12
    (13, TType.MAP, 'my_enum_stringlist_map', (TType.I32, None, TType.LIST, (TType.STRING, 'UTF8', False), False), None, ),  # 13
    (14, TType.MAP, 'my_enum_structlist_map', (TType.I32, None, TType.LIST, (TType.STRUCT, [MiniStruct, None], False), False), None, ),  # 14
    (15, TType.LIST, 'my_stringlist', (TType.STRING, 'UTF8', False), None, ),  # 15
    (16, TType.LIST, 'my_structlist', (TType.STRUCT, [MiniStruct, None], False), None, ),  # 16
    (17, TType.LIST, 'my_enumlist', (TType.I32, None, False), None, ),  # 17
    (18, TType.SET, 'my_stringset', (TType.STRING, 'UTF8', False), None, ),  # 18
    (19, TType.SET, 'my_enumset', (TType.I32, None, False), None, ),  # 19
    (20, TType.SET, 'my_structset', (TType.STRUCT, [MiniStruct, None], False), None, ),  # 20
)
fix_spec(all_structs)
sys.version_info[0] == 2 else iter107) oprot.writeSetEnd() oprot.writeFieldEnd()", "my_enum_stringlist_map=None, my_enum_structlist_map=None, my_stringlist=None, my_structlist=None, my_enumlist=None, my_stringset=None, my_enumset=None, my_structset=None,): self.my_bool =", "(TType.STRING, 'UTF8', False), False), None, ), # 13 (14, TType.MAP,", "TType.LIST, 16) oprot.writeListBegin(TType.STRUCT, len(self.my_structlist)) for iter105 in self.my_structlist: iter105.write(oprot) oprot.writeListEnd()", "oprot.writeI32(kiter94) oprot.writeString(viter95.encode('utf-8') if sys.version_info[0] == 2 else viter95) oprot.writeMapEnd() oprot.writeFieldEnd()", "if self.my_double is not None: oprot.writeFieldBegin('my_double', TType.DOUBLE, 6) oprot.writeDouble(self.my_double) oprot.writeFieldEnd()", "my_string_string_map=None, my_string_enum_map=None, my_enum_string_map=None, my_enum_struct_map=None, my_enum_stringlist_map=None, my_enum_structlist_map=None, my_stringlist=None, my_structlist=None, my_enumlist=None, my_stringset=None,", "my_enum_stringlist_map - my_enum_structlist_map - my_stringlist - my_structlist - my_enumlist -", "for kiter90, viter91 in self.my_string_string_map.items(): oprot.writeString(kiter90.encode('utf-8') if sys.version_info[0] == 2", "self.my_string = my_string self.my_binary = my_binary self.my_string_string_map = my_string_string_map self.my_string_enum_map", "len(self.my_structset)) for iter109 in self.my_structset: iter109.write(oprot) oprot.writeSetEnd() oprot.writeFieldEnd() oprot.writeFieldStop() oprot.writeStructEnd()", "from thrift.transport import TTransport all_structs = [] class MyEnum(object): LLAMA", "is not None: oprot.writeFieldBegin('my_string', TType.STRING, 7) oprot.writeString(self.my_string.encode('utf-8') if sys.version_info[0] ==", "self.my_enum_structlist_map = {} (_ktype42, _vtype43, _size41) = iprot.readMapBegin() for _i45", "not None: oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec])) return oprot.writeStructBegin('MiniStruct') if self.my_string is", "{} (_ktype15, _vtype16, _size14) = iprot.readMapBegin() for _i18 in range(_size14):", "= set() (_etype75, _size72) = iprot.readSetBegin() for _i76 in range(_size72):", "[] class MyEnum(object): LLAMA = 1 ALPACA = 2 _VALUES_TO_NAMES", "if sys.version_info[0] == 2 else iter104) oprot.writeListEnd() oprot.writeFieldEnd() if self.my_structlist", "TTransport all_structs = [] class MyEnum(object): LLAMA = 1 ALPACA", "None, # 0 (1, TType.BOOL, 'my_bool', None, None, ), #", "1 ALPACA = 2 _VALUES_TO_NAMES = { 1: \"LLAMA\", 2:", "TType.LIST, (TType.STRUCT, [MiniStruct, None], False), False), None, ), # 14", "self.my_enum_structlist_map = my_enum_structlist_map self.my_stringlist = my_stringlist self.my_structlist = my_structlist self.my_enumlist", "( None, # 0 (1, TType.STRING, 'my_string', 'UTF8', None, ),", "for kiter94, viter95 in self.my_enum_string_map.items(): oprot.writeI32(kiter94) oprot.writeString(viter95.encode('utf-8') if sys.version_info[0] ==", "sys.version_info[0] == 2 else iprot.readString() _val34.append(_elem40) iprot.readListEnd() self.my_enum_stringlist_map[_key33] = _val34", "- my_enum_string_map - my_enum_struct_map - my_enum_stringlist_map - my_enum_structlist_map - my_stringlist", "= iprot.readListBegin() for _i70 in range(_size66): _elem71 = iprot.readI32() self.my_enumlist.append(_elem71)", "'my_bool', None, None, ), # 1 (2, TType.BYTE, 'my_byte', None,", "kiter94, viter95 in self.my_enum_string_map.items(): oprot.writeI32(kiter94) 
oprot.writeString(viter95.encode('utf-8') if sys.version_info[0] == 2", "= set() (_etype87, _size84) = iprot.readSetBegin() for _i88 in range(_size84):", "== 3: if ftype == TType.I16: self.my_16bit_int = iprot.readI16() else:", "string: py # from thrift.Thrift import TType, TMessageType, TFrozenDict, TException,", "all_structs = [] class MyEnum(object): LLAMA = 1 ALPACA =", "if self.my_enum_string_map is not None: oprot.writeFieldBegin('my_enum_string_map', TType.MAP, 11) oprot.writeMapBegin(TType.I32, TType.STRING,", "in self.my_stringset: oprot.writeString(iter107.encode('utf-8') if sys.version_info[0] == 2 else iter107) oprot.writeSetEnd()", "{} (_ktype22, _vtype23, _size21) = iprot.readMapBegin() for _i25 in range(_size21):", "if ftype == TType.MAP: self.my_enum_string_map = {} (_ktype15, _vtype16, _size14)", "= my_64bit_int self.my_double = my_double self.my_string = my_string self.my_binary =", "= ['%s=%r' % (key, value) for key, value in self.__dict__.items()]", "if sys.version_info[0] == 2 else iprot.readString() self.my_enum_string_map[_key19] = _val20 iprot.readMapEnd()", "2 else kiter92) oprot.writeI32(viter93) oprot.writeMapEnd() oprot.writeFieldEnd() if self.my_enum_string_map is not", "viter99 in self.my_enum_stringlist_map.items(): oprot.writeI32(kiter98) oprot.writeListBegin(TType.STRING, len(viter99)) for iter100 in viter99:", "other) class MegaStruct(object): \"\"\" Attributes: - my_bool - my_byte -", "iprot.readListEnd() self.my_enum_structlist_map[_key46] = _val47 iprot.readMapEnd() else: iprot.skip(ftype) elif fid ==", "TType.I64, 'my_64bit_int', None, None, ), # 5 (6, TType.DOUBLE, 'my_double',", "_size41) = iprot.readMapBegin() for _i45 in range(_size41): _key46 = iprot.readI32()", "def __init__(self, my_string=None, my_enum=None,): self.my_string = my_string self.my_enum = my_enum", "elif fid == 9: if ftype == TType.MAP: self.my_string_string_map =", "= iprot.readString().decode('utf-8') if sys.version_info[0] == 2 else iprot.readString() _val13 =", "iprot.readFieldEnd() iprot.readStructEnd() def write(self, oprot): if oprot._fast_encode is not None", "= my_string_enum_map self.my_enum_string_map = my_enum_string_map self.my_enum_struct_map = my_enum_struct_map self.my_enum_stringlist_map =", "_i52 in range(_size48): _elem53 = MiniStruct() _elem53.read(iprot) _val47.append(_elem53) iprot.readListEnd() self.my_enum_structlist_map[_key46]", "_key33 = iprot.readI32() _val34 = [] (_etype38, _size35) = iprot.readListBegin()", "18 (19, TType.SET, 'my_enumset', (TType.I32, None, False), None, ), #", "my_enum_struct_map self.my_enum_stringlist_map = my_enum_stringlist_map self.my_enum_structlist_map = my_enum_structlist_map self.my_stringlist = my_stringlist", "if sys.version_info[0] == 2 else iprot.readString() _val13 = iprot.readI32() self.my_string_enum_map[_key12]", "TType.DOUBLE, 'my_double', None, None, ), # 6 (7, TType.STRING, 'my_string',", "TType.I32, 'my_enum', None, None, ), # 2 ) all_structs.append(MegaStruct) MegaStruct.thrift_spec", "TType.LIST, len(self.my_enum_structlist_map)) for kiter101, viter102 in self.my_enum_structlist_map.items(): oprot.writeI32(kiter101) oprot.writeListBegin(TType.STRUCT, len(viter102))", "MiniStruct() _val27.read(iprot) self.my_enum_struct_map[_key26] = _val27 iprot.readMapEnd() else: iprot.skip(ftype) elif fid", "else: iprot.skip(ftype) elif fid == 16: if ftype == TType.LIST:", "None, TType.LIST, (TType.STRUCT, [MiniStruct, None], False), False), None, ), #", "), # 17 (18, TType.SET, 'my_stringset', (TType.STRING, 'UTF8', False), 
None,", "# 5 (6, TType.DOUBLE, 'my_double', None, None, ), # 6", "_size54) = iprot.readListBegin() for _i58 in range(_size54): _elem59 = iprot.readString().decode('utf-8')", "1: if ftype == TType.BOOL: self.my_bool = iprot.readBool() else: iprot.skip(ftype)", "None: oprot.writeFieldBegin('my_structlist', TType.LIST, 16) oprot.writeListBegin(TType.STRUCT, len(self.my_structlist)) for iter105 in self.my_structlist:", "{ \"LLAMA\": 1, \"ALPACA\": 2, } class MiniStruct(object): \"\"\" Attributes:", "iter104) oprot.writeListEnd() oprot.writeFieldEnd() if self.my_structlist is not None: oprot.writeFieldBegin('my_structlist', TType.LIST,", "LLAMA = 1 ALPACA = 2 _VALUES_TO_NAMES = { 1:", "range(_size66): _elem71 = iprot.readI32() self.my_enumlist.append(_elem71) iprot.readListEnd() else: iprot.skip(ftype) elif fid", "== 2 else iprot.readString() else: iprot.skip(ftype) elif fid == 2:", "iprot.readMapBegin() for _i32 in range(_size28): _key33 = iprot.readI32() _val34 =", "(5, TType.I64, 'my_64bit_int', None, None, ), # 5 (6, TType.DOUBLE,", "TType.STOP: break if fid == 1: if ftype == TType.BOOL:", "self.my_stringlist: oprot.writeString(iter104.encode('utf-8') if sys.version_info[0] == 2 else iter104) oprot.writeListEnd() oprot.writeFieldEnd()", "my_enumset=None, my_structset=None,): self.my_bool = my_bool self.my_byte = my_byte self.my_16bit_int =", "= MiniStruct() _elem89.read(iprot) self.my_structset.add(_elem89) iprot.readSetEnd() else: iprot.skip(ftype) else: iprot.skip(ftype) iprot.readFieldEnd()", "fid == 7: if ftype == TType.STRING: self.my_string = iprot.readString().decode('utf-8')", "sys.version_info[0] == 2 else iprot.readString() self.my_string_string_map[_key5] = _val6 iprot.readMapEnd() else:", "if sys.version_info[0] == 2 else iter107) oprot.writeSetEnd() oprot.writeFieldEnd() if self.my_enumset", "} _NAMES_TO_VALUES = { \"LLAMA\": 1, \"ALPACA\": 2, } class", "my_enumset self.my_structset = my_structset def read(self, iprot): if iprot._fast_decode is", "TType.LIST, 'my_stringlist', (TType.STRING, 'UTF8', False), None, ), # 15 (16,", "iprot.skip(ftype) elif fid == 16: if ftype == TType.LIST: self.my_structlist", "None, ), # 10 (11, TType.MAP, 'my_enum_string_map', (TType.I32, None, TType.STRING,", "== 6: if ftype == TType.DOUBLE: self.my_double = iprot.readDouble() else:", "else: iprot.skip(ftype) elif fid == 13: if ftype == TType.MAP:", "self.thrift_spec]) return iprot.readStructBegin() while True: (fname, ftype, fid) = iprot.readFieldBegin()", "= 2 _VALUES_TO_NAMES = { 1: \"LLAMA\", 2: \"ALPACA\", }", "my_byte=None, my_16bit_int=None, my_32bit_int=None, my_64bit_int=None, my_double=None, my_string=None, my_binary=None, my_string_string_map=None, my_string_enum_map=None, my_enum_string_map=None,", "2 _VALUES_TO_NAMES = { 1: \"LLAMA\", 2: \"ALPACA\", } _NAMES_TO_VALUES", "== TType.I32: self.my_enum = iprot.readI32() else: iprot.skip(ftype) else: iprot.skip(ftype) iprot.readFieldEnd()", "oprot.writeStructBegin('MegaStruct') if self.my_bool is not None: oprot.writeFieldBegin('my_bool', TType.BOOL, 1) oprot.writeBool(self.my_bool)", "False), None, ), # 19 (20, TType.SET, 'my_structset', (TType.STRUCT, [MiniStruct,", "other) all_structs.append(MiniStruct) MiniStruct.thrift_spec = ( None, # 0 (1, TType.STRING,", "None: oprot.writeFieldBegin('my_string_string_map', TType.MAP, 9) oprot.writeMapBegin(TType.STRING, TType.STRING, len(self.my_string_string_map)) for kiter90, viter91", "oprot.writeFieldEnd() if self.my_64bit_int is not None: oprot.writeFieldBegin('my_64bit_int', TType.I64, 5) 
oprot.writeI64(self.my_64bit_int)", "my_stringlist=None, my_structlist=None, my_enumlist=None, my_stringset=None, my_enumset=None, my_structset=None,): self.my_bool = my_bool self.my_byte", "TType.BOOL, 'my_bool', None, None, ), # 1 (2, TType.BYTE, 'my_byte',", "iprot.readString().decode('utf-8') if sys.version_info[0] == 2 else iprot.readString() self.my_stringlist.append(_elem59) iprot.readListEnd() else:", "_vtype43, _size41) = iprot.readMapBegin() for _i45 in range(_size41): _key46 =", "range(_size60): _elem65 = MiniStruct() _elem65.read(iprot) self.my_structlist.append(_elem65) iprot.readListEnd() else: iprot.skip(ftype) elif", "is not None: oprot.writeFieldBegin('my_stringset', TType.SET, 18) oprot.writeSetBegin(TType.STRING, len(self.my_stringset)) for iter107", "= iprot.readI32() self.my_enumlist.append(_elem71) iprot.readListEnd() else: iprot.skip(ftype) elif fid == 18:", "my_16bit_int self.my_32bit_int = my_32bit_int self.my_64bit_int = my_64bit_int self.my_double = my_double", "TType.I32: self.my_enum = iprot.readI32() else: iprot.skip(ftype) else: iprot.skip(ftype) iprot.readFieldEnd() iprot.readStructEnd()", "return '%s(%s)' % (self.__class__.__name__, ', '.join(L)) def __eq__(self, other): return", "self.my_enum_stringlist_map = {} (_ktype29, _vtype30, _size28) = iprot.readMapBegin() for _i32", "self.my_enum_structlist_map is not None: oprot.writeFieldBegin('my_enum_structlist_map', TType.MAP, 14) oprot.writeMapBegin(TType.I32, TType.LIST, len(self.my_enum_structlist_map))", "oprot.writeListEnd() oprot.writeMapEnd() oprot.writeFieldEnd() if self.my_enum_structlist_map is not None: oprot.writeFieldBegin('my_enum_structlist_map', TType.MAP,", "Attributes: - my_string - my_enum \"\"\" def __init__(self, my_string=None, my_enum=None,):", "in viter102: iter103.write(oprot) oprot.writeListEnd() oprot.writeMapEnd() oprot.writeFieldEnd() if self.my_stringlist is not", "Compiler (0.13.0) # # DO NOT EDIT UNLESS YOU ARE", "None, None, ), # 5 (6, TType.DOUBLE, 'my_double', None, None,", "iprot.readListBegin() for _i52 in range(_size48): _elem53 = MiniStruct() _elem53.read(iprot) _val47.append(_elem53)", "== 1: if ftype == TType.BOOL: self.my_bool = iprot.readBool() else:", "viter99: oprot.writeString(iter100.encode('utf-8') if sys.version_info[0] == 2 else iter100) oprot.writeListEnd() oprot.writeMapEnd()", "None, ), # 7 (8, TType.STRING, 'my_binary', 'BINARY', None, ),", "in self.my_stringlist: oprot.writeString(iter104.encode('utf-8') if sys.version_info[0] == 2 else iter104) oprot.writeListEnd()", "not None: oprot.writeFieldBegin('my_enum_string_map', TType.MAP, 11) oprot.writeMapBegin(TType.I32, TType.STRING, len(self.my_enum_string_map)) for kiter94,", "TFrozenDict, TException, TApplicationException from thrift.protocol.TProtocol import TProtocolException from thrift.TRecursive import", "iter103.write(oprot) oprot.writeListEnd() oprot.writeMapEnd() oprot.writeFieldEnd() if self.my_stringlist is not None: oprot.writeFieldBegin('my_stringlist',", "in self.my_enum_structlist_map.items(): oprot.writeI32(kiter101) oprot.writeListBegin(TType.STRUCT, len(viter102)) for iter103 in viter102: iter103.write(oprot)", "self.my_enum_string_map[_key19] = _val20 iprot.readMapEnd() else: iprot.skip(ftype) elif fid == 12:", "iter104 in self.my_stringlist: oprot.writeString(iter104.encode('utf-8') if sys.version_info[0] == 2 else iter104)", "self.my_string_enum_map is not None: oprot.writeFieldBegin('my_string_enum_map', TType.MAP, 10) oprot.writeMapBegin(TType.STRING, TType.I32, 
len(self.my_string_enum_map))", "YOU ARE DOING # # options string: py # from", "not None: oprot.writeFieldBegin('my_enum_stringlist_map', TType.MAP, 13) oprot.writeMapBegin(TType.I32, TType.LIST, len(self.my_enum_stringlist_map)) for kiter98,", "= my_bool self.my_byte = my_byte self.my_16bit_int = my_16bit_int self.my_32bit_int =", "# 10 (11, TType.MAP, 'my_enum_string_map', (TType.I32, None, TType.STRING, 'UTF8', False),", "iprot.readSetBegin() for _i88 in range(_size84): _elem89 = MiniStruct() _elem89.read(iprot) self.my_structset.add(_elem89)", "else viter91) oprot.writeMapEnd() oprot.writeFieldEnd() if self.my_string_enum_map is not None: oprot.writeFieldBegin('my_string_enum_map',", "if self.my_enum_struct_map is not None: oprot.writeFieldBegin('my_enum_struct_map', TType.MAP, 12) oprot.writeMapBegin(TType.I32, TType.STRUCT,", "iprot.readString() _val13 = iprot.readI32() self.my_string_enum_map[_key12] = _val13 iprot.readMapEnd() else: iprot.skip(ftype)", "import fix_spec import sys from thrift.transport import TTransport all_structs =", "'UTF8', False), None, ), # 18 (19, TType.SET, 'my_enumset', (TType.I32,", "self.my_binary is not None: oprot.writeFieldBegin('my_binary', TType.STRING, 8) oprot.writeBinary(self.my_binary) oprot.writeFieldEnd() if", "oprot.writeFieldEnd() if self.my_enum is not None: oprot.writeFieldBegin('my_enum', TType.I32, 2) oprot.writeI32(self.my_enum)", "_key19 = iprot.readI32() _val20 = iprot.readString().decode('utf-8') if sys.version_info[0] == 2", "None: oprot.writeFieldBegin('my_string', TType.STRING, 1) oprot.writeString(self.my_string.encode('utf-8') if sys.version_info[0] == 2 else", "TType.I16, 3) oprot.writeI16(self.my_16bit_int) oprot.writeFieldEnd() if self.my_32bit_int is not None: oprot.writeFieldBegin('my_32bit_int',", "self.my_string) oprot.writeFieldEnd() if self.my_enum is not None: oprot.writeFieldBegin('my_enum', TType.I32, 2)", "my_structlist=None, my_enumlist=None, my_stringset=None, my_enumset=None, my_structset=None,): self.my_bool = my_bool self.my_byte =", "self.my_string_enum_map[_key12] = _val13 iprot.readMapEnd() else: iprot.skip(ftype) elif fid == 11:", "oprot.writeStructEnd() def validate(self): return def __repr__(self): L = ['%s=%r' %", "else iter107) oprot.writeSetEnd() oprot.writeFieldEnd() if self.my_enumset is not None: oprot.writeFieldBegin('my_enumset',", "iprot.readMapEnd() else: iprot.skip(ftype) elif fid == 11: if ftype ==", "'my_32bit_int', None, None, ), # 4 (5, TType.I64, 'my_64bit_int', None,", "MyEnum(object): LLAMA = 1 ALPACA = 2 _VALUES_TO_NAMES = {", "_NAMES_TO_VALUES = { \"LLAMA\": 1, \"ALPACA\": 2, } class MiniStruct(object):", "__init__(self, my_bool=None, my_byte=None, my_16bit_int=None, my_32bit_int=None, my_64bit_int=None, my_double=None, my_string=None, my_binary=None, my_string_string_map=None,", "2 else iprot.readString() else: iprot.skip(ftype) elif fid == 8: if", "iprot.readString() else: iprot.skip(ftype) elif fid == 8: if ftype ==", "_size14) = iprot.readMapBegin() for _i18 in range(_size14): _key19 = iprot.readI32()", "= my_enum_stringlist_map self.my_enum_structlist_map = my_enum_structlist_map self.my_stringlist = my_stringlist self.my_structlist =", "self.my_stringlist.append(_elem59) iprot.readListEnd() else: iprot.skip(ftype) elif fid == 16: if ftype", "self.my_stringset.add(_elem77) iprot.readSetEnd() else: iprot.skip(ftype) elif fid == 19: if ftype", "for _i18 in range(_size14): _key19 = iprot.readI32() _val20 = iprot.readString().decode('utf-8')", "== 5: if ftype == TType.I64: 
self.my_64bit_int = iprot.readI64() else:", "if self.my_enum_structlist_map is not None: oprot.writeFieldBegin('my_enum_structlist_map', TType.MAP, 14) oprot.writeMapBegin(TType.I32, TType.LIST,", "None: oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec])) return oprot.writeStructBegin('MegaStruct') if self.my_bool is not", "_i11 in range(_size7): _key12 = iprot.readString().decode('utf-8') if sys.version_info[0] == 2", "my_enum=None,): self.my_string = my_string self.my_enum = my_enum def read(self, iprot):", "ftype == TType.BYTE: self.my_byte = iprot.readByte() else: iprot.skip(ftype) elif fid", "== 10: if ftype == TType.MAP: self.my_string_enum_map = {} (_ktype8,", "else iprot.readString() self.my_stringset.add(_elem77) iprot.readSetEnd() else: iprot.skip(ftype) elif fid == 19:", "== other) class MegaStruct(object): \"\"\" Attributes: - my_bool - my_byte", "iprot.skip(ftype) elif fid == 2: if ftype == TType.I32: self.my_enum", "None: oprot.writeFieldBegin('my_bool', TType.BOOL, 1) oprot.writeBool(self.my_bool) oprot.writeFieldEnd() if self.my_byte is not", "(14, TType.MAP, 'my_enum_structlist_map', (TType.I32, None, TType.LIST, (TType.STRUCT, [MiniStruct, None], False),", "len(self.my_string_string_map)) for kiter90, viter91 in self.my_string_string_map.items(): oprot.writeString(kiter90.encode('utf-8') if sys.version_info[0] ==", "self.my_enumlist = [] (_etype69, _size66) = iprot.readListBegin() for _i70 in", "[self.__class__, self.thrift_spec])) return oprot.writeStructBegin('MiniStruct') if self.my_string is not None: oprot.writeFieldBegin('my_string',", "'my_enum_structlist_map', (TType.I32, None, TType.LIST, (TType.STRUCT, [MiniStruct, None], False), False), None,", "my_enum_structlist_map self.my_stringlist = my_stringlist self.my_structlist = my_structlist self.my_enumlist = my_enumlist", "== TType.I64: self.my_64bit_int = iprot.readI64() else: iprot.skip(ftype) elif fid ==", "iprot.readI32() else: iprot.skip(ftype) else: iprot.skip(ftype) iprot.readFieldEnd() iprot.readStructEnd() def write(self, oprot):", "my_stringset - my_enumset - my_structset \"\"\" def __init__(self, my_bool=None, my_byte=None,", "_val6 iprot.readMapEnd() else: iprot.skip(ftype) elif fid == 10: if ftype", "_vtype30, _size28) = iprot.readMapBegin() for _i32 in range(_size28): _key33 =", "kiter90, viter91 in self.my_string_string_map.items(): oprot.writeString(kiter90.encode('utf-8') if sys.version_info[0] == 2 else", "'my_string_string_map', (TType.STRING, 'UTF8', TType.STRING, 'UTF8', False), None, ), # 9", "[MiniStruct, None], False), None, ), # 20 ) fix_spec(all_structs) del", "py # from thrift.Thrift import TType, TMessageType, TFrozenDict, TException, TApplicationException", "is not None: oprot.writeFieldBegin('my_bool', TType.BOOL, 1) oprot.writeBool(self.my_bool) oprot.writeFieldEnd() if self.my_byte", "(TType.STRING, 'UTF8', False), None, ), # 15 (16, TType.LIST, 'my_structlist',", "(0.13.0) # # DO NOT EDIT UNLESS YOU ARE SURE", "my_enum_string_map self.my_enum_struct_map = my_enum_struct_map self.my_enum_stringlist_map = my_enum_stringlist_map self.my_enum_structlist_map = my_enum_structlist_map", "(_etype87, _size84) = iprot.readSetBegin() for _i88 in range(_size84): _elem89 =", "- my_string_string_map - my_string_enum_map - my_enum_string_map - my_enum_struct_map - my_enum_stringlist_map", "False), None, ), # 12 (13, TType.MAP, 'my_enum_stringlist_map', (TType.I32, None,", "'my_enum_struct_map', (TType.I32, None, TType.STRUCT, [MiniStruct, None], False), None, ), #", 
"other): return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ def __ne__(self,", "TType.MAP: self.my_enum_struct_map = {} (_ktype22, _vtype23, _size21) = iprot.readMapBegin() for", "= [] class MyEnum(object): LLAMA = 1 ALPACA = 2", "- my_structlist - my_enumlist - my_stringset - my_enumset - my_structset", "self.my_enum_structlist_map.items(): oprot.writeI32(kiter101) oprot.writeListBegin(TType.STRUCT, len(viter102)) for iter103 in viter102: iter103.write(oprot) oprot.writeListEnd()", "my_16bit_int=None, my_32bit_int=None, my_64bit_int=None, my_double=None, my_string=None, my_binary=None, my_string_string_map=None, my_string_enum_map=None, my_enum_string_map=None, my_enum_struct_map=None,", "_val34.append(_elem40) iprot.readListEnd() self.my_enum_stringlist_map[_key33] = _val34 iprot.readMapEnd() else: iprot.skip(ftype) elif fid", "fid == 1: if ftype == TType.STRING: self.my_string = iprot.readString().decode('utf-8')", "== 20: if ftype == TType.SET: self.my_structset = set() (_etype87,", "oprot.writeMapBegin(TType.I32, TType.STRING, len(self.my_enum_string_map)) for kiter94, viter95 in self.my_enum_string_map.items(): oprot.writeI32(kiter94) oprot.writeString(viter95.encode('utf-8')", "self.my_enumlist: oprot.writeI32(iter106) oprot.writeListEnd() oprot.writeFieldEnd() if self.my_stringset is not None: oprot.writeFieldBegin('my_stringset',", "TType.STRING: self.my_binary = iprot.readBinary() else: iprot.skip(ftype) elif fid == 9:", "TType.MAP, 'my_string_enum_map', (TType.STRING, 'UTF8', TType.I32, None, False), None, ), #", "fid == 2: if ftype == TType.I32: self.my_enum = iprot.readI32()", "TType.MAP, 14) oprot.writeMapBegin(TType.I32, TType.LIST, len(self.my_enum_structlist_map)) for kiter101, viter102 in self.my_enum_structlist_map.items():", "viter93 in self.my_string_enum_map.items(): oprot.writeString(kiter92.encode('utf-8') if sys.version_info[0] == 2 else kiter92)", "for _i70 in range(_size66): _elem71 = iprot.readI32() self.my_enumlist.append(_elem71) iprot.readListEnd() else:", "\"\"\" def __init__(self, my_string=None, my_enum=None,): self.my_string = my_string self.my_enum =", "my_enum_string_map - my_enum_struct_map - my_enum_stringlist_map - my_enum_structlist_map - my_stringlist -", "5: if ftype == TType.I64: self.my_64bit_int = iprot.readI64() else: iprot.skip(ftype)", "oprot.writeFieldEnd() if self.my_16bit_int is not None: oprot.writeFieldBegin('my_16bit_int', TType.I16, 3) oprot.writeI16(self.my_16bit_int)", "None, ), # 19 (20, TType.SET, 'my_structset', (TType.STRUCT, [MiniStruct, None],", "oprot.writeSetEnd() oprot.writeFieldEnd() if self.my_enumset is not None: oprot.writeFieldBegin('my_enumset', TType.SET, 19)", "(fname, ftype, fid) = iprot.readFieldBegin() if ftype == TType.STOP: break", "None: oprot.writeFieldBegin('my_byte', TType.BYTE, 2) oprot.writeByte(self.my_byte) oprot.writeFieldEnd() if self.my_16bit_int is not", "not None: oprot.writeFieldBegin('my_string', TType.STRING, 7) oprot.writeString(self.my_string.encode('utf-8') if sys.version_info[0] == 2", "= my_double self.my_string = my_string self.my_binary = my_binary self.my_string_string_map =", "for key, value in self.__dict__.items()] return '%s(%s)' % (self.__class__.__name__, ',", "oprot.writeI64(self.my_64bit_int) oprot.writeFieldEnd() if self.my_double is not None: oprot.writeFieldBegin('my_double', TType.DOUBLE, 6)", "= iprot.readI32() _val47 = [] (_etype51, _size48) = iprot.readListBegin() for", "TType.LIST, 17) oprot.writeListBegin(TType.I32, len(self.my_enumlist)) for 
iter106 in self.my_enumlist: oprot.writeI32(iter106) oprot.writeListEnd()", "(self.__class__.__name__, ', '.join(L)) def __eq__(self, other): return isinstance(other, self.__class__) and", "(12, TType.MAP, 'my_enum_struct_map', (TType.I32, None, TType.STRUCT, [MiniStruct, None], False), None,", "for iter104 in self.my_stringlist: oprot.writeString(iter104.encode('utf-8') if sys.version_info[0] == 2 else", "_i45 in range(_size41): _key46 = iprot.readI32() _val47 = [] (_etype51,", "_val27 iprot.readMapEnd() else: iprot.skip(ftype) elif fid == 13: if ftype", "my_string self.my_enum = my_enum def read(self, iprot): if iprot._fast_decode is", "if fid == 1: if ftype == TType.STRING: self.my_string =", "= iprot.readI32() else: iprot.skip(ftype) else: iprot.skip(ftype) iprot.readFieldEnd() iprot.readStructEnd() def write(self,", "= _val6 iprot.readMapEnd() else: iprot.skip(ftype) elif fid == 10: if", "self.my_string_enum_map = {} (_ktype8, _vtype9, _size7) = iprot.readMapBegin() for _i11", "13: if ftype == TType.MAP: self.my_enum_stringlist_map = {} (_ktype29, _vtype30,", "(TType.I32, None, False), None, ), # 17 (18, TType.SET, 'my_stringset',", "(TType.STRING, 'UTF8', TType.STRING, 'UTF8', False), None, ), # 9 (10,", "and self.thrift_spec is not None: oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec])) return oprot.writeStructBegin('MiniStruct')", "self.my_16bit_int is not None: oprot.writeFieldBegin('my_16bit_int', TType.I16, 3) oprot.writeI16(self.my_16bit_int) oprot.writeFieldEnd() if", "if sys.version_info[0] == 2 else iprot.readString() self.my_stringlist.append(_elem59) iprot.readListEnd() else: iprot.skip(ftype)", "range(_size54): _elem59 = iprot.readString().decode('utf-8') if sys.version_info[0] == 2 else iprot.readString()", "(self == other) all_structs.append(MiniStruct) MiniStruct.thrift_spec = ( None, # 0", "iprot.skip(ftype) elif fid == 12: if ftype == TType.MAP: self.my_enum_struct_map", "iprot.readMapEnd() else: iprot.skip(ftype) elif fid == 14: if ftype ==", "_elem77 = iprot.readString().decode('utf-8') if sys.version_info[0] == 2 else iprot.readString() self.my_stringset.add(_elem77)", "== 2 else viter91) oprot.writeMapEnd() oprot.writeFieldEnd() if self.my_string_enum_map is not", "self.my_bool is not None: oprot.writeFieldBegin('my_bool', TType.BOOL, 1) oprot.writeBool(self.my_bool) oprot.writeFieldEnd() if", "2 else iter100) oprot.writeListEnd() oprot.writeMapEnd() oprot.writeFieldEnd() if self.my_enum_structlist_map is not", "iprot.readI32() _val27 = MiniStruct() _val27.read(iprot) self.my_enum_struct_map[_key26] = _val27 iprot.readMapEnd() else:", "'my_stringset', (TType.STRING, 'UTF8', False), None, ), # 18 (19, TType.SET,", "in self.my_enum_stringlist_map.items(): oprot.writeI32(kiter98) oprot.writeListBegin(TType.STRING, len(viter99)) for iter100 in viter99: oprot.writeString(iter100.encode('utf-8')", "), # 14 (15, TType.LIST, 'my_stringlist', (TType.STRING, 'UTF8', False), None,", "oprot.writeFieldBegin('my_structset', TType.SET, 20) oprot.writeSetBegin(TType.STRUCT, len(self.my_structset)) for iter109 in self.my_structset: iter109.write(oprot)", "# # options string: py # from thrift.Thrift import TType,", "my_stringlist - my_structlist - my_enumlist - my_stringset - my_enumset -", "# 14 (15, TType.LIST, 'my_stringlist', (TType.STRING, 'UTF8', False), None, ),", "6) oprot.writeDouble(self.my_double) oprot.writeFieldEnd() if self.my_string is not None: oprot.writeFieldBegin('my_string', TType.STRING,", "KNOW WHAT YOU ARE DOING # # options 
string: py", "self.my_stringlist is not None: oprot.writeFieldBegin('my_stringlist', TType.LIST, 15) oprot.writeListBegin(TType.STRING, len(self.my_stringlist)) for", "None: oprot.writeFieldBegin('my_stringset', TType.SET, 18) oprot.writeSetBegin(TType.STRING, len(self.my_stringset)) for iter107 in self.my_stringset:", "== 2 else iter100) oprot.writeListEnd() oprot.writeMapEnd() oprot.writeFieldEnd() if self.my_enum_structlist_map is", "== 2 else iprot.readString() self.my_stringlist.append(_elem59) iprot.readListEnd() else: iprot.skip(ftype) elif fid", "# # Autogenerated by Thrift Compiler (0.13.0) # # DO", "is not None: oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec])) return oprot.writeStructBegin('MiniStruct') if self.my_string", "range(_size28): _key33 = iprot.readI32() _val34 = [] (_etype38, _size35) =", "9: if ftype == TType.MAP: self.my_string_string_map = {} (_ktype1, _vtype2,", "_elem53.read(iprot) _val47.append(_elem53) iprot.readListEnd() self.my_enum_structlist_map[_key46] = _val47 iprot.readMapEnd() else: iprot.skip(ftype) elif", "self.my_enum is not None: oprot.writeFieldBegin('my_enum', TType.I32, 2) oprot.writeI32(self.my_enum) oprot.writeFieldEnd() oprot.writeFieldStop()", "iprot.readMapEnd() else: iprot.skip(ftype) elif fid == 12: if ftype ==", "in self.my_string_string_map.items(): oprot.writeString(kiter90.encode('utf-8') if sys.version_info[0] == 2 else kiter90) oprot.writeString(viter91.encode('utf-8')", "None, ), # 17 (18, TType.SET, 'my_stringset', (TType.STRING, 'UTF8', False),", "elif fid == 3: if ftype == TType.I16: self.my_16bit_int =", "= iprot.readBool() else: iprot.skip(ftype) elif fid == 2: if ftype", "TType.STRING, 7) oprot.writeString(self.my_string.encode('utf-8') if sys.version_info[0] == 2 else self.my_string) oprot.writeFieldEnd()", "2 ) all_structs.append(MegaStruct) MegaStruct.thrift_spec = ( None, # 0 (1,", "self.my_enum_stringlist_map is not None: oprot.writeFieldBegin('my_enum_stringlist_map', TType.MAP, 13) oprot.writeMapBegin(TType.I32, TType.LIST, len(self.my_enum_stringlist_map))", "oprot.writeI32(kiter98) oprot.writeListBegin(TType.STRING, len(viter99)) for iter100 in viter99: oprot.writeString(iter100.encode('utf-8') if sys.version_info[0]", "in self.my_enum_string_map.items(): oprot.writeI32(kiter94) oprot.writeString(viter95.encode('utf-8') if sys.version_info[0] == 2 else viter95)", "range(_size35): _elem40 = iprot.readString().decode('utf-8') if sys.version_info[0] == 2 else iprot.readString()", "my_enum_string_map=None, my_enum_struct_map=None, my_enum_stringlist_map=None, my_enum_structlist_map=None, my_stringlist=None, my_structlist=None, my_enumlist=None, my_stringset=None, my_enumset=None, my_structset=None,):", "ftype == TType.MAP: self.my_enum_struct_map = {} (_ktype22, _vtype23, _size21) =", "else: iprot.skip(ftype) elif fid == 20: if ftype == TType.SET:", "TType.STRING, 8) oprot.writeBinary(self.my_binary) oprot.writeFieldEnd() if self.my_string_string_map is not None: oprot.writeFieldBegin('my_string_string_map',", "None, None, ), # 6 (7, TType.STRING, 'my_string', 'UTF8', None,", "if ftype == TType.SET: self.my_stringset = set() (_etype75, _size72) =", "oprot.writeFieldEnd() oprot.writeFieldStop() oprot.writeStructEnd() def validate(self): return def __repr__(self): L =", "my_enumlist - my_stringset - my_enumset - my_structset \"\"\" def __init__(self,", "iprot.skip(ftype) elif fid == 19: if ftype == TType.SET: self.my_enumset", "False), None, ), # 14 (15, TType.LIST, 'my_stringlist', 
(TType.STRING, 'UTF8',", "in range(_size35): _elem40 = iprot.readString().decode('utf-8') if sys.version_info[0] == 2 else", "# 11 (12, TType.MAP, 'my_enum_struct_map', (TType.I32, None, TType.STRUCT, [MiniStruct, None],", "ftype == TType.LIST: self.my_stringlist = [] (_etype57, _size54) = iprot.readListBegin()", "3) oprot.writeI16(self.my_16bit_int) oprot.writeFieldEnd() if self.my_32bit_int is not None: oprot.writeFieldBegin('my_32bit_int', TType.I32,", "= [] (_etype38, _size35) = iprot.readListBegin() for _i39 in range(_size35):", "if ftype == TType.MAP: self.my_string_enum_map = {} (_ktype8, _vtype9, _size7)", "oprot.writeMapEnd() oprot.writeFieldEnd() if self.my_stringlist is not None: oprot.writeFieldBegin('my_stringlist', TType.LIST, 15)", "TType.I32, None, False), None, ), # 10 (11, TType.MAP, 'my_enum_string_map',", "= iprot.readSetBegin() for _i88 in range(_size84): _elem89 = MiniStruct() _elem89.read(iprot)", "TType.MAP, 'my_enum_string_map', (TType.I32, None, TType.STRING, 'UTF8', False), None, ), #", "viter95 in self.my_enum_string_map.items(): oprot.writeI32(kiter94) oprot.writeString(viter95.encode('utf-8') if sys.version_info[0] == 2 else", "in self.__dict__.items()] return '%s(%s)' % (self.__class__.__name__, ', '.join(L)) def __eq__(self,", "self.my_binary = iprot.readBinary() else: iprot.skip(ftype) elif fid == 9: if", "oprot.writeMapBegin(TType.I32, TType.LIST, len(self.my_enum_stringlist_map)) for kiter98, viter99 in self.my_enum_stringlist_map.items(): oprot.writeI32(kiter98) oprot.writeListBegin(TType.STRING,", "oprot.writeI32(viter93) oprot.writeMapEnd() oprot.writeFieldEnd() if self.my_enum_string_map is not None: oprot.writeFieldBegin('my_enum_string_map', TType.MAP,", "None, None, ), # 3 (4, TType.I32, 'my_32bit_int', None, None,", "TType.MAP: self.my_enum_structlist_map = {} (_ktype42, _vtype43, _size41) = iprot.readMapBegin() for", "== 14: if ftype == TType.MAP: self.my_enum_structlist_map = {} (_ktype42,", "my_stringset self.my_enumset = my_enumset self.my_structset = my_structset def read(self, iprot):", "self.my_structset.add(_elem89) iprot.readSetEnd() else: iprot.skip(ftype) else: iprot.skip(ftype) iprot.readFieldEnd() iprot.readStructEnd() def write(self,", "key, value in self.__dict__.items()] return '%s(%s)' % (self.__class__.__name__, ', '.join(L))", "self.my_structlist = my_structlist self.my_enumlist = my_enumlist self.my_stringset = my_stringset self.my_enumset" ]
[ "str(re.sub('\\n','',line)) def copy_data(source='',target=''): cmd = 'cp '+ mkpath('data/' + source)", "add init_file suitePath = f1[0:f1.rindex( \"/\" )] if os.path.exists(suitePath +", "2 == 0: # f.write('1997,Ford,E350,\"ac, abs, moon\",3000.00,a\\n') # else: #", "test_30_gpload_reuse_table_update_mode_with_fast_match(self): \"30 gpload update mode with fast match\" drop_tables() copy_data('external_file_04.txt','data_file.txt')", "merge mode with encoding GBK\" file = mkpath('setup.sql') runfile(file) copy_data('external_file_04.txt','data_file.txt')", "command) (ok,out) = run(cmd) if not ok: raise Exception(\"Unable to", "open(mkpath('query9.sql'),'a') f.write(\"\\! psql -d reuse_gptest -c 'select count(*) from texttable;'\\n\"+\"\\!", "self.doTest(5) def test_06_gpload_formatOpts_delimiter(self): \"6 gpload formatOpts delimiter \\\"'\\\" with reuse\"", "ofile == '-': ofile = '2>&1' elif not ofile: ofile", "connect to a different host @param port : port where", "'%s %s > %s 2>&1' % (LMYD, myinitfile, f1, f2,", "write_config_file(reuse_flag='true',formatOpts='text',file='data_file.txt',table='texttable',delimiter=\"'|'\") self.doTest(1) def test_02_gpload_formatOpts_delimiter(self): \"2 gpload formatOpts delimiter '\\t' with", "\"4 gpload formatOpts delimiter E'\\u0009' with reuse\" copy_data('external_file_02.txt','data_file.txt') write_config_file(reuse_flag='true',formatOpts='text',file='data_file.txt',table='texttable',delimiter=\"E'\\u0009'\") self.doTest(4)", "outputPath = outputPath ) # Gets the suitePath name to", "\" write_config_file('update','true',file='data_file.txt') self.doTest(14) def test_15_gpload_reuse_table_merge_mode_with_different_columns_order(self): \"15 gpload merge mode with", "copy_data('external_file_13.csv','data_file.csv') write_config_file(reuse_flag='true',formatOpts='csv',file='data_file.csv',table='csvtable',format='csv',delimiter=\"','\",log_errors=True,error_limit='10',staging_table='staging_table',externalSchema='test') self.doTest(26) def test_27_gpload_ext_staging_table_with_externalschema(self): \"27 gpload reuse ext_staging_table if", "for i in range(0, 10000): if i % 2 ==", "= getPortMasterOnly() def get_table_name(): try: db = pg.DB(dbname='reuse_gptest' ,host='localhost' ,port=int(PGPORT)", "=flag, dbname= dbname, username= username, PGOPTIONS= PGOPTIONS, host = host,", "s_s3: text\") f.write(\"\\n - s_n1: smallint\") f.write(\"\\n - s_n2: integer\")", "'merge': f.write(\"\\n - MODE: \"+'merge') f.write(\"\\n - UPDATE_COLUMNS:\") f.write(\"\\n -", "= mkpath('config') if not os.path.exists(d): os.mkdir(d) def write_config_file(mode='insert', reuse_flag='',columns_flag='0',mapping='0',portNum='8081',database='reuse_gptest',host='localhost',formatOpts='text',file='data/external_file_01.txt',table='texttable',format='text',delimiter=\"'|'\",escape='',quote='',truncate='False',log_errors=None, error_limit='0',error_table=None,externalSchema=None,staging_table=None,fast_match='false',", "with ext schema\" file = mkpath('setup.sql') runfile(file) f = open(mkpath('query24.sql'),'a')", "subprocess.Popen(cmd,shell=True,stdout=subprocess.PIPE,stderr=subprocess.PIPE) out = p.communicate()[0] ret = [] ret.append(out) rc =", "2>&1' else: ofile = '> %s 2>&1' % ofile return", "mkpath('setup.sql') runfile(file) f = open(mkpath('query26.sql'),'a') f.write(\"\\! psql -d reuse_gptest -c", "\"LOGNAME\" ) HOST = socket.gethostname() GPHOME = os.getenv(\"GPHOME\") PGPORT =", "= mkpath('setup.sql') runfile(file) f = open(mkpath('query25.sql'),'a') f.write(\"\\! 
psql -d reuse_gptest", "csvtable;'\") # f.close() # f = open(mkpath('data/large_file.csv'),'w') # for i", "gpload fill missing fields\" file = mkpath('setup.sql') runfile(file) copy_data('external_file_04.txt','data_file.txt') write_config_file(mode='insert',reuse_flag='false',fast_match='false',file='data_file.txt',table='texttable1',", "mode with encoding GBK\" file = mkpath('setup.sql') runfile(file) copy_data('external_file_04.txt','data_file.txt') write_config_file(mode='merge',reuse_flag='true',fast_match='false',file='data_file.txt',encoding='GBK')", "def outFile(fname,outputPath = ''): return changeExtFile(fname, \".out\", outputPath) def diffFile(", "- QUOTE: \"+quote) if fill: f.write(\"\\n - FILL_MISSING_FIELDS: true\") f.write(\"\\n", "with reuse\" copy_data('external_file_01.txt','data_file.txt') write_config_file(reuse_flag='true',formatOpts='text',file='data_file.txt',table='texttable',escape=\"E'\\\\\\\\'\") self.doTest(21) # case 22 is flaky", "with reuse \" copy_data('external_file_11.csv','data_file.csv') write_config_file(reuse_flag='true',formatOpts='csv',file='data_file.csv',table='csvtable',format='csv',delimiter=\"','\") self.doTest(16) def test_17_gpload_formatOpts_quote(self): \"17 gpload", "outputPath),flag = flag, dbname=dbname , username=username, PGOPTIONS=PGOPTIONS, host = host,", "cmd = \"source %s/greenplum_path.sh; export MASTER_DATA_DIRECTORY=%s; export PGPORT=%s; %s\" \\", "reuse table when encoding is setted from GBK to empty\"", "= os.environ.get('USER'),gphome = os.environ['GPHOME'], mdd=os.environ['MASTER_DATA_DIRECTORY'],port = os.environ['PGPORT']): master_pattern = \"Context:\\s*-1\\s*Value:\\s*\\d+\"", "write_config_file(reuse_flag='true',formatOpts='csv',file='data_file.csv',table='csvtable',format='csv',delimiter=\"','\",error_table=\"err_table\",error_limit='90000000') self.doTest(23) def test_24_gpload_error_count(self): \"24 gpload error count with ext", "self.doTest(36) def test_37_gpload_reuse_table_merge_mode_invalid_encoding(self): \"37 gpload merge mode with invalid encoding\"", "None, username = None, PGOPTIONS = None, host = None,", "= \"-U3\", outputPath = \"\"): \"\"\" PURPOSE: compare the actual", "ret.append(out) rc = False if p.wait() else True return (rc,ret)", "out) = run('../gpdiff.pl -w ' + optionalFlags + \\ '", "name of the .sql file whose actual and expected outputs", "FILL_MISSING_FIELDS: true\") f.write(\"\\n OUTPUT:\") f.write(\"\\n - TABLE: \"+table) if mode:", "write_config_file('merge','true',file='data/data_file.tbl',columns_flag='1',mapping='1') self.doTest(15) def test_16_gpload_formatOpts_quote(self): \"16 gpload formatOpts quote unspecified in", "= '-f ' + ifile else: raise PSQLError('missing cmd and", "ext = '.ans' return os.path.splitext(fname)[0] + ext def isFileEqual( f1,", "self.doTest(30) def test_31_gpload_reuse_table_update_mode_with_fast_match_and_different_columns_number(self): \"31 gpload update mode with fast match", "): return changeExtFile( fname, \".diff\", outputPath ) def changeExtFile( fname,", "You may include the path as well as the filename.", "False if p.wait() else True return (rc,ret) def outFile(fname,outputPath =", "PGHOST = os.environ.get(\"PGHOST\") if PGHOST is None: PGHOST = HOST", "rc = False if p.wait() else True return (rc,ret) def", "command or file against psql. Return True if OK. 
@param", "n8 text\",dbname='reuse_gptest') copy_data('external_file_08.txt','data_file.txt') write_config_file(mode='update',reuse_flag='true',fast_match='true',file='data_file.txt') self.doTest(31) def test_32_gpload_update_mode_without_reuse_table_with_fast_match(self): \"32 gpload update", "errorMessage = str(e) print 'could not connect to database: '", "that the function is backwards compatible). Yes, this is passed", "def changeExtFile( fname, ext = \".diff\", outputPath = \"\" ):", "ofile = '> /dev/null 2>&1' else: ofile = '> %s", "outputPath=outputPath) result = isFileEqual(f1, f2, optionalFlags, outputPath=outputPath) diff = None", "f.write(\"\\n - REUSE_TABLES: \"+reuse_flag) f.write(\"\\n - FAST_MATCH: \"+fast_match) if staging_table:", "and not a failure. The reason for an error might", "gpload merge mode with reuse (RERUN with different columns number", "test_15_gpload_reuse_table_merge_mode_with_different_columns_order(self): \"15 gpload merge mode with different columns' order \"", "gpload formatOpts quote unspecified in CSV with reuse \" copy_data('external_file_11.csv','data_file.csv')", "(flag == '-q'): # Don't echo commands sent to server", "it is configured\" file = mkpath('setup.sql') runfile(file) f = open(mkpath('query25.sql'),'a')", "= PGUSER # Use the default login user if PGOPTIONS", "count(*) from test.csvtable;'\") f.close() copy_data('external_file_13.csv','data_file.csv') write_config_file(reuse_flag='true',formatOpts='csv',file='data_file.csv',table='test.csvtable',format='csv',delimiter=\"','\",log_errors=True,error_limit='10',staging_table='staging_table',externalSchema=\"'%'\") self.doTest(27) def test_28_gpload_ext_staging_table_with_dot(self): \"28", "match and differenct columns number) \" psql_run(cmd=\"ALTER TABLE texttable ADD", "open(mkpath('query28.sql'),'a') f.write(\"\\! psql -d reuse_gptest -c 'select count(*) from test.csvtable;'\")", "test_19_gpload_formatOpts_escape(self): \"19 gpload formatOpts escape '\\\\' with reuse\" copy_data('external_file_01.txt','data_file.txt') file", "f.write(\"\\n - ERROR_LIMIT: \" + error_limit) if error_table: f.write(\"\\n -", "f.write(\"\\nDATABASE: \"+database) f.write(\"\\nUSER: \"+os.environ.get('USER')) f.write(\"\\nHOST: \"+hostNameAddrs) f.write(\"\\nPORT: \"+masterPort) f.write(\"\\nGPLOAD:\") f.write(\"\\n", "db.query(queryString.encode('utf-8')).getresult() return resultList def drop_tables(): try: db = pg.DB(dbname='reuse_gptest' ,host='localhost'", "if port else 5432 def get_ip(hostname=None): if hostname is None:", "elif not ofile: ofile = '> /dev/null 2>&1' else: ofile", "PGUSER is None: PGUSER = USER PGHOST = os.environ.get(\"PGHOST\") if", "not found, etc. Failure is define as test case failures,", "n8: s_n8\") f.write(\"\\n n9: s_n9\") if externalSchema: f.write(\"\\n EXTERNAL:\") f.write(\"\\n", "f.write(\"\\n - s_dt: timestamp\") f.write(\"\\n - s_s3: text\") f.write(\"\\n -", "host : to connect to a different host @param port", "<gh_stars>1-10 #!/usr/bin/env python import unittest import sys import os import", "0: return os.path.splitext( fname )[0] + ext else: filename =", "GPHOME = os.getenv(\"GPHOME\") PGPORT = get_port() PGUSER = os.environ.get(\"PGUSER\") if", "unittest import sys import os import string import time import", "= db.query(queryString.encode('utf-8')).getresult() return resultList def drop_tables(): try: db = pg.DB(dbname='reuse_gptest'", "columns number in file) \" psql_run(cmd=\"ALTER TABLE texttable ADD column", "exists: 'a' = append; 'w' = write. 
Defaults to append", "from csvtable;'\") f.close() copy_data('external_file_13.csv','data_file.csv') write_config_file(reuse_flag='true',formatOpts='csv',file='data_file.csv',table='csvtable',format='csv',delimiter=\"','\",log_errors=True,error_limit='10',staging_table='staging_table') self.doTest(25) def test_26_gpload_ext_staging_table_with_externalschema(self): \"26 gpload", "test_10_gpload_reuse_table_merge_mode_with_reuse(self): \"10 gpload merge mode with reuse \" drop_tables() copy_data('external_file_06.txt','data_file.txt')", "is None: host = \"-h %s\" % PGHOST else: host", "Yes, this is passed to the open() function, so you", "write_config_file(mode='merge',reuse_flag='true',fast_match='false',file='data_file.txt',encoding='GBK') self.doTest(36) def test_37_gpload_reuse_table_merge_mode_invalid_encoding(self): \"37 gpload merge mode with invalid", "= os.getenv(\"GPHOME\") PGPORT = get_port() PGUSER = os.environ.get(\"PGUSER\") if PGUSER", "# copy_data('large_file.csv','data_file.csv') # write_config_file(reuse_flag='true',formatOpts='csv',file='data_file.csv',table='csvtable',format='csv',delimiter=\"','\",log_errors=True,error_limit='90000000') # self.doTest(22) def test_23_gpload_error_count(self): \"23 gpload", "write_config_file(mode='merge',reuse_flag='true',fast_match='false',file='data_file.txt',encoding='xxxx') self.doTest(37) def test_38_gpload_without_preload(self): \"38 gpload insert mode without preload\"", "s_dt\") f.write(\"\\n s3: s_s3\") f.write(\"\\n n1: s_n1\") f.write(\"\\n n2: s_n2\")", "sys.path: sys.path.append(UPD) DBNAME = \"postgres\" USER = os.environ.get( \"LOGNAME\" )", "PGOPTIONS= PGOPTIONS, host = host, port = port) return (ok,", "myinitfile = \"\"): LMYD = os.path.abspath(os.path.dirname(__file__)) if not os.access( f1,", "-d %s %s %s -U %s %s %s %s' %", "%s %s %s' % (PGOPTIONS, dbname, host, port, username, flag,", "str(e) print 'could not connect to database: ' + errorMessage", "cmd: The command to run at the shell. 
#!/usr/bin/env python
# Regression tests for gpload's YAML control-file options
# (delimiters, quotes, escapes, modes, table reuse, staging tables).

import unittest
import sys
import os
import string
import time
import socket
import fileinput
import platform
import re
try:
    import subprocess32 as subprocess
except:
    import subprocess
import pg

def get_port_from_conf():
    file = os.environ.get('MASTER_DATA_DIRECTORY')+'/postgresql.conf'
    if os.path.isfile(file):
        with open(file) as f:
            for line in f.xreadlines():
                match = re.search('port=\d+', line)
                if match:
                    match1 = re.search('\d+', match.group())
                    if match1:
                        return match1.group()

def get_port():
    port = os.environ['PGPORT']
    if not port:
        port = get_port_from_conf()
    return port if port else 5432

def get_ip(hostname=None):
    if hostname is None:
        hostname = socket.gethostname()
    hostinfo = socket.getaddrinfo(hostname, None)
    ipaddrlist = list(set([(ai[4][0]) for ai in hostinfo]))
    for myip in ipaddrlist:
        if myip.find(":") > 0:
            ipv6 = myip
            return ipv6
        elif myip.find(".") > 0:
            ipv4 = myip
            return ipv4
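# A minimal sketch of the port-resolution order implemented above, with
# hypothetical values (neither the conf contents nor '15432' comes from a
# real run):
#
#   PGPORT=15432 in the environment     ->  get_port() returns '15432'
#   PGPORT empty, postgresql.conf has   ->  get_port_from_conf() scrapes the
#     a line matching port=\d+              first 'port=NNNN' match
#   neither source yields a value       ->  get_port() falls back to 5432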
def getPortMasterOnly(host = 'localhost', master_value = None,
                      user = os.environ.get('USER'), gphome = os.environ['GPHOME'],
                      mdd = os.environ['MASTER_DATA_DIRECTORY'], port = os.environ['PGPORT']):

    master_pattern = "Context:\s*-1\s*Value:\s*\d+"
    command = "gpconfig -s %s" % ( "port" )

    cmd = "source %s/greenplum_path.sh; export MASTER_DATA_DIRECTORY=%s; export PGPORT=%s; %s" \
           % (gphome, mdd, port, command)

    (ok,out) = run(cmd)
    if not ok:
        raise Exception("Unable to connect to segment server %s as user %s" % (host, user))

    for line in out:
        out = line.split('\n')
    for line in out:
        if re.search(master_pattern, line):
            master_value = int(line.split()[3].strip())

    if master_value is None:
        error_msg = "".join(out)
        raise Exception(error_msg)

    return str(master_value)

"""
Global Values
"""
MYD = os.path.abspath(os.path.dirname(__file__))
mkpath = lambda *x: os.path.join(MYD, *x)
UPD = os.path.abspath(mkpath('..'))
if UPD not in sys.path:
    sys.path.append(UPD)

DBNAME = "postgres"
USER = os.environ.get( "LOGNAME" )
HOST = socket.gethostname()
GPHOME = os.getenv("GPHOME")
PGPORT = get_port()
PGUSER = os.environ.get("PGUSER")
if PGUSER is None:
    PGUSER = USER
PGHOST = os.environ.get("PGHOST")
if PGHOST is None:
    PGHOST = HOST

d = mkpath('config')
if not os.path.exists(d):
    os.mkdir(d)
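# Illustration of the line shape getPortMasterOnly() expects gpconfig to emit;
# the concrete port number is made up, but the token layout is what the
# split()[3] indexing relies on:
#
#   "Context: -1 Value: 5432".split()  ->  ['Context:', '-1', 'Value:', '5432']
#                                           index:   0     1      2        3
#
# master_value = int(line.split()[3]) therefore only works while the matched
# line begins at 'Context:'; keep master_pattern and the indexing in agreement.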
def write_config_file(mode='insert', reuse_flag='', columns_flag='0', mapping='0',
                      portNum='8081', database='reuse_gptest', host='localhost',
                      formatOpts='text', file='data_file.txt', table='texttable',
                      format='text', delimiter="'|'", escape='', quote='',
                      truncate='False', log_errors=None, error_limit='0',
                      error_table=None, externalSchema=None, staging_table=None,
                      fast_match='false', encoding=None, preload=True, fill=False):

    f = open(mkpath('config/config_file'),'w')
    f.write("VERSION: 1.0.0.1")
    if database:
        f.write("\nDATABASE: "+database)
    f.write("\nUSER: "+os.environ.get('USER'))
    f.write("\nHOST: "+hostNameAddrs)
    f.write("\nPORT: "+masterPort)
    f.write("\nGPLOAD:")
    f.write("\n   INPUT:")
    f.write("\n    - SOURCE:")
    f.write("\n         LOCAL_HOSTNAME:")
    f.write("\n            - "+hostNameAddrs)
    if portNum:
        f.write("\n         PORT: "+portNum)
    f.write("\n         FILE:")
    f.write("\n            - "+mkpath(file))
    if columns_flag=='1':
        f.write("\n    - COLUMNS:")
        f.write("\n           - s_s1: text")
        f.write("\n           - s_s2: text")
        f.write("\n           - s_dt: timestamp")
        f.write("\n           - s_s3: text")
        f.write("\n           - s_n1: smallint")
        f.write("\n           - s_n2: integer")
        f.write("\n           - s_n3: bigint")
        f.write("\n           - s_n4: decimal")
        f.write("\n           - s_n5: numeric")
        f.write("\n           - s_n6: real")
        f.write("\n           - s_n7: double precision")
        f.write("\n           - s_n8: text")
        f.write("\n           - s_n9: text")
    if format:
        f.write("\n    - FORMAT: "+format)
    if log_errors:
        f.write("\n    - LOG_ERRORS: true")
        f.write("\n    - ERROR_LIMIT: " + error_limit)
    if error_table:
        f.write("\n    - ERROR_TABLE: " + error_table)
        f.write("\n    - ERROR_LIMIT: " + error_limit)
    if delimiter:
        f.write("\n    - DELIMITER: "+delimiter)
    if encoding:
        f.write("\n    - ENCODING: "+encoding)
    if escape:
        f.write("\n    - ESCAPE: "+escape)
    if quote:
        f.write("\n    - QUOTE: "+quote)
    if fill:
        f.write("\n    - FILL_MISSING_FIELDS: true")
    f.write("\n   OUTPUT:")
    f.write("\n    - TABLE: "+table)
    if mode:
        if mode == 'insert':
            f.write("\n    - MODE: "+'insert')
        if mode == 'update':
            f.write("\n    - MODE: "+'update')
        if mode == 'merge':
            f.write("\n    - MODE: "+'merge')
    f.write("\n    - UPDATE_COLUMNS:")
    f.write("\n           - n2")
    f.write("\n    - MATCH_COLUMNS:")
    f.write("\n           - n1")
    f.write("\n           - s1")
    f.write("\n           - s2")
    if mapping=='1':
        f.write("\n    - MAPPING:")
        f.write("\n           s1: s_s1")
        f.write("\n           s2: s_s2")
        f.write("\n           dt: s_dt")
        f.write("\n           s3: s_s3")
        f.write("\n           n1: s_n1")
        f.write("\n           n2: s_n2")
        f.write("\n           n3: s_n3")
        f.write("\n           n4: s_n4")
        f.write("\n           n5: s_n5")
        f.write("\n           n6: s_n6")
        f.write("\n           n7: s_n7")
        f.write("\n           n8: s_n8")
        f.write("\n           n9: s_n9")
    if externalSchema:
        f.write("\n   EXTERNAL:")
        f.write("\n    - SCHEMA: "+externalSchema)
    if preload:
        f.write("\n   PRELOAD:")
        f.write("\n    - REUSE_TABLES: "+reuse_flag)
        f.write("\n    - FAST_MATCH: "+fast_match)
        if staging_table:
            f.write("\n    - STAGING_TABLE: "+staging_table)
    f.write("\n")
    f.close()
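# For orientation, a control file produced by the defaults above looks roughly
# like this; HOST/PORT come from hostNameAddrs/masterPort at import time and
# are shown here with placeholder values:
#
#   VERSION: 1.0.0.1
#   DATABASE: reuse_gptest
#   USER: gpadmin
#   HOST: 192.0.2.10
#   PORT: 5432
#   GPLOAD:
#      INPUT:
#       - SOURCE:
#            LOCAL_HOSTNAME:
#               - 192.0.2.10
#            PORT: 8081
#            FILE:
#               - <MYD>/data_file.txt
#       - FORMAT: text
#       - DELIMITER: '|'
#      OUTPUT:
#       - TABLE: texttable
#       - MODE: insert
#       - UPDATE_COLUMNS: / MATCH_COLUMNS: (always emitted, see above)
#      PRELOAD:
#       - REUSE_TABLES:
#       - FAST_MATCH: false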
def runfile(ifile, flag='', dbname=None, outputPath="", outputFile="",
            username=None,
            PGOPTIONS=None, host=None, port=None):

    if len(outputFile) == 0:
        (ok, out) = psql_run(ifile=ifile, ofile=outFile(ifile, outputPath), flag=flag,
                             dbname=dbname, username=username,
                             PGOPTIONS=PGOPTIONS, host=host, port=port)
    else:
        (ok, out) = psql_run(ifile=ifile, ofile=outFile(outputFile, outputPath), flag=flag,
                             dbname=dbname, username=username,
                             PGOPTIONS=PGOPTIONS, host=host, port=port)

    return (ok, out)

def psql_run(ifile=None, ofile=None, cmd=None,
             flag='-e', dbname=None,
             username=None,
             PGOPTIONS=None, host=None, port=None):
    '''
    Run a command or file against psql. Return True if OK.
    @param dbname: database name
    @param ifile: input file
    @param cmd: command line
    @param flag: -e Run SQL with no comments (default)
                 -a Run SQL with comments and psql notice
    @param username: psql user
    @param host    : to connect to a different host
    @param port    : port where gpdb is running
    @param PGOPTIONS: connects to postgres via utility mode
    '''
    if dbname is None:
        dbname = DBNAME

    if username is None:
        username = PGUSER  # Use the default login user

    if PGOPTIONS is None:
        PGOPTIONS = ""
    else:
        PGOPTIONS = "PGOPTIONS='%s'" % PGOPTIONS

    if host is None:
        host = "-h %s" % PGHOST
    else:
        host = "-h %s" % host

    if port is None:
        port = ""
    else:
        port = "-p %s" % port

    if cmd:
        arg = '-c "%s"' % cmd
    elif ifile:
        arg = ' < ' + ifile
        if not (flag == '-q'):  # Don't echo commands sent to server
            arg = '-e < ' + ifile
        if flag == '-a':
            arg = '-f ' + ifile
    else:
        raise PSQLError('missing cmd and ifile')

    if ofile == '-':
        ofile = '2>&1'
    elif not ofile:
        ofile = '> /dev/null 2>&1'
    else:
        ofile = '> %s 2>&1' % ofile

    return run('%s psql -d %s %s %s -U %s %s %s %s' %
               (PGOPTIONS, dbname, host, port, username, flag, arg, ofile))

def run(cmd):
    """
    Run a shell command. Return (True, [result]) if OK, or (False, []) otherwise.
    @param cmd: The command to run at the shell.
    """
    p = subprocess.Popen(cmd, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
    out = p.communicate()[0]
    ret = []
    ret.append(out)
    rc = False if p.wait() else True
    return (rc, ret)
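# Shape of the command string psql_run() hands to run(); host 'mdw' and user
# 'gpadmin' are placeholders, everything else follows from the defaults above:
#
#   psql_run(ifile='setup.sql', dbname='reuse_gptest')
#     ->  " psql -d reuse_gptest -h mdw  -U gpadmin -e -e < setup.sql > /dev/null 2>&1"
#
# (leading blank: PGOPTIONS is empty; doubled -e: once as `flag`, once inside
# `arg`; the double space marks the empty `port` slot)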
def outFile(fname, outputPath=''):
    return changeExtFile(fname, ".out", outputPath)

def diffFile( fname, outputPath = "" ):
    return changeExtFile( fname, ".diff", outputPath )

def changeExtFile( fname, ext = ".diff", outputPath = "" ):

    if len( outputPath ) == 0:
        return os.path.splitext( fname )[0] + ext
    else:
        filename = fname.split( "/" )
        fname = os.path.splitext( filename[len( filename ) - 1] )[0]
        return outputPath + "/" + fname + ext

def gpdbAnsFile(fname):
    ext = '.ans'
    return os.path.splitext(fname)[0] + ext

def isFileEqual( f1, f2, optionalFlags = "", outputPath = "", myinitfile = ""):

    LMYD = os.path.abspath(os.path.dirname(__file__))
    if not os.access( f1, os.R_OK ):
        raise Exception( 'Error: cannot find file %s' % f1 )
    if not os.access( f2, os.R_OK ):
        raise Exception( 'Error: cannot find file %s' % f2 )
    dfile = diffFile( f1, outputPath = outputPath )
    # Get the suitePath name to add init_file
    suitePath = f1[0:f1.rindex( "/" )]
    if os.path.exists(suitePath + "/init_file"):
        (ok, out) = run('../gpdiff.pl -w ' + optionalFlags + \
                        ' -I NOTICE: -I HINT: -I CONTEXT: -I GP_IGNORE: '
                        '--gp_init_file=%s/global_init_file --gp_init_file=%s/init_file '
                        '%s %s > %s 2>&1' % (LMYD, suitePath, f1, f2, dfile))
    else:
        if os.path.exists(myinitfile):
            (ok, out) = run('../gpdiff.pl -w ' + optionalFlags + \
                            ' -I NOTICE: -I HINT: -I CONTEXT: -I GP_IGNORE: '
                            '--gp_init_file=%s/global_init_file --gp_init_file=%s '
                            '%s %s > %s 2>&1' % (LMYD, myinitfile, f1, f2, dfile))
        else:
            (ok, out) = run( '../gpdiff.pl -w ' + optionalFlags + \
                             ' -I NOTICE: -I HINT: -I CONTEXT: -I GP_IGNORE: '
                             '--gp_init_file=%s/global_init_file '
                             '%s %s > %s 2>&1' % ( LMYD, f1, f2, dfile ) )

    if ok:
        os.unlink( dfile )
    return ok

def read_diff(ifile, outputPath):
    """
    Opens the diff file that is associated with the given input file and
    returns its contents as a string.
    """
    dfile = diffFile(ifile, outputPath)
    with open(dfile, 'r') as diff:
        return diff.read()
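# Naming convention tying the helpers above together, traced by hand:
#
#   gpdbAnsFile('query7.sql')                     -> 'query7.ans'   (expected)
#   outFile('query7.sql')                         -> 'query7.out'   (actual)
#   changeExtFile('sql/query7.sql', '.diff',
#                 outputPath='out')               -> 'out/query7.diff'
#
# gpdiff.pl writes the .diff file and isFileEqual() unlinks it again when the
# comparison succeeds, so a surviving .diff always marks a failure.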
def modify_sql_file(num):
    file = mkpath('query%d.sql' % num)
    user = os.environ.get('USER')
    if not user:
        user = os.environ.get('USER')
    if os.path.isfile(file):
        for line in fileinput.FileInput(file, inplace=1):
            line = line.replace("gpload.py ", "gpload ")
            print str(re.sub('\n', '', line))

def copy_data(source='', target=''):
    cmd = 'cp ' + mkpath('data/' + source) + ' ' + mkpath(target)
    p = subprocess.Popen(cmd, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
    return p.communicate()

hostNameAddrs = get_ip(HOST)
masterPort = getPortMasterOnly()

def get_table_name():
    try:
        db = pg.DB(dbname='reuse_gptest'
                  ,host='localhost'
                  ,port=int(PGPORT)
                  )
    except Exception,e:
        errorMessage = str(e)
        print 'could not connect to database: ' + errorMessage
    queryString = """SELECT relname
                     from pg_class
                     WHERE relname like 'ext_gpload_reusable%'
                     OR relname like 'staging_gpload_reusable%';"""
    resultList = db.query(queryString.encode('utf-8')).getresult()
    return resultList

def drop_tables():
    try:
        db = pg.DB(dbname='reuse_gptest'
                  ,host='localhost'
                  ,port=int(PGPORT)
                  )
    except Exception,e:
        errorMessage = str(e)
        print 'could not connect to database: ' + errorMessage

    list = get_table_name()
    for i in list:
        name = i[0]
        match = re.search('ext_gpload', name)
        if match:
            queryString = "DROP EXTERNAL TABLE %s" % name
            db.query(queryString.encode('utf-8'))
        else:
            queryString = "DROP TABLE %s" % name
            db.query(queryString.encode('utf-8'))

class PSQLError(Exception):
    '''
    PSQLError is the base class for exceptions in this module.
    http://docs.python.org/tutorial/errors.html
    We want to raise an error and not a failure. The reason for an error
    might be a program error, a file not found, etc.
    Failure is defined as a test case failure, when the output is different
    from the expected result.
    '''
    pass
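# Sketch of the dispatch in drop_tables(), with invented relnames: gpload's
# reusable artifacts come back from get_table_name() in pairs like
#
#   ext_gpload_reusable_a1b2c3      -> matches 'ext_gpload' -> DROP EXTERNAL TABLE
#   staging_gpload_reusable_a1b2c3  -> no match             -> DROP TABLE
#
# so the single re.search() is enough to pick the right DROP statement.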
You may include the", "port where gpdb is running @param PGOPTIONS: connects to postgres", "output files and report an error if they don't match.", "Run SQL with comments and psql notice @param username: psql", "= None): if len(outputFile) == 0: (ok, out) = psql_run(ifile", "-f \"+mkpath('config/config_file')+ \" -d reuse_gptest\\n\") f.close() file = mkpath('setup.sql') runfile(file)", "any value that is valid for the second parameter of", "' + optionalFlags + \\ ' -I NOTICE: -I HINT:", "reuse\" runfile(mkpath('setup.sql')) f = open(mkpath('query7.sql'),'a') f.write(\"\\! psql -d reuse_gptest -c", "f.write(\"\\n - MODE: \"+'insert') if mode == 'update': f.write(\"\\n -", "- s_s2: text\") f.write(\"\\n - s_dt: timestamp\") f.write(\"\\n - s_s3:", "comments (default) -a Run SQL with comments and psql notice", "/dev/null 2>&1' else: ofile = '> %s 2>&1' % ofile", "def getPortMasterOnly(host = 'localhost',master_value = None, user = os.environ.get('USER'),gphome =", "PGOPTIONS = None, host = None, port = None): '''", "from csvtable;'\") # f.close() # f = open(mkpath('data/large_file.csv'),'w') # for", "text\",dbname='reuse_gptest') copy_data('external_file_08.txt','data_file.txt') write_config_file(mode='update',reuse_flag='true',fast_match='true',file='data_file.txt') self.doTest(31) def test_32_gpload_update_mode_without_reuse_table_with_fast_match(self): \"32 gpload update mode", "f2, optionalFlags = \"\", outputPath = \"\", myinitfile = \"\"):", "host if port is None: port = \"\" else: port", "if PGOPTIONS is None: PGOPTIONS = \"\" else: PGOPTIONS =", "outputPath ) def changeExtFile( fname, ext = \".diff\", outputPath =", "else: PGOPTIONS = \"PGOPTIONS='%s'\" % PGOPTIONS if host is None:", "- s2\") if mapping=='1': f.write(\"\\n - MAPPING:\") f.write(\"\\n s1: s_s1\")", "None: port = \"\" else: port = \"-p %s\" %", "+ ext def gpdbAnsFile(fname): ext = '.ans' return os.path.splitext(fname)[0] +", "open(mkpath('query23.sql'),'a') f.write(\"\\! psql -d reuse_gptest -c 'select count(*) from csvtable;'\")", "psql -d reuse_gptest -c 'select count(*) from test.csvtable;'\") f.close() copy_data('external_file_13.csv','data_file.csv')", "= \"\".join(out) raise Exception(error_msg) return str(master_value) \"\"\" Global Values \"\"\"", "reuse \" copy_data('external_file_01.txt','data_file.txt') write_config_file(reuse_flag='true',formatOpts='text',file='data_file.txt',table='texttable',delimiter=\"'|'\") self.doTest(1) def test_02_gpload_formatOpts_delimiter(self): \"2 gpload formatOpts", "if not os.access( f1, os.R_OK ): raise Exception( 'Error: cannot", "flag, arg, ofile)) def run(cmd): \"\"\" Run a shell command.", "file = mkpath('query%d.sql' % num) user = os.environ.get('USER') if not", "None: PGHOST = HOST d = mkpath('config') if not os.path.exists(d):", "if hostname is None: hostname = socket.gethostname() else: hostname =", "if mode == 'insert': f.write(\"\\n - MODE: \"+'insert') if mode", ": to connect to a different host @param port :", "from pg_class WHERE relname like 'ext_gpload_reusable%' OR relname like 'staging_gpload_reusable%';\"\"\"", "Use the default login user if PGOPTIONS is None: PGOPTIONS", "' + ifile if not (flag == '-q'): # Don't", "gpload merge mode with fast match and encoding GBK\" file", "= None, PGOPTIONS = None, host = None, port =", "= mkpath('setup.sql') runfile(file) f = open(mkpath('query28.sql'),'a') f.write(\"\\! 
psql -d reuse_gptest", "FILE:\") f.write(\"\\n - \"+mkpath(file)) if columns_flag=='1': f.write(\"\\n - COLUMNS:\") f.write(\"\\n", "text\") f.write(\"\\n - s_dt: timestamp\") f.write(\"\\n - s_s3: text\") f.write(\"\\n", "== '-q'): # Don't echo commands sent to server arg", "f.write(\"\\n n9: s_n9\") if externalSchema: f.write(\"\\n EXTERNAL:\") f.write(\"\\n - SCHEMA:", "reuse\" copy_data('external_file_02.txt','data_file.txt') write_config_file(reuse_flag='true',formatOpts='text',file='data_file.txt',table='texttable',delimiter=\"'\\t'\") self.doTest(2) def test_03_gpload_formatOpts_delimiter(self): \"3 gpload formatOpts delimiter", "= 'cp '+ mkpath('data/' + source) + ' ' +", "with reuse\" copy_data('external_file_02.txt','data_file.txt') write_config_file(reuse_flag='true',formatOpts='text',file='data_file.txt',table='texttable',delimiter=\"E'\\\\t'\") self.doTest(3) def test_04_gpload_formatOpts_delimiter(self): \"4 gpload formatOpts", "with encoding GBK\" file = mkpath('setup.sql') runfile(file) copy_data('external_file_04.txt','data_file.txt') write_config_file(mode='merge',reuse_flag='true',fast_match='false',file='data_file.txt',encoding='GBK') self.doTest(36)", "run( '../gpdiff.pl -w ' + optionalFlags + \\ ' -I", "errorMessage list = get_table_name() for i in list: name =", "i in range(0, 10000): # if i % 2 ==", "mode == 'merge': f.write(\"\\n - MODE: \"+'merge') f.write(\"\\n - UPDATE_COLUMNS:\")", "will process this file name to figure out the proper", "so you can theoretically pass any value that is valid", "file %s' % f2 ) dfile = diffFile( f1, outputPath", "psql -d reuse_gptest -c 'select count(*) from csvtable;'\") f.close() copy_data('external_file_13.csv','data_file.csv')", "= os.environ.get('USER') if os.path.isfile(file): for line in fileinput.FileInput(file,inplace=1): line =", "mdd, port, command) (ok,out) = run(cmd) if not ok: raise", "reuse_gptest\\n\"+\"\\! gpload -f \"+mkpath('config/config_file')+ \" -d reuse_gptest\\n\") f.close() file =", "port = os.environ['PGPORT'] if not port: port = get_port_from_conf() return", "import string import time import socket import fileinput import platform", "os.path.join(MYD, *x) UPD = os.path.abspath(mkpath('..')) if UPD not in sys.path:", "username, PGOPTIONS= PGOPTIONS, host = host, port = port) return", "\" + error_limit) if delimiter: f.write(\"\\n - DELIMITER: \"+delimiter) if", "0: f.write('1997,Ford,E350,\"ac, abs, moon\",3000.00,a\\n') else: f.write('1997,Ford,E350,\"ac, abs, moon\",3000.00\\n') f.close() copy_data('large_file.csv','data_file.csv')", "copy_data('external_file_04.txt','data_file.txt') write_config_file(mode='update',reuse_flag='true',fast_match='true',file='data_file.txt') self.doTest(30) def test_31_gpload_reuse_table_update_mode_with_fast_match_and_different_columns_number(self): \"31 gpload update mode with", "file = mkpath('setup.sql') runfile(file) copy_data('external_file_04.txt','data_file.txt') write_config_file(mode='merge',reuse_flag='true',fast_match='true',file='data_file.txt',externalSchema='test') self.doTest(33) def test_34_gpload_reuse_table_merge_mode_with_fast_match_and_encoding(self): \"34", "f.write('1997,Ford,E350,\"ac, abs, moon\",3000.00\\n') # f.close() # copy_data('large_file.csv','data_file.csv') # write_config_file(reuse_flag='true',formatOpts='csv',file='data_file.csv',table='csvtable',format='csv',delimiter=\"','\",log_errors=True,error_limit='90000000') #", "port if cmd: arg = '-c \"%s\"' % cmd elif", "count(*) from texttable;'\\n\"+\"\\! 
def getPortMasterOnly(host='localhost', master_value=None,
                      user=os.environ.get('USER'),
                      gphome=os.environ['GPHOME'],
                      mdd=os.environ['MASTER_DATA_DIRECTORY'],
                      port=os.environ['PGPORT']):
    master_pattern = "Context:\s*-1\s*Value:\s*\d+"
    command = "gpconfig -s %s" % ("port")
    cmd = "source %s/greenplum_path.sh; export MASTER_DATA_DIRECTORY=%s; export PGPORT=%s; %s" \
          % (gphome, mdd, port, command)
    (ok, out) = run(cmd)
    if not ok:
        raise Exception("Unable to connect to segment server %s as user %s" % (host, user))
    for line in out:
        out = line.split('\n')
        for line in out:
            if re.search(master_pattern, line):
                master_value = int(line.split()[3].strip())
    if master_value is None:
        error_msg = "".join(out)
        raise Exception(error_msg)
    return str(master_value)

"""
Global Values
"""
MYD = os.path.abspath(os.path.dirname(__file__))
mkpath = lambda *x: os.path.join(MYD, *x)
UPD = os.path.abspath(mkpath('..'))
if UPD not in sys.path:
    sys.path.append(UPD)

DBNAME = "postgres"
USER = os.environ.get("LOGNAME")
HOST = socket.gethostname()
GPHOME = os.getenv("GPHOME")
PGPORT = get_port()
PGUSER = os.environ.get("PGUSER")
if PGUSER is None:
    PGUSER = USER
PGHOST = os.environ.get("PGHOST")
if PGHOST is None:
    PGHOST = HOST

d = mkpath('config')
if not os.path.exists(d):
    os.mkdir(d)
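# Sketch of what the helpers above resolve to on a typical dev cluster; the
# values are illustrative assumptions, not output this module guarantees.
# Kept as comments so nothing extra executes at import time:
#
#   get_port()            # -> '15432' (PGPORT, else postgresql.conf, else 5432)
#   get_ip('localhost')   # -> '127.0.0.1' (prefers an IPv6 address, else IPv4)
#   getPortMasterOnly()   # -> '15432' (parsed from `gpconfig -s port` output)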
def write_config_file(mode='insert', reuse_flag='', columns_flag='0', mapping='0',
                      portNum='8081', database='reuse_gptest', host='localhost',
                      formatOpts='text', file='data/external_file_01.txt',
                      table='texttable', format='text', delimiter="'|'",
                      escape='', quote='', truncate='False', log_errors=None,
                      error_limit='0', error_table=None, externalSchema=None,
                      staging_table=None, fast_match='false', encoding=None,
                      preload=True, fill=False):
    f = open(mkpath('config/config_file'), 'w')
    f.write("VERSION: 1.0.0.1")
    if database:
        f.write("\nDATABASE: " + database)
    f.write("\nUSER: " + os.environ.get('USER'))
    f.write("\nHOST: " + hostNameAddrs)
    f.write("\nPORT: " + masterPort)
    f.write("\nGPLOAD:")
    f.write("\n   INPUT:")
    f.write("\n    - SOURCE:")
    f.write("\n         LOCAL_HOSTNAME:")
    f.write("\n            - " + hostNameAddrs)
    if portNum:
        f.write("\n         PORT: " + portNum)
    f.write("\n         FILE:")
    f.write("\n          - " + mkpath(file))
    if columns_flag == '1':
        f.write("\n    - COLUMNS:")
        f.write("\n           - s_s1: text")
        f.write("\n           - s_s2: text")
        f.write("\n           - s_dt: timestamp")
        f.write("\n           - s_s3: text")
        f.write("\n           - s_n1: smallint")
        f.write("\n           - s_n2: integer")
        f.write("\n           - s_n3: bigint")
        f.write("\n           - s_n4: decimal")
        f.write("\n           - s_n5: numeric")
        f.write("\n           - s_n6: real")
        f.write("\n           - s_n7: double precision")
        f.write("\n           - s_n8: text")
        f.write("\n           - s_n9: text")
    if format:
        f.write("\n    - FORMAT: " + format)
    if log_errors:
        f.write("\n    - LOG_ERRORS: true")
        f.write("\n    - ERROR_LIMIT: " + error_limit)
    if error_table:
        f.write("\n    - ERROR_TABLE: " + error_table)
        f.write("\n    - ERROR_LIMIT: " + error_limit)
    if delimiter:
        f.write("\n    - DELIMITER: " + delimiter)
    if encoding:
        f.write("\n    - ENCODING: " + encoding)
    if escape:
        f.write("\n    - ESCAPE: " + escape)
    if quote:
        f.write("\n    - QUOTE: " + quote)
    if fill:
        f.write("\n    - FILL_MISSING_FIELDS: true")
    f.write("\n   OUTPUT:")
    f.write("\n    - TABLE: " + table)
    if mode:
        if mode == 'insert':
            f.write("\n    - MODE: " + 'insert')
        if mode == 'update':
            f.write("\n    - MODE: " + 'update')
        if mode == 'merge':
            f.write("\n    - MODE: " + 'merge')
        f.write("\n    - UPDATE_COLUMNS:")
        f.write("\n           - n2")
        f.write("\n    - MATCH_COLUMNS:")
        f.write("\n           - n1")
        f.write("\n           - s1")
        f.write("\n           - s2")
    if mapping == '1':
        f.write("\n    - MAPPING:")
        f.write("\n           s1: s_s1")
        f.write("\n           s2: s_s2")
        f.write("\n           dt: s_dt")
        f.write("\n           s3: s_s3")
        f.write("\n           n1: s_n1")
        f.write("\n           n2: s_n2")
        f.write("\n           n3: s_n3")
        f.write("\n           n4: s_n4")
        f.write("\n           n5: s_n5")
        f.write("\n           n6: s_n6")
        f.write("\n           n7: s_n7")
        f.write("\n           n8: s_n8")
        f.write("\n           n9: s_n9")
    if externalSchema:
        f.write("\n   EXTERNAL:")
        f.write("\n    - SCHEMA: " + externalSchema)
    if preload:
        f.write("\n   PRELOAD:")
        f.write("\n    - REUSE_TABLES: " + reuse_flag)
        f.write("\n    - FAST_MATCH: " + fast_match)
        if staging_table:
            f.write("\n    - STAGING_TABLE: " + staging_table)
    f.write("\n")
    f.close()
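# For orientation, a hedged sketch of the YAML that write_config_file() renders
# for the defaults plus reuse_flag='true'; the host, port, and path values are
# examples only, not literal output:
#
#   VERSION: 1.0.0.1
#   DATABASE: reuse_gptest
#   USER: gpadmin
#   HOST: 192.0.2.10
#   PORT: 15432
#   GPLOAD:
#      INPUT:
#       - SOURCE:
#            LOCAL_HOSTNAME:
#               - 192.0.2.10
#            PORT: 8081
#            FILE:
#             - <MYD>/data/external_file_01.txt
#       - FORMAT: text
#       - DELIMITER: '|'
#      OUTPUT:
#       - TABLE: texttable
#       - MODE: insert
#       - UPDATE_COLUMNS:
#              - n2
#       - MATCH_COLUMNS:
#              - n1
#              - s1
#              - s2
#      PRELOAD:
#       - REUSE_TABLES: true
#       - FAST_MATCH: false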
def runfile(ifile, flag='', dbname=None, outputPath="", outputFile="",
            username=None, PGOPTIONS=None, host=None, port=None):
    if len(outputFile) == 0:
        (ok, out) = psql_run(ifile=ifile, ofile=outFile(ifile, outputPath),
                             flag=flag, dbname=dbname, username=username,
                             PGOPTIONS=PGOPTIONS, host=host, port=port)
    else:
        (ok, out) = psql_run(ifile=ifile, ofile=outFile(outputFile, outputPath),
                             flag=flag, dbname=dbname, username=username,
                             PGOPTIONS=PGOPTIONS, host=host, port=port)
    return (ok, out)

def psql_run(ifile=None, ofile=None, cmd=None, flag='-e', dbname=None,
             username=None, PGOPTIONS=None, host=None, port=None):
    '''
    Run a command or file against psql. Return True if OK.
    @param dbname: database name
    @param ifile: input file
    @param cmd: command line
    @param flag: -e  run SQL with no comments (default)
                 -a  run SQL with comments and psql notices
    @param username: psql user
    @param host: connect to a different host
    @param port: port where gpdb is running
    @param PGOPTIONS: connect to postgres via utility mode
    '''
    if dbname is None:
        dbname = DBNAME
    if username is None:
        username = PGUSER  # use the default login user
    if PGOPTIONS is None:
        PGOPTIONS = ""
    else:
        PGOPTIONS = "PGOPTIONS='%s'" % PGOPTIONS
    if host is None:
        host = "-h %s" % PGHOST
    else:
        host = "-h %s" % host
    if port is None:
        port = ""
    else:
        port = "-p %s" % port
    if cmd:
        arg = '-c "%s"' % cmd
    elif ifile:
        arg = ' < ' + ifile
        if not (flag == '-q'):  # don't echo commands sent to the server
            arg = '-e < ' + ifile
        if flag == '-a':
            arg = '-f ' + ifile
    else:
        raise PSQLError('missing cmd and ifile')

    if ofile == '-':
        ofile = '2>&1'
    elif not ofile:
        ofile = '> /dev/null 2>&1'
    else:
        ofile = '> %s 2>&1' % ofile
    return run('%s psql -d %s %s %s -U %s %s %s %s' %
               (PGOPTIONS, dbname, host, port, username, flag, arg, ofile))

def run(cmd):
    """
    Run a shell command. Return (True, [output]) if the command exits 0,
    or (False, [output]) otherwise.
    @params cmd: the command to run at the shell.
    """
    p = subprocess.Popen(cmd, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
    out = p.communicate()[0]
    ret = []
    ret.append(out)
    rc = False if p.wait() else True
    return (rc, ret)
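# Typical call patterns for the three runners above, mirroring how the test
# cases below use them (commented so nothing runs at import; the SQL and
# paths are illustrative):
#
#   run('gpload -f %s -d reuse_gptest' % mkpath('config/config_file'))
#   psql_run(cmd='select count(*) from texttable', dbname='reuse_gptest')
#   runfile(mkpath('setup.sql'))   # captures output in setup.out beside setup.sql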
def outFile(fname, outputPath=''):
    return changeExtFile(fname, ".out", outputPath)

def diffFile(fname, outputPath=""):
    return changeExtFile(fname, ".diff", outputPath)

def changeExtFile(fname, ext=".diff", outputPath=""):
    if len(outputPath) == 0:
        return os.path.splitext(fname)[0] + ext
    else:
        filename = fname.split("/")
        fname = os.path.splitext(filename[len(filename) - 1])[0]
        return outputPath + "/" + fname + ext

def gpdbAnsFile(fname):
    ext = '.ans'
    return os.path.splitext(fname)[0] + ext

def isFileEqual(f1, f2, optionalFlags="", outputPath="", myinitfile=""):
    LMYD = os.path.abspath(os.path.dirname(__file__))
    if not os.access(f1, os.R_OK):
        raise Exception('Error: cannot find file %s' % f1)
    if not os.access(f2, os.R_OK):
        raise Exception('Error: cannot find file %s' % f2)
    dfile = diffFile(f1, outputPath=outputPath)
    # Gets the suitePath name to add init_file
    suitePath = f1[0:f1.rindex("/")]
    if os.path.exists(suitePath + "/init_file"):
        (ok, out) = run('../gpdiff.pl -w ' + optionalFlags +
                        ' -I NOTICE: -I HINT: -I CONTEXT: -I GP_IGNORE: '
                        '--gp_init_file=%s/global_init_file --gp_init_file=%s/init_file '
                        '%s %s > %s 2>&1' % (LMYD, suitePath, f1, f2, dfile))
    else:
        if os.path.exists(myinitfile):
            (ok, out) = run('../gpdiff.pl -w ' + optionalFlags +
                            ' -I NOTICE: -I HINT: -I CONTEXT: -I GP_IGNORE: '
                            '--gp_init_file=%s/global_init_file --gp_init_file=%s '
                            '%s %s > %s 2>&1' % (LMYD, myinitfile, f1, f2, dfile))
        else:
            (ok, out) = run('../gpdiff.pl -w ' + optionalFlags +
                            ' -I NOTICE: -I HINT: -I CONTEXT: -I GP_IGNORE: '
                            '--gp_init_file=%s/global_init_file '
                            '%s %s > %s 2>&1' % (LMYD, f1, f2, dfile))
    if ok:
        os.unlink(dfile)
    return ok

def read_diff(ifile, outputPath):
    """
    Opens the diff file that is associated with the given input file and
    returns its contents as a string.
    """
    dfile = diffFile(ifile, outputPath)
    with open(dfile, 'r') as diff:
        return diff.read()
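# File-naming contract the comparison helpers rely on, using query 7 as an
# assumed example:
#
#   query7.sql   input run through psql by runfile()
#   query7.out   actual output, written by psql_run()
#   query7.ans   expected output, kept in the suite
#   query7.diff  produced by gpdiff.pl only when .out and .ans differ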
def modify_sql_file(num):
    file = mkpath('query%d.sql' % num)
    user = os.environ.get('USER')
    if not user:
        user = USER  # fall back to the LOGNAME-based global
    if os.path.isfile(file):
        for line in fileinput.FileInput(file, inplace=1):
            line = line.replace("gpload.py ", "gpload ")
            print str(re.sub('\n', '', line))

def copy_data(source='', target=''):
    cmd = 'cp ' + mkpath('data/' + source) + ' ' + mkpath(target)
    p = subprocess.Popen(cmd, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
    return p.communicate()

hostNameAddrs = get_ip(HOST)
masterPort = getPortMasterOnly()

def get_table_name():
    try:
        db = pg.DB(dbname='reuse_gptest', host='localhost', port=int(PGPORT))
    except Exception, e:
        errorMessage = str(e)
        print 'could not connect to database: ' + errorMessage
    queryString = """SELECT relname from pg_class
                     WHERE relname like 'ext_gpload_reusable%'
                     OR relname like 'staging_gpload_reusable%';"""
    resultList = db.query(queryString.encode('utf-8')).getresult()
    return resultList

def drop_tables():
    try:
        db = pg.DB(dbname='reuse_gptest', host='localhost', port=int(PGPORT))
    except Exception, e:
        errorMessage = str(e)
        print 'could not connect to database: ' + errorMessage
    list = get_table_name()
    for i in list:
        name = i[0]
        match = re.search('ext_gpload', name)
        if match:
            queryString = "DROP EXTERNAL TABLE %s" % name
            db.query(queryString.encode('utf-8'))
        else:
            queryString = "DROP TABLE %s" % name
            db.query(queryString.encode('utf-8'))

class PSQLError(Exception):
    '''
    PSQLError is the base class for exceptions in this module.
    http://docs.python.org/tutorial/errors.html
    We want to raise an error and not a failure. The reason for an error
    might be program error, file not found, etc.
    Failure is defined as a test case failure, when the actual output is
    different from the expected result.
    '''
    pass
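# Hedged sketch of the SQL drop_tables() ends up issuing; the relname
# suffixes are invented for illustration (gpload derives the real names
# from table OIDs):
#
#   SELECT relname FROM pg_class
#    WHERE relname LIKE 'ext_gpload_reusable%'
#       OR relname LIKE 'staging_gpload_reusable%';
#   DROP EXTERNAL TABLE ext_gpload_reusable_12345;   -- names matching ext_gpload
#   DROP TABLE staging_gpload_reusable_12345;        -- everything else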
class GPLoad_FormatOpts_TestCase(unittest.TestCase):

    def check_result(self, ifile, optionalFlags="-U3", outputPath=""):
        """
        PURPOSE: compare the actual and expected output files and report an
        error if they don't match.
        PARAMETERS:
            ifile: the name of the .sql file whose actual and expected outputs
                we want to compare. You may include the path as well as the
                filename. This function will process this file name to figure
                out the proper names of the .out and .ans files.
            optionalFlags: command-line options (if any) for diff. For example,
                pass " -B " (with the blank spaces) to ignore blank lines. By
                default, diffs are unified with 3 lines of context (i.e.
                optionalFlags is "-U3").
        """
        f1 = gpdbAnsFile(ifile)
        f2 = outFile(ifile, outputPath=outputPath)
        result = isFileEqual(f1, f2, optionalFlags, outputPath=outputPath)
        diff = None if result else read_diff(ifile, outputPath)
        self.assertTrue(result, "query resulted in diff:\n{}".format(diff))
        return True

    def doTest(self, num):
        file = mkpath('query%d.diff' % num)
        if os.path.isfile(file):
            run("rm -f" + " " + file)
        modify_sql_file(num)
        file = mkpath('query%d.sql' % num)
        runfile(file)
        self.check_result(file)

    def test_00_gpload_formatOpts_setup(self):
        "0  gpload setup"
        for num in range(1, 40):
            f = open(mkpath('query%d.sql' % num), 'w')
            f.write("\\! gpload -f " + mkpath('config/config_file') + " -d reuse_gptest\n"
                    + "\\! gpload -f " + mkpath('config/config_file') + " -d reuse_gptest\n")
            f.close()
        file = mkpath('setup.sql')
        runfile(file)
        self.check_result(file)

    def test_01_gpload_formatOpts_delimiter(self):
        "1  gpload formatOpts delimiter '|' with reuse "
        copy_data('external_file_01.txt', 'data_file.txt')
        write_config_file(reuse_flag='true', formatOpts='text', file='data_file.txt',
                          table='texttable', delimiter="'|'")
        self.doTest(1)

    def test_02_gpload_formatOpts_delimiter(self):
        "2  gpload formatOpts delimiter '\t' with reuse"
        copy_data('external_file_02.txt', 'data_file.txt')
        write_config_file(reuse_flag='true', formatOpts='text', file='data_file.txt',
                          table='texttable', delimiter="'\t'")
        self.doTest(2)

    def test_03_gpload_formatOpts_delimiter(self):
        "3  gpload formatOpts delimiter E'\\t' with reuse"
        copy_data('external_file_02.txt', 'data_file.txt')
        write_config_file(reuse_flag='true', formatOpts='text', file='data_file.txt',
                          table='texttable', delimiter="E'\\t'")
        self.doTest(3)

    def test_04_gpload_formatOpts_delimiter(self):
        "4  gpload formatOpts delimiter E'\u0009' with reuse"
        copy_data('external_file_02.txt', 'data_file.txt')
        write_config_file(reuse_flag='true', formatOpts='text', file='data_file.txt',
                          table='texttable', delimiter="E'\u0009'")
        self.doTest(4)

    def test_05_gpload_formatOpts_delimiter(self):
        "5  gpload formatOpts delimiter E'\\'' with reuse"
        copy_data('external_file_03.txt', 'data_file.txt')
        write_config_file(reuse_flag='true', formatOpts='text', file='data_file.txt',
                          table='texttable', delimiter="E'\''")
        self.doTest(5)

    def test_06_gpload_formatOpts_delimiter(self):
        "6  gpload formatOpts delimiter \"'\" with reuse"
        copy_data('external_file_03.txt', 'data_file.txt')
        write_config_file(reuse_flag='true', formatOpts='text', file='data_file.txt',
                          table='texttable', delimiter="\"'\"")
        self.doTest(6)

    def test_07_gpload_reuse_table_insert_mode_without_reuse(self):
        "7  gpload insert mode without reuse"
        runfile(mkpath('setup.sql'))
        f = open(mkpath('query7.sql'), 'a')
        f.write("\\! psql -d reuse_gptest -c 'select count(*) from texttable;'")
        f.close()
        write_config_file(mode='insert', reuse_flag='false')
        self.doTest(7)

    def test_08_gpload_reuse_table_update_mode_with_reuse(self):
        "8  gpload update mode with reuse"
        drop_tables()
        copy_data('external_file_04.txt', 'data_file.txt')
        write_config_file(mode='update', reuse_flag='true', file='data_file.txt')
        self.doTest(8)

    def test_09_gpload_reuse_table_update_mode_without_reuse(self):
        "9  gpload update mode without reuse"
        f = open(mkpath('query9.sql'), 'a')
        f.write("\\! psql -d reuse_gptest -c 'select count(*) from texttable;'\n"
                + "\\! psql -d reuse_gptest -c 'select * from texttable where n2=222;'")
        f.close()
        copy_data('external_file_05.txt', 'data_file.txt')
        write_config_file(mode='update', reuse_flag='false', file='data_file.txt')
        self.doTest(9)
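    # Shape of every numbered case below (sketch, using case 1 as the example):
    #   1. copy_data() stages data/external_file_NN as data_file.*
    #   2. write_config_file() renders config/config_file for the scenario
    #   3. doTest(N) reruns queryN.sql (two gpload runs, written by test_00)
    #      and check_result() diffs queryN.out against queryN.ans via gpdiff.pl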
    def test_10_gpload_reuse_table_merge_mode_with_reuse(self):
        "10  gpload merge mode with reuse "
        drop_tables()
        copy_data('external_file_06.txt', 'data_file.txt')
        write_config_file('merge', 'true', file='data_file.txt')
        self.doTest(10)

    def test_11_gpload_reuse_table_merge_mode_without_reuse(self):
        "11  gpload merge mode without reuse "
        copy_data('external_file_07.txt', 'data_file.txt')
        write_config_file('merge', 'false', file='data_file.txt')
        self.doTest(11)

    def test_12_gpload_reuse_table_merge_mode_with_different_columns_number_in_file(self):
        "12  gpload merge mode with reuse (RERUN with different columns number in file) "
        psql_run(cmd="ALTER TABLE texttable ADD column n8 text", dbname='reuse_gptest')
        copy_data('external_file_08.txt', 'data_file.txt')
        write_config_file('merge', 'true', file='data_file.txt')
        self.doTest(12)

    def test_13_gpload_reuse_table_merge_mode_with_different_columns_number_in_DB(self):
        "13  gpload merge mode with reuse (RERUN with different columns number in DB table) "
        preTest = mkpath('pre_test_13.sql')
        psql_run(preTest, dbname='reuse_gptest')
        copy_data('external_file_09.txt', 'data_file.txt')
        write_config_file('merge', 'true', file='data_file.txt')
        self.doTest(13)

    def test_14_gpload_reuse_table_update_mode_with_reuse_RERUN(self):
        "14  gpload update mode with reuse (RERUN) "
        write_config_file('update', 'true', file='data_file.txt')
        self.doTest(14)

    def test_15_gpload_reuse_table_merge_mode_with_different_columns_order(self):
        "15  gpload merge mode with different columns' order "
        copy_data('external_file_10.txt', 'data/data_file.tbl')
        write_config_file('merge', 'true', file='data/data_file.tbl',
                          columns_flag='1', mapping='1')
        self.doTest(15)

    def test_16_gpload_formatOpts_quote(self):
        "16  gpload formatOpts quote unspecified in CSV with reuse "
        copy_data('external_file_11.csv', 'data_file.csv')
        write_config_file(reuse_flag='true', formatOpts='csv', file='data_file.csv',
                          table='csvtable', format='csv', delimiter="','")
        self.doTest(16)

    def test_17_gpload_formatOpts_quote(self):
        "17  gpload formatOpts quote '\\x26'(&) with reuse"
        copy_data('external_file_12.csv', 'data_file.csv')
        write_config_file(reuse_flag='true', formatOpts='csv', file='data_file.csv',
                          table='csvtable', format='csv', delimiter="','", quote="'\x26'")
        self.doTest(17)

    def test_18_gpload_formatOpts_quote(self):
        "18  gpload formatOpts quote E'\\x26'(&) with reuse"
        copy_data('external_file_12.csv', 'data_file.csv')
        write_config_file(reuse_flag='true', formatOpts='csv', file='data_file.csv',
                          table='csvtable', format='csv', delimiter="','", quote="E'\x26'")
        self.doTest(18)

    def test_19_gpload_formatOpts_escape(self):
        "19  gpload formatOpts escape '\\' with reuse"
        copy_data('external_file_01.txt', 'data_file.txt')
        file = mkpath('setup.sql')
        runfile(file)
        write_config_file(reuse_flag='true', formatOpts='text', file='data_file.txt',
                          table='texttable', escape='\\')
        self.doTest(19)

    def test_20_gpload_formatOpts_escape(self):
        "20  gpload formatOpts escape '\\' with reuse"
        copy_data('external_file_01.txt', 'data_file.txt')
        write_config_file(reuse_flag='true', formatOpts='text', file='data_file.txt',
                          table='texttable', escape='\x5C')
        self.doTest(20)

    def test_21_gpload_formatOpts_escape(self):
        "21  gpload formatOpts escape E'\\\\' with reuse"
        copy_data('external_file_01.txt', 'data_file.txt')
        write_config_file(reuse_flag='true', formatOpts='text', file='data_file.txt',
                          table='texttable', escape="E'\\\\'")
        self.doTest(21)

    # case 22 is flaky on concourse. It may report: Fatal Python error:
    # GC object already tracked. We can't reproduce it locally, so we
    # disable it in case it blocks others.
    #def test_22_gpload_error_count(self):
    #    "22  gpload error count"
    #    f = open(mkpath('query22.sql'),'a')
    #    f.write("\\! psql -d reuse_gptest -c 'select count(*) from csvtable;'")
    #    f.close()
    #    f = open(mkpath('data/large_file.csv'),'w')
    #    for i in range(0, 10000):
    #        if i % 2 == 0:
    #            f.write('1997,Ford,E350,"ac, abs, moon",3000.00,a\n')
    #        else:
    #            f.write('1997,Ford,E350,"ac, abs, moon",3000.00\n')
    #    f.close()
    #    copy_data('large_file.csv','data_file.csv')
    #    write_config_file(reuse_flag='true',formatOpts='csv',file='data_file.csv',table='csvtable',format='csv',delimiter="','",log_errors=True,error_limit='90000000')
    #    self.doTest(22)

    def test_23_gpload_error_count(self):
        "23  gpload error count"
        f = open(mkpath('query23.sql'), 'a')
        f.write("\\! psql -d reuse_gptest -c 'select count(*) from csvtable;'")
        f.close()
        f = open(mkpath('data/large_file.csv'), 'w')
        for i in range(0, 10000):
            if i % 2 == 0:
                f.write('1997,Ford,E350,"ac, abs, moon",3000.00,a\n')
            else:
                f.write('1997,Ford,E350,"ac, abs, moon",3000.00\n')
        f.close()
        copy_data('large_file.csv', 'data_file.csv')
        write_config_file(reuse_flag='true', formatOpts='csv', file='data_file.csv',
                          table='csvtable', format='csv', delimiter="','",
                          error_table="err_table", error_limit='90000000')
        self.doTest(23)

    def test_24_gpload_error_count_with_ext_schema(self):
        "24  gpload error count with ext schema"
        file = mkpath('setup.sql')
        runfile(file)
        f = open(mkpath('query24.sql'), 'a')
        f.write("\\! psql -d reuse_gptest -c 'select count(*) from csvtable;'")
        f.close()
        f = open(mkpath('data/large_file.csv'), 'w')
        for i in range(0, 10000):
            if i % 2 == 0:
                f.write('1997,Ford,E350,"ac, abs, moon",3000.00,a\n')
            else:
                f.write('1997,Ford,E350,"ac, abs, moon",3000.00\n')
        f.close()
        copy_data('large_file.csv', 'data_file.csv')
        write_config_file(reuse_flag='true', formatOpts='csv', file='data_file.csv',
                          table='csvtable', format='csv', delimiter="','",
                          log_errors=True, error_limit='90000000', externalSchema='test')
        self.doTest(24)
Yes, this is passed to the open() function, so", "'\\t' with reuse\" copy_data('external_file_02.txt','data_file.txt') write_config_file(reuse_flag='true',formatOpts='text',file='data_file.txt',table='texttable',delimiter=\"'\\t'\") self.doTest(2) def test_03_gpload_formatOpts_delimiter(self): \"3 gpload", "test_29_gpload_reuse_table_insert_mode_with_reuse_and_null(self): \"29 gpload insert mode with reuse and null\" runfile(mkpath('setup.sql'))", "to empty\" write_config_file(mode='merge',reuse_flag='true',fast_match='true',file='data_file.txt') self.doTest(35) def test_36_gpload_reuse_table_merge_mode_default_encoding(self): \"36 gpload merge mode", "f.write(\"\\n LOCAL_HOSTNAME:\") f.write(\"\\n - \"+hostNameAddrs) if portNum: f.write(\"\\n PORT: \"+portNum)", "= mkpath('query%d.sql' % num) runfile(file) self.check_result(file) def test_00_gpload_formatOpts_setup(self): \"0 gpload", "if portNum: f.write(\"\\n PORT: \"+portNum) f.write(\"\\n FILE:\") f.write(\"\\n - \"+mkpath(file))", "= mkpath('setup.sql') runfile(file) copy_data('external_file_04.txt','data_file.txt') write_config_file(mode='insert',reuse_flag='false',fast_match='false',file='data_file.txt',table='texttable1', error_limit='1000', fill=True) self.doTest(39) if __name__", "\"+table) if mode: if mode == 'insert': f.write(\"\\n - MODE:", "\"DROP EXTERNAL TABLE %s\" % name db.query(queryString.encode('utf-8')) else: queryString =", "(ok, out) = run('../gpdiff.pl -w ' + optionalFlags + \\", "from csvtable;'\") f.close() copy_data('external_file_13.csv','data_file.csv') write_config_file(reuse_flag='true',formatOpts='csv',file='data_file.csv',table='csvtable',format='csv',delimiter=\"','\",log_errors=True,error_limit='10',staging_table='staging_table',externalSchema='test') self.doTest(26) def test_27_gpload_ext_staging_table_with_externalschema(self): \"27 gpload", "ofile = None, cmd = None, flag = '-e',dbname =", "test_09_gpload_reuse_table_update_mode_without_reuse(self): \"9 gpload update mode without reuse\" f = open(mkpath('query9.sql'),'a')", "- MAPPING:\") f.write(\"\\n s1: s_s1\") f.write(\"\\n s2: s_s2\") f.write(\"\\n dt:", "write_config_file(reuse_flag='true',formatOpts='text',file='data_file.txt',table='texttable',escape= '\\x5C') self.doTest(20) def test_21_gpload_formatOpts_escape(self): \"21 gpload formatOpts escape E'\\\\\\\\'", "file = mkpath('setup.sql') runfile(file) self.check_result(file) def test_01_gpload_formatOpts_delimiter(self): \"1 gpload formatOpts", "= None if result else read_diff(ifile, outputPath) self.assertTrue(result, \"query resulted", "import re try: import subprocess32 as subprocess except: import subprocess", "with reuse (RERUN) \" write_config_file('update','true',file='data_file.txt') self.doTest(14) def test_15_gpload_reuse_table_merge_mode_with_different_columns_order(self): \"15 gpload", "self.doTest(12) def test_13_gpload_reuse_table_merge_mode_with_different_columns_number_in_DB(self): \"13 gpload merge mode with reuse (RERUN", "backwards compatible). 
Yes, this is passed to the open() function,", "f.write(\"\n n3: s_n3\") f.write(\"\n n4: s_n4\") f.write(\"\n n5: s_n5\") f.write(\"\n", "name = i[0] match = re.search('ext_gpload',name) if match: queryString =", "write_config_file(mode='merge',reuse_flag='true',fast_match='true',file='data_file.txt',encoding='GBK') self.doTest(34) def test_35_gpload_reuse_table_merge_mode_with_fast_match_default_encoding(self): \"35 gpload does not reuse table", "Exception(error_msg) return str(master_value) \"\"\" Global Values \"\"\" MYD = os.path.abspath(os.path.dirname(__file__))", "result = isFileEqual(f1, f2, optionalFlags, outputPath=outputPath) diff = None if", "the path as well as the filename. This function will", "\"gpconfig -s %s\" % ( \"port\" ) cmd = \"source", "f.write(\"\n - s_n4: decimal\") f.write(\"\n - s_n5: numeric\") f.write(\"\n -", "copy_data('external_file_04.txt','data_file.txt') write_config_file(mode='update',reuse_flag='true',file='data_file.txt') self.doTest(8) def test_09_gpload_reuse_table_update_mode_without_reuse(self): \"9 gpload update mode without", "f.write(\"\n\") f.close() def runfile(ifile, flag='', dbname=None, outputPath=\"\", outputFile=\"\", username=None, PGOPTIONS=None,", "line in out: if re.search(master_pattern, line): master_value = int(line.split()[3].strip()) if", "Failure is defined as test case failures, when the output", "-d reuse_gptest -c 'select count(*) from test.csvtable;'\") f.close() copy_data('external_file_13.csv','data_file.csv') write_config_file(reuse_flag='true',formatOpts='csv',file='data_file.csv',table='test.csvtable',format='csv',delimiter=\"','\",log_errors=True,error_limit='10',staging_table='staging_table',externalSchema=\"'%'\")", "gpload error count\" # f = open(mkpath('query22.sql'),'a') # f.write(\"\! psql", "s_s2: text\") f.write(\"\n - s_dt: timestamp\") f.write(\"\n - s_s3: text\")", "= open(mkpath('query26.sql'),'a') f.write(\"\! psql -d reuse_gptest -c 'select count(*) from", "NOTICE: -I HINT: -I CONTEXT: -I GP_IGNORE: --gp_init_file=%s/global_init_file --gp_init_file=%s/init_file '", "fname = os.path.splitext( filename[len( filename ) - 1] )[0] return", "read_diff(ifile, outputPath) self.assertTrue(result, \"query resulted in diff:\n{}\".format(diff)) return True def", "this file name to figure out the proper names of", "update mode with fast match and different columns number) \"", "get_port_from_conf() return port if port else 5432 def get_ip(hostname=None): if", "externalschema\" file = mkpath('setup.sql') runfile(file) f = open(mkpath('query27.sql'),'a') f.write(\"\! psql", "- s_n7: double precision\") f.write(\"\n - s_n8: text\") f.write(\"\n -", "mkpath('query%d.diff' % num) if os.path.isfile(file): run(\"rm -f\" + \" \"", "' -I NOTICE: -I HINT: -I CONTEXT: -I GP_IGNORE: --gp_init_file=%s/global_init_file", "\" copy_data('external_file_11.csv','data_file.csv') write_config_file(reuse_flag='true',formatOpts='csv',file='data_file.csv',table='csvtable',format='csv',delimiter=\"','\") self.doTest(16) def test_17_gpload_formatOpts_quote(self): \"17 gpload formatOpts quote", "the .out and .ans files. 
optionalFlags: command-line options (if any)", "it is configured with externalschema\" file = mkpath('setup.sql') runfile(file) f", "= os.path.splitext( filename[len( filename ) - 1] )[0] return outputPath", "s1: s_s1\") f.write(\"\\n s2: s_s2\") f.write(\"\\n dt: s_dt\") f.write(\"\\n s3:", "host = host, port = port) return (ok, out) def", "test_02_gpload_formatOpts_delimiter(self): \"2 gpload formatOpts delimiter '\\t' with reuse\" copy_data('external_file_02.txt','data_file.txt') write_config_file(reuse_flag='true',formatOpts='text',file='data_file.txt',table='texttable',delimiter=\"'\\t'\")", "copy_data('external_file_04.txt','data_file.txt') write_config_file(mode='insert',reuse_flag='false',fast_match='false',file='data_file.txt',table='texttable1', error_limit='1000', fill=True) self.doTest(39) if __name__ == '__main__': suite", "file = mkpath('setup.sql') runfile(file) f = open(mkpath('query27.sql'),'a') f.write(\"\\! psql -d", "username = PGUSER # Use the default login user if", "- s1\") f.write(\"\\n - s2\") if mapping=='1': f.write(\"\\n - MAPPING:\")", "0: ipv6 = myip return ipv6 elif myip.find(\".\") > 0:", "f.write(\"\\n - DELIMITER: \"+delimiter) if encoding: f.write(\"\\n - ENCODING: \"+encoding)", "flag =flag, dbname= dbname, username= username, PGOPTIONS= PGOPTIONS, host =", "match and external schema\" file = mkpath('setup.sql') runfile(file) copy_data('external_file_04.txt','data_file.txt') write_config_file(mode='merge',reuse_flag='true',fast_match='true',file='data_file.txt',externalSchema='test')", "column n8 text\",dbname='reuse_gptest') copy_data('external_file_08.txt','data_file.txt') write_config_file('merge','true',file='data_file.txt') self.doTest(12) def test_13_gpload_reuse_table_merge_mode_with_different_columns_number_in_DB(self): \"13 gpload", "-d reuse_gptest\\n\") f.close() file = mkpath('setup.sql') runfile(file) self.check_result(file) def test_01_gpload_formatOpts_delimiter(self):", "preload: f.write(\"\\n PRELOAD:\") f.write(\"\\n - REUSE_TABLES: \"+reuse_flag) f.write(\"\\n - FAST_MATCH:", "gpload update mode with fast match\" drop_tables() copy_data('external_file_04.txt','data_file.txt') write_config_file(mode='update',reuse_flag='true',fast_match='true',file='data_file.txt') self.doTest(30)", "OR relname like 'staging_gpload_reusable%';\"\"\" resultList = db.query(queryString.encode('utf-8')).getresult() return resultList def", "+ mkpath(target) p = subprocess.Popen(cmd,shell=True,stdout=subprocess.PIPE,stderr=subprocess.PIPE) return p.communicate() hostNameAddrs = get_ip(HOST)", "in file) \" psql_run(cmd=\"ALTER TABLE texttable ADD column n8 text\",dbname='reuse_gptest')", "\"32 gpload update mode when reuse table is false and", "columns' order \" copy_data('external_file_10.txt','data/data_file.tbl') write_config_file('merge','true',file='data/data_file.tbl',columns_flag='1',mapping='1') self.doTest(15) def test_16_gpload_formatOpts_quote(self): \"16 gpload", "def test_19_gpload_formatOpts_escape(self): \"19 gpload formatOpts escape '\\\\' with reuse\" copy_data('external_file_01.txt','data_file.txt')", "no comments (default) -a Run SQL with comments and psql", "hostname = socket.gethostname() else: hostname = hostname hostinfo = socket.getaddrinfo(hostname,", "DBNAME if username is None: username = PGUSER # Use", "-c 'select count(*) from test.csvtable;'\") f.close() copy_data('external_file_13.csv','data_file.csv') 
write_config_file(reuse_flag='true',formatOpts='csv',file='data_file.csv',table='test.csvtable',format='csv',delimiter=\"','\",log_errors=True,error_limit='10',staging_table='t.staging_table') self.doTest(28) def", "self.doTest(1) def test_02_gpload_formatOpts_delimiter(self): \"2 gpload formatOpts delimiter '\\t' with reuse\"", "def test_18_gpload_formatOpts_quote(self): \"18 gpload formatOpts quote E'\\\\x26'(&) with reuse\" copy_data('external_file_12.csv','data_file.csv')", "\" + file) modify_sql_file(num) file = mkpath('query%d.sql' % num) runfile(file)", "(ok,out) = run(cmd) if not ok: raise Exception(\"Unable to connect", "outputFile=\"\", username=None, PGOPTIONS=None, host = None, port = None): if", "test_35_gpload_reuse_table_merge_mode_with_fast_match_default_encoding(self): \"35 gpload does not reuse table when encoding is", "PGOPTIONS if host is None: host = \"-h %s\" %", "@param host : to connect to a different host @param", "path as well as the filename. This function will process", "'select * from texttable where n2=222;'\") f.close() copy_data('external_file_05.txt','data_file.txt') write_config_file(mode='update',reuse_flag='false',file='data_file.txt') self.doTest(9)", "def test_08_gpload_reuse_table_update_mode_with_reuse(self): \"8 gpload update mode with reuse\" drop_tables() copy_data('external_file_04.txt','data_file.txt')", "reuse\" copy_data('external_file_03.txt','data_file.txt') write_config_file(reuse_flag='true',formatOpts='text',file='data_file.txt',table='texttable',delimiter=\"\\\"'\\\"\") self.doTest(6) def test_07_gpload_reuse_table_insert_mode_without_reuse(self): \"7 gpload insert mode", "with reuse (RERUN with different columns number in file) \"", "@params cmd: The command to run at the shell. oFile:", "=ifile, ofile =outFile(outputFile, outputPath), flag =flag, dbname= dbname, username= username,", "n5: s_n5\") f.write(\"\\n n6: s_n6\") f.write(\"\\n n7: s_n7\") f.write(\"\\n n8:", "outputPath ) # Gets the suitePath name to add init_file", "already exists: 'a' = append; 'w' = write. Defaults to", "out) def psql_run(ifile = None, ofile = None, cmd =", "find file %s' % f2 ) dfile = diffFile( f1,", "> 0: ipv4 = myip return ipv4 def getPortMasterOnly(host =", "\"+mkpath('config/config_file')+ \" -d reuse_gptest\\n\"+\"\\! gpload -f \"+mkpath('config/config_file')+ \" -d reuse_gptest\\n\")", "return ipv6 elif myip.find(\".\") > 0: ipv4 = myip return", "with comments and psql notice @param username: psql user @param", "update mode without reuse\" f = open(mkpath('query9.sql'),'a') f.write(\"\\! 
psql -d", "0: ipv4 = myip return ipv4 def getPortMasterOnly(host = 'localhost',master_value", "get_port_from_conf(): file = os.environ.get('MASTER_DATA_DIRECTORY')+'/postgresql.conf' if os.path.isfile(file): with open(file) as f:", "= '2>&1' elif not ofile: ofile = '> /dev/null 2>&1'", "line.replace(\"gpload.py \",\"gpload \") print str(re.sub('\\n','',line)) def copy_data(source='',target=''): cmd = 'cp", "changeExtFile( fname, \".diff\", outputPath ) def changeExtFile( fname, ext =", "return run('%s psql -d %s %s %s -U %s %s", "merge mode with reuse (RERUN with different columns number in", "- COLUMNS:\") f.write(\"\\n - s_s1: text\") f.write(\"\\n - s_s2: text\")", "not in sys.path: sys.path.append(UPD) DBNAME = \"postgres\" USER = os.environ.get(", "if not os.access( f2, os.R_OK ): raise Exception( 'Error: cannot", "-d reuse_gptest -c 'select count(*) from csvtable;'\") f.close() copy_data('external_file_13.csv','data_file.csv') write_config_file(reuse_flag='true',formatOpts='csv',file='data_file.csv',table='csvtable',format='csv',delimiter=\"','\",log_errors=True,error_limit='10',staging_table='staging_table',externalSchema='test')", "configured with externalschema\" file = mkpath('setup.sql') runfile(file) f = open(mkpath('query26.sql'),'a')", "% (host, user)) for line in out: out = line.split('\\n')", "- n1\") f.write(\"\\n - s1\") f.write(\"\\n - s2\") if mapping=='1':", "f = open(mkpath('data/large_file.csv'),'w') for i in range(0, 10000): if i", "-I NOTICE: -I HINT: -I CONTEXT: -I GP_IGNORE: --gp_init_file=%s/global_init_file --gp_init_file=%s", "user if PGOPTIONS is None: PGOPTIONS = \"\" else: PGOPTIONS", "and differenct columns number) \" psql_run(cmd=\"ALTER TABLE texttable ADD column", "error_limit) if error_table: f.write(\"\\n - ERROR_TABLE: \" + error_table) f.write(\"\\n", "= os.environ['PGPORT']): master_pattern = \"Context:\\s*-1\\s*Value:\\s*\\d+\" command = \"gpconfig -s %s\"", "mode when reuse table is false and fast match is", "\"30 gpload update mode with fast match\" drop_tables() copy_data('external_file_04.txt','data_file.txt') write_config_file(mode='update',reuse_flag='true',fast_match='true',file='data_file.txt')", "formatOpts escape '\\\\' with reuse\" copy_data('external_file_01.txt','data_file.txt') write_config_file(reuse_flag='true',formatOpts='text',file='data_file.txt',table='texttable',escape= '\\x5C') self.doTest(20) def", "PGOPTIONS: connects to postgres via utility mode ''' if dbname", "the suitePath name to add init_file suitePath = f1[0:f1.rindex( \"/\"", "else: (ok, out) = run( '../gpdiff.pl -w ' + optionalFlags", "num) runfile(file) self.check_result(file) def test_00_gpload_formatOpts_setup(self): \"0 gpload setup\" for num", "changeExtFile(fname, \".out\", outputPath) def diffFile( fname, outputPath = \"\" ):", "psql -d reuse_gptest -c 'select count(*) from texttable;'\\n\"+\"\\! 
psql -d", "(RERUN with different columns number in file) \" psql_run(cmd=\"ALTER TABLE", "not connect to database: ' + errorMessage queryString = \"\"\"SELECT", "i % 2 == 0: f.write('1997,Ford,E350,\"ac, abs, moon\",3000.00,a\\n') else: f.write('1997,Ford,E350,\"ac,", "formatOpts escape '\\\\' with reuse\" copy_data('external_file_01.txt','data_file.txt') file = mkpath('setup.sql') runfile(file)", "FAST_MATCH: \"+fast_match) if staging_table: f.write(\"\\n - STAGING_TABLE: \"+staging_table) f.write(\"\\n\") f.close()", "with reuse \" copy_data('external_file_01.txt','data_file.txt') write_config_file(reuse_flag='true',formatOpts='text',file='data_file.txt',table='texttable',delimiter=\"'|'\") self.doTest(1) def test_02_gpload_formatOpts_delimiter(self): \"2 gpload", "import socket import fileinput import platform import re try: import", "reuse_flag='',columns_flag='0',mapping='0',portNum='8081',database='reuse_gptest',host='localhost',formatOpts='text',file='data/external_file_01.txt',table='texttable',format='text',delimiter=\"'|'\",escape='',quote='',truncate='False',log_errors=None, error_limit='0',error_table=None,externalSchema=None,staging_table=None,fast_match='false', encoding=None, preload=True, fill=False): f = open(mkpath('config/config_file'),'w') f.write(\"VERSION: 1.0.0.1\")", "# Use the default login user if PGOPTIONS is None:", "write_config_file(reuse_flag='true',formatOpts='csv',file='data_file.csv',table='csvtable',format='csv',delimiter=\"','\",log_errors=True,error_limit='10',staging_table='staging_table') self.doTest(25) def test_26_gpload_ext_staging_table_with_externalschema(self): \"26 gpload reuse ext_staging_table if it", "this module http://docs.python.org/tutorial/errors.html We want to raise an error and", "count with ext schema\" file = mkpath('setup.sql') runfile(file) f =", "if mode == 'merge': f.write(\"\\n - MODE: \"+'merge') f.write(\"\\n -", "number in DB table) \" preTest = mkpath('pre_test_13.sql') psql_run(preTest, dbname='reuse_gptest')", "test_31_gpload_reuse_table_update_mode_with_fast_match_and_different_columns_number(self): \"31 gpload update mode with fast match and differenct", "= socket.getaddrinfo(hostname, None) ipaddrlist = list(set([(ai[4][0]) for ai in hostinfo]))", "write_config_file(mode='insert', reuse_flag='',columns_flag='0',mapping='0',portNum='8081',database='reuse_gptest',host='localhost',formatOpts='text',file='data/external_file_01.txt',table='texttable',format='text',delimiter=\"'|'\",escape='',quote='',truncate='False',log_errors=None, error_limit='0',error_table=None,externalSchema=None,staging_table=None,fast_match='false', encoding=None, preload=True, fill=False): f = open(mkpath('config/config_file'),'w') f.write(\"VERSION:", "command. Return (True, [result]) if OK, or (False, []) otherwise.", "self.doTest(27) def test_28_gpload_ext_staging_table_with_dot(self): \"28 gpload reuse ext_staging_table if it is", "a command or file against psql. 
Return True if OK.", "\"query resulted in diff:\\n{}\".format(diff)) return True def doTest(self, num): file", "SCHEMA: \"+externalSchema) if preload: f.write(\"\\n PRELOAD:\") f.write(\"\\n - REUSE_TABLES: \"+reuse_flag)", "%s > %s 2>&1' % ( LMYD, f1, f2, dfile", "num): file = mkpath('query%d.diff' % num) if os.path.isfile(file): run(\"rm -f\"", "running @param PGOPTIONS: connects to postgres via utility mode '''", "f.write(\"\\n - ERROR_TABLE: \" + error_table) f.write(\"\\n - ERROR_LIMIT: \"", "%s 2>&1' % ofile return run('%s psql -d %s %s", "gpload reuse ext_staging_table if it is configured with dot\" file", "delimiter E'\\\\'' with reuse\" copy_data('external_file_03.txt','data_file.txt') write_config_file(reuse_flag='true',formatOpts='text',file='data_file.txt',table='texttable',delimiter=\"E'\\''\") self.doTest(5) def test_06_gpload_formatOpts_delimiter(self): \"6", "True if OK. @param dbname: database name @param ifile: input", "fast match is true\" drop_tables() copy_data('external_file_08.txt','data_file.txt') write_config_file(mode='update',reuse_flag='false',fast_match='true',file='data_file.txt') self.doTest(32) def test_33_gpload_reuse_table_merge_mode_with_fast_match_and_external_schema(self):", "escape '\\\\' with reuse\" copy_data('external_file_01.txt','data_file.txt') file = mkpath('setup.sql') runfile(file) write_config_file(reuse_flag='true',formatOpts='text',file='data_file.txt',table='texttable',escape='\\\\')", "\" psql_run(cmd=\"ALTER TABLE texttable ADD column n8 text\",dbname='reuse_gptest') copy_data('external_file_08.txt','data_file.txt') write_config_file('merge','true',file='data_file.txt')", "d = mkpath('config') if not os.path.exists(d): os.mkdir(d) def write_config_file(mode='insert', reuse_flag='',columns_flag='0',mapping='0',portNum='8081',database='reuse_gptest',host='localhost',formatOpts='text',file='data/external_file_01.txt',table='texttable',format='text',delimiter=\"'|'\",escape='',quote='',truncate='False',log_errors=None,", "drop_tables() copy_data('external_file_06.txt','data_file.txt') write_config_file('merge','true',file='data_file.txt') self.doTest(10) def test_11_gpload_reuse_table_merge_mode_without_reuse(self): \"11 gpload merge mode", "os.path.exists(myinitfile): (ok, out) = run('../gpdiff.pl -w ' + optionalFlags +", "-d reuse_gptest -c 'select count(*) from csvtable;'\") f.close() f =", "return port if port else 5432 def get_ip(hostname=None): if hostname", "\"+database) f.write(\"\\nUSER: \"+os.environ.get('USER')) f.write(\"\\nHOST: \"+hostNameAddrs) f.write(\"\\nPORT: \"+masterPort) f.write(\"\\nGPLOAD:\") f.write(\"\\n INPUT:\")", "list = get_table_name() for i in list: name = i[0]", "if OK, or (False, []) otherwise. 
@params cmd: The command", "\"\" ): return changeExtFile( fname, \".diff\", outputPath ) def changeExtFile(", "port : port where gpdb is running @param PGOPTIONS: connects", "copy_data('external_file_01.txt','data_file.txt') write_config_file(reuse_flag='true',formatOpts='text',file='data_file.txt',table='texttable',delimiter=\"'|'\") self.doTest(1) def test_02_gpload_formatOpts_delimiter(self): \"2 gpload formatOpts delimiter '\\t'", "= run('../gpdiff.pl -w ' + optionalFlags + \\ ' -I", "= \"\"): \"\"\" PURPOSE: compare the actual and expected output", "if preload: f.write(\"\\n PRELOAD:\") f.write(\"\\n - REUSE_TABLES: \"+reuse_flag) f.write(\"\\n -", "return match1.group() def get_port(): port = os.environ['PGPORT'] if not port:", "if database: f.write(\"\\nDATABASE: \"+database) f.write(\"\\nUSER: \"+os.environ.get('USER')) f.write(\"\\nHOST: \"+hostNameAddrs) f.write(\"\\nPORT: \"+masterPort)", "test_26_gpload_ext_staging_table_with_externalschema(self): \"26 gpload reuse ext_staging_table if it is configured with", "-d reuse_gptest -c 'select count(*) from test.csvtable;'\") f.close() copy_data('external_file_13.csv','data_file.csv') write_config_file(reuse_flag='true',formatOpts='csv',file='data_file.csv',table='test.csvtable',format='csv',delimiter=\"','\",log_errors=True,error_limit='10',staging_table='t.staging_table')", "= mkpath('setup.sql') runfile(file) copy_data('external_file_04.txt','data_file.txt') write_config_file(mode='merge',reuse_flag='true',fast_match='false',file='data_file.txt',encoding='xxxx') self.doTest(37) def test_38_gpload_without_preload(self): \"38 gpload", "def test_05_gpload_formatOpts_delimiter(self): \"5 gpload formatOpts delimiter E'\\\\'' with reuse\" copy_data('external_file_03.txt','data_file.txt')", "n1\") f.write(\"\\n - s1\") f.write(\"\\n - s2\") if mapping=='1': f.write(\"\\n", "quote E'\\\\x26'(&) with reuse\" copy_data('external_file_12.csv','data_file.csv') write_config_file(reuse_flag='true',formatOpts='csv',file='data_file.csv',table='csvtable',format='csv',delimiter=\"','\",quote=\"E'\\x26'\") self.doTest(18) def test_19_gpload_formatOpts_escape(self): \"19", "> 0: ipv6 = myip return ipv6 elif myip.find(\".\") >", "- s_dt: timestamp\") f.write(\"\\n - s_s3: text\") f.write(\"\\n - s_n1:", "== 0: (ok, out) = psql_run(ifile = ifile,ofile = outFile(ifile,", "\"/\" + fname + ext def gpdbAnsFile(fname): ext = '.ans'", "): raise Exception( 'Error: cannot find file %s' % f1", "= socket.gethostname() GPHOME = os.getenv(\"GPHOME\") PGPORT = get_port() PGUSER =", "files. optionalFlags: command-line options (if any) for diff. For example,", "\"\" else: port = \"-p %s\" % port if cmd:", "None if result else read_diff(ifile, outputPath) self.assertTrue(result, \"query resulted in", "def test_24_gpload_error_count(self): \"24 gpload error count with ext schema\" file", "seldom issue. we can't reproduce it locally, so we disable", "write_config_file(reuse_flag='true',formatOpts='text',file='data_file.txt',table='texttable',delimiter=\"E'\\\\t'\") self.doTest(3) def test_04_gpload_formatOpts_delimiter(self): \"4 gpload formatOpts delimiter E'\\u0009' with", "host @param port : port where gpdb is running @param", "(True, [result]) if OK, or (False, []) otherwise. @params cmd:", "parameter of open(). 
\"\"\" p = subprocess.Popen(cmd,shell=True,stdout=subprocess.PIPE,stderr=subprocess.PIPE) out = p.communicate()[0]", "from test.csvtable;'\") f.close() copy_data('external_file_13.csv','data_file.csv') write_config_file(reuse_flag='true',formatOpts='csv',file='data_file.csv',table='test.csvtable',format='csv',delimiter=\"','\",log_errors=True,error_limit='10',staging_table='staging_table',externalSchema=\"'%'\") self.doTest(27) def test_28_gpload_ext_staging_table_with_dot(self): \"28 gpload", "= os.environ.get(\"PGUSER\") if PGUSER is None: PGUSER = USER PGHOST", "host = None, port = None): ''' Run a command", "process this file name to figure out the proper names", "'ext_gpload_reusable%' OR relname like 'staging_gpload_reusable%';\"\"\" resultList = db.query(queryString.encode('utf-8')).getresult() return resultList", "out = p.communicate()[0] ret = [] ret.append(out) rc = False", "% f2 ) dfile = diffFile( f1, outputPath = outputPath", "if os.path.isfile(file): run(\"rm -f\" + \" \" + file) modify_sql_file(num)", "True return (rc,ret) def outFile(fname,outputPath = ''): return changeExtFile(fname, \".out\",", "f.write(\"\\n - FAST_MATCH: \"+fast_match) if staging_table: f.write(\"\\n - STAGING_TABLE: \"+staging_table)", "are unified with 3 lines of context (i.e. optionalFlags is", "echo commands sent to server arg = '-e < '", "file already exists: 'a' = append; 'w' = write. Defaults", "return p.communicate() hostNameAddrs = get_ip(HOST) masterPort = getPortMasterOnly() def get_table_name():", "preTest = mkpath('pre_test_13.sql') psql_run(preTest, dbname='reuse_gptest') copy_data('external_file_09.txt','data_file.txt') write_config_file('merge','true',file='data_file.txt') self.doTest(13) def test_14_gpload_reuse_table_update_mode_with_reuse_RERUN(self):", "from texttable;'\") f.close() write_config_file(mode='insert',reuse_flag='false') self.doTest(7) def test_08_gpload_reuse_table_update_mode_with_reuse(self): \"8 gpload update", "different columns' order \" copy_data('external_file_10.txt','data/data_file.tbl') write_config_file('merge','true',file='data/data_file.tbl',columns_flag='1',mapping='1') self.doTest(15) def test_16_gpload_formatOpts_quote(self): \"16", "\"+mkpath(file)) if columns_flag=='1': f.write(\"\\n - COLUMNS:\") f.write(\"\\n - s_s1: text\")", "gpload -f \"+mkpath('config/config_file')+ \" -d reuse_gptest\\n\"+\"\\! gpload -f \"+mkpath('config/config_file')+ \"", "mkpath('query%d.sql' % num) runfile(file) self.check_result(file) def test_00_gpload_formatOpts_setup(self): \"0 gpload setup\"", "PGUSER = os.environ.get(\"PGUSER\") if PGUSER is None: PGUSER = USER", "re.search(master_pattern, line): master_value = int(line.split()[3].strip()) if master_value is None: error_msg", "arg, ofile)) def run(cmd): \"\"\" Run a shell command. Return", "during testing. # This is seldom issue. we can't reproduce", "user = os.environ.get('USER') if os.path.isfile(file): for line in fileinput.FileInput(file,inplace=1): line", "error count\" # f = open(mkpath('query22.sql'),'a') # f.write(\"\\! 
psql -d", "if mode: if mode == 'insert': f.write(\"\\n - MODE: \"+'insert')", "write_config_file(reuse_flag='true',formatOpts='csv',file='data_file.csv',table='csvtable',format='csv',delimiter=\"','\",log_errors=True,error_limit='90000000',externalSchema='test') self.doTest(24) def test_25_gpload_ext_staging_table(self): \"25 gpload reuse ext_staging_table if it", "external schema\" file = mkpath('setup.sql') runfile(file) copy_data('external_file_04.txt','data_file.txt') write_config_file(mode='merge',reuse_flag='true',fast_match='true',file='data_file.txt',externalSchema='test') self.doTest(33) def", "file) \" psql_run(cmd=\"ALTER TABLE texttable ADD column n8 text\",dbname='reuse_gptest') copy_data('external_file_08.txt','data_file.txt')", "'+ mkpath('data/' + source) + ' ' + mkpath(target) p", "os.environ.get(\"PGHOST\") if PGHOST is None: PGHOST = HOST d =", "for line in f.xreadlines(): match = re.search('port=\\d+',line) if match: match1", "f.write(\"\\n OUTPUT:\") f.write(\"\\n - TABLE: \"+table) if mode: if mode", "output file. mode: What to do if the output file", "false and fast match is true\" drop_tables() copy_data('external_file_08.txt','data_file.txt') write_config_file(mode='update',reuse_flag='false',fast_match='true',file='data_file.txt') self.doTest(32)", "with reuse\" copy_data('external_file_03.txt','data_file.txt') write_config_file(reuse_flag='true',formatOpts='text',file='data_file.txt',table='texttable',delimiter=\"\\\"'\\\"\") self.doTest(6) def test_07_gpload_reuse_table_insert_mode_without_reuse(self): \"7 gpload insert", "user: user = os.environ.get('USER') if os.path.isfile(file): for line in fileinput.FileInput(file,inplace=1):", "s_n3\") f.write(\"\\n n4: s_n4\") f.write(\"\\n n5: s_n5\") f.write(\"\\n n6: s_n6\")", "= \"\" else: port = \"-p %s\" % port if", "E'\\\\\\\\' with reuse\" copy_data('external_file_01.txt','data_file.txt') write_config_file(reuse_flag='true',formatOpts='text',file='data_file.txt',table='texttable',escape=\"E'\\\\\\\\'\") self.doTest(21) # case 22 is", "def test_17_gpload_formatOpts_quote(self): \"17 gpload formatOpts quote '\\\\x26'(&) with reuse\" copy_data('external_file_12.csv','data_file.csv')", "= '-c \"%s\"' % cmd elif ifile: arg = '", "print 'could not connect to database: ' + errorMessage queryString", "reuse\" copy_data('external_file_12.csv','data_file.csv') write_config_file(reuse_flag='true',formatOpts='csv',file='data_file.csv',table='csvtable',format='csv',delimiter=\"','\",quote=\"E'\\x26'\") self.doTest(18) def test_19_gpload_formatOpts_escape(self): \"19 gpload formatOpts escape", "file = mkpath('setup.sql') runfile(file) copy_data('external_file_04.txt','data_file.txt') write_config_file(mode='merge',reuse_flag='true',fast_match='false',file='data_file.txt',encoding='xxxx') self.doTest(37) def test_38_gpload_without_preload(self): \"38", "= re.search('ext_gpload',name) if match: queryString = \"DROP EXTERNAL TABLE %s\"", "in hostinfo])) for myip in ipaddrlist: if myip.find(\":\") > 0:", "\" psql_run(cmd=\"ALTER TABLE texttable ADD column n8 text\",dbname='reuse_gptest') copy_data('external_file_08.txt','data_file.txt') write_config_file(mode='update',reuse_flag='true',fast_match='true',file='data_file.txt')", "NOTICE: -I HINT: -I CONTEXT: -I GP_IGNORE: --gp_init_file=%s/global_init_file ' '%s", "HOST d = mkpath('config') if not os.path.exists(d): os.mkdir(d) def write_config_file(mode='insert',", "to add init_file suitePath = f1[0:f1.rindex( \"/\" )] if os.path.exists(suitePath", 
"copy_data('external_file_13.csv','data_file.csv') write_config_file(reuse_flag='true',formatOpts='csv',file='data_file.csv',table='test.csvtable',format='csv',delimiter=\"','\",log_errors=True,error_limit='10',staging_table='t.staging_table') self.doTest(28) def test_29_gpload_reuse_table_insert_mode_with_reuse_and_null(self): \"29 gpload insert mode with", "== 'merge': f.write(\"\\n - MODE: \"+'merge') f.write(\"\\n - UPDATE_COLUMNS:\") f.write(\"\\n", "SOURCE:\") f.write(\"\\n LOCAL_HOSTNAME:\") f.write(\"\\n - \"+hostNameAddrs) if portNum: f.write(\"\\n PORT:", "mode with reuse \" drop_tables() copy_data('external_file_06.txt','data_file.txt') write_config_file('merge','true',file='data_file.txt') self.doTest(10) def test_11_gpload_reuse_table_merge_mode_without_reuse(self):", "E'\\t' with reuse\" copy_data('external_file_02.txt','data_file.txt') write_config_file(reuse_flag='true',formatOpts='text',file='data_file.txt',table='texttable',delimiter=\"E'\\\\t'\") self.doTest(3) def test_04_gpload_formatOpts_delimiter(self): \"4 gpload", "they don't match. PARAMETERS: ifile: the name of the .sql", "def test_13_gpload_reuse_table_merge_mode_with_different_columns_number_in_DB(self): \"13 gpload merge mode with reuse (RERUN with", "f.write(\"\\n - s_s1: text\") f.write(\"\\n - s_s2: text\") f.write(\"\\n -", "base class for exceptions in this module http://docs.python.org/tutorial/errors.html We want", "s_s3\") f.write(\"\\n n1: s_n1\") f.write(\"\\n n2: s_n2\") f.write(\"\\n n3: s_n3\")", "%s' % f1 ) if not os.access( f2, os.R_OK ):", "f.write(\"\\n FILE:\") f.write(\"\\n - \"+mkpath(file)) if columns_flag=='1': f.write(\"\\n - COLUMNS:\")", "test_20_gpload_formatOpts_escape(self): \"20 gpload formatOpts escape '\\\\' with reuse\" copy_data('external_file_01.txt','data_file.txt') write_config_file(reuse_flag='true',formatOpts='text',file='data_file.txt',table='texttable',escape=", "mkpath('setup.sql') runfile(file) copy_data('external_file_04.txt','data_file.txt') write_config_file(mode='merge',reuse_flag='true',fast_match='false',file='data_file.txt',encoding='GBK') self.doTest(36) def test_37_gpload_reuse_table_merge_mode_invalid_encoding(self): \"37 gpload merge", "import sys import os import string import time import socket", "optionalFlags: command-line options (if any) for diff. 
For example, pass", "'select count(*) from test.csvtable;'\") f.close() copy_data('external_file_13.csv','data_file.csv') write_config_file(reuse_flag='true',formatOpts='csv',file='data_file.csv',table='test.csvtable',format='csv',delimiter=\"','\",log_errors=True,error_limit='10',staging_table='staging_table',externalSchema=\"'%'\") self.doTest(27) def test_28_gpload_ext_staging_table_with_dot(self):", "count(*) from csvtable;'\") # f.close() # f = open(mkpath('data/large_file.csv'),'w') #", "= \"-h %s\" % PGHOST else: host = \"-h %s\"", "out) = psql_run(ifile = ifile,ofile = outFile(ifile, outputPath),flag = flag,", "in range(0, 10000): if i % 2 == 0: f.write('1997,Ford,E350,\"ac,", "abs, moon\",3000.00\\n') # f.close() # copy_data('large_file.csv','data_file.csv') # write_config_file(reuse_flag='true',formatOpts='csv',file='data_file.csv',table='csvtable',format='csv',delimiter=\"','\",log_errors=True,error_limit='90000000') # self.doTest(22)", "= \"-h %s\" % host if port is None: port", "psql user @param host : to connect to a different", "raise Exception(error_msg) return str(master_value) \"\"\" Global Values \"\"\" MYD =", "f2, dfile)) else: if os.path.exists(myinitfile): (ok, out) = run('../gpdiff.pl -w", "= mkpath('setup.sql') runfile(file) f = open(mkpath('query26.sql'),'a') f.write(\"\\! psql -d reuse_gptest", "TABLE: \"+table) if mode: if mode == 'insert': f.write(\"\\n -", "f.write(\"\\n - s_s2: text\") f.write(\"\\n - s_dt: timestamp\") f.write(\"\\n -", "= get_port() PGUSER = os.environ.get(\"PGUSER\") if PGUSER is None: PGUSER", "ext def gpdbAnsFile(fname): ext = '.ans' return os.path.splitext(fname)[0] + ext", "update mode with reuse (RERUN) \" write_config_file('update','true',file='data_file.txt') self.doTest(14) def test_15_gpload_reuse_table_merge_mode_with_different_columns_order(self):", "os.path.isfile(file): with open(file) as f: for line in f.xreadlines(): match", "f.write(\"\\n - MODE: \"+'update') if mode == 'merge': f.write(\"\\n -", "tracked during testing. # This is seldom issue. we can't", "'select count(*) from texttable where n2 is null;'\") f.close() copy_data('external_file_14.txt','data_file.txt')", "- s_n6: real\") f.write(\"\\n - s_n7: double precision\") f.write(\"\\n -", "is \"-U3\"). 
\"\"\" f1 = gpdbAnsFile(ifile) f2 = outFile(ifile, outputPath=outputPath)", "when encoding is setted from GBK to empty\" write_config_file(mode='merge',reuse_flag='true',fast_match='true',file='data_file.txt') self.doTest(35)", "with reuse \" drop_tables() copy_data('external_file_06.txt','data_file.txt') write_config_file('merge','true',file='data_file.txt') self.doTest(10) def test_11_gpload_reuse_table_merge_mode_without_reuse(self): \"11", "USER = os.environ.get( \"LOGNAME\" ) HOST = socket.gethostname() GPHOME =", "dbname, host, port, username, flag, arg, ofile)) def run(cmd): \"\"\"", "def runfile(ifile, flag='', dbname=None, outputPath=\"\", outputFile=\"\", username=None, PGOPTIONS=None, host =", "Exception(\"Unable to connect to segment server %s as user %s\"", "= \"DROP EXTERNAL TABLE %s\" % name db.query(queryString.encode('utf-8')) else: queryString", "where n2 is null;'\") f.close() copy_data('external_file_14.txt','data_file.txt') write_config_file(mode='insert',reuse_flag='true',file='data_file.txt',log_errors=True, error_limit='100') self.doTest(29) def", "s_n7: double precision\") f.write(\"\\n - s_n8: text\") f.write(\"\\n - s_n9:", "test_33_gpload_reuse_table_merge_mode_with_fast_match_and_external_schema(self): \"33 gpload update mode with fast match and external", "-I GP_IGNORE: --gp_init_file=%s/global_init_file --gp_init_file=%s/init_file ' '%s %s > %s 2>&1'", "'__main__': suite = unittest.TestLoader().loadTestsFromTestCase(GPLoad_FormatOpts_TestCase) runner = unittest.TextTestRunner(verbosity=2) ret = not", "= str(e) print 'could not connect to database: ' +", "abs, moon\",3000.00,a\\n') else: f.write('1997,Ford,E350,\"ac, abs, moon\",3000.00\\n') f.close() copy_data('large_file.csv','data_file.csv') write_config_file(reuse_flag='true',formatOpts='csv',file='data_file.csv',table='csvtable',format='csv',delimiter=\"','\",log_errors=True,error_limit='90000000',externalSchema='test') self.doTest(24)", "CONTEXT: -I GP_IGNORE: --gp_init_file=%s/global_init_file --gp_init_file=%s ' '%s %s > %s", "psql -d %s %s %s -U %s %s %s %s'", "line = line.replace(\"gpload.py \",\"gpload \") print str(re.sub('\\n','',line)) def copy_data(source='',target=''): cmd", "the .sql file whose actual and expected outputs we want", "as f: for line in f.xreadlines(): match = re.search('port=\\d+',line) if", "copy_data('large_file.csv','data_file.csv') # write_config_file(reuse_flag='true',formatOpts='csv',file='data_file.csv',table='csvtable',format='csv',delimiter=\"','\",log_errors=True,error_limit='90000000') # self.doTest(22) def test_23_gpload_error_count(self): \"23 gpload error_table\"", "f.write(\"\\n - SCHEMA: \"+externalSchema) if preload: f.write(\"\\n PRELOAD:\") f.write(\"\\n -", "\"\"\" Run a shell command. Return (True, [result]) if OK,", "for line in fileinput.FileInput(file,inplace=1): line = line.replace(\"gpload.py \",\"gpload \") print", "if it is configured with externalschema\" file = mkpath('setup.sql') runfile(file)", "mode == 'update': f.write(\"\\n - MODE: \"+'update') if mode ==", "\" -d reuse_gptest\\n\"+\"\\! gpload -f \"+mkpath('config/config_file')+ \" -d reuse_gptest\\n\") f.close()", "error_table) f.write(\"\\n - ERROR_LIMIT: \" + error_limit) if delimiter: f.write(\"\\n", "= \"PGOPTIONS='%s'\" % PGOPTIONS if host is None: host =", "- s_n3: bigint\") f.write(\"\\n - s_n4: decimal\") f.write(\"\\n - s_n5:", "found, etc. 
Failure is define as test case failures, when", "= open(mkpath('data/large_file.csv'),'w') # for i in range(0, 10000): # if", "master_value = int(line.split()[3].strip()) if master_value is None: error_msg = \"\".join(out)", "error count with ext schema\" file = mkpath('setup.sql') runfile(file) f", "2>&1' % ( LMYD, f1, f2, dfile ) ) if", "= mkpath('query%d.diff' % num) if os.path.isfile(file): run(\"rm -f\" + \"", "null;'\") f.close() copy_data('external_file_14.txt','data_file.txt') write_config_file(mode='insert',reuse_flag='true',file='data_file.txt',log_errors=True, error_limit='100') self.doTest(29) def test_30_gpload_reuse_table_update_mode_with_fast_match(self): \"30 gpload", "copy_data('large_file.csv','data_file.csv') write_config_file(reuse_flag='true',formatOpts='csv',file='data_file.csv',table='csvtable',format='csv',delimiter=\"','\",error_table=\"err_table\",error_limit='90000000') self.doTest(23) def test_24_gpload_error_count(self): \"24 gpload error count with", "csvtable;'\") f.close() copy_data('external_file_13.csv','data_file.csv') write_config_file(reuse_flag='true',formatOpts='csv',file='data_file.csv',table='csvtable',format='csv',delimiter=\"','\",log_errors=True,error_limit='10',staging_table='staging_table') self.doTest(25) def test_26_gpload_ext_staging_table_with_externalschema(self): \"26 gpload reuse", "column n8 text\",dbname='reuse_gptest') copy_data('external_file_08.txt','data_file.txt') write_config_file(mode='update',reuse_flag='true',fast_match='true',file='data_file.txt') self.doTest(31) def test_32_gpload_update_mode_without_reuse_table_with_fast_match(self): \"32 gpload", ") if ok: os.unlink( dfile ) return ok def read_diff(ifile,", "and encoding GBK\" file = mkpath('setup.sql') runfile(file) copy_data('external_file_04.txt','data_file.txt') write_config_file(mode='merge',reuse_flag='true',fast_match='true',file='data_file.txt',encoding='GBK') self.doTest(34)", "'\\\\' with reuse\" copy_data('external_file_01.txt','data_file.txt') file = mkpath('setup.sql') runfile(file) write_config_file(reuse_flag='true',formatOpts='text',file='data_file.txt',table='texttable',escape='\\\\') self.doTest(19)", "LOG_ERRORS: true\") f.write(\"\\n - ERROR_LIMIT: \" + error_limit) if error_table:", "PGPORT=%s; %s\" \\ % (gphome, mdd, port, command) (ok,out) =", "merge mode without reuse \" copy_data('external_file_07.txt','data_file.txt') write_config_file('merge','false',file='data_file.txt') self.doTest(11) def test_12_gpload_reuse_table_merge_mode_with_different_columns_number_in_file(self):", "is flaky on concourse. 
It may report: Fatal Python error:", "gpdbAnsFile(ifile) f2 = outFile(ifile, outputPath=outputPath) result = isFileEqual(f1, f2, optionalFlags,", "moon\",3000.00,a\\n') else: f.write('1997,Ford,E350,\"ac, abs, moon\",3000.00\\n') f.close() copy_data('large_file.csv','data_file.csv') write_config_file(reuse_flag='true',formatOpts='csv',file='data_file.csv',table='csvtable',format='csv',delimiter=\"','\",log_errors=True,error_limit='90000000',externalSchema='test') self.doTest(24) def", "= os.environ['PGPORT'] if not port: port = get_port_from_conf() return port", "f.close() def runfile(ifile, flag='', dbname=None, outputPath=\"\", outputFile=\"\", username=None, PGOPTIONS=None, host", "port: port = get_port_from_conf() return port if port else 5432", "None, user = os.environ.get('USER'),gphome = os.environ['GPHOME'], mdd=os.environ['MASTER_DATA_DIRECTORY'],port = os.environ['PGPORT']): master_pattern", "ipv4 = myip return ipv4 def getPortMasterOnly(host = 'localhost',master_value =", "subprocess.Popen(cmd,shell=True,stdout=subprocess.PIPE,stderr=subprocess.PIPE) return p.communicate() hostNameAddrs = get_ip(HOST) masterPort = getPortMasterOnly() def", "s_n4: decimal\") f.write(\"\\n - s_n5: numeric\") f.write(\"\\n - s_n6: real\")", "with reuse\" copy_data('external_file_02.txt','data_file.txt') write_config_file(reuse_flag='true',formatOpts='text',file='data_file.txt',table='texttable',delimiter=\"E'\\u0009'\") self.doTest(4) def test_05_gpload_formatOpts_delimiter(self): \"5 gpload formatOpts", "self.doTest(10) def test_11_gpload_reuse_table_merge_mode_without_reuse(self): \"11 gpload merge mode without reuse \"", "mkpath('setup.sql') runfile(file) copy_data('external_file_04.txt','data_file.txt') write_config_file(mode='merge',reuse_flag='true',fast_match='true',file='data_file.txt',externalSchema='test') self.doTest(33) def test_34_gpload_reuse_table_merge_mode_with_fast_match_and_encoding(self): \"34 gpload merge", "an error and not a failure. 
The reason for an", "= hostname hostinfo = socket.getaddrinfo(hostname, None) ipaddrlist = list(set([(ai[4][0]) for", "f1, f2, dfile)) else: (ok, out) = run( '../gpdiff.pl -w", "f = open(mkpath('data/large_file.csv'),'w') # for i in range(0, 10000): #", "ipv4 def getPortMasterOnly(host = 'localhost',master_value = None, user = os.environ.get('USER'),gphome", "\"+hostNameAddrs) f.write(\"\\nPORT: \"+masterPort) f.write(\"\\nGPLOAD:\") f.write(\"\\n INPUT:\") f.write(\"\\n - SOURCE:\") f.write(\"\\n", "with reuse\" copy_data('external_file_03.txt','data_file.txt') write_config_file(reuse_flag='true',formatOpts='text',file='data_file.txt',table='texttable',delimiter=\"E'\\''\") self.doTest(5) def test_06_gpload_formatOpts_delimiter(self): \"6 gpload formatOpts", "count(*) from texttable where n2 is null;'\") f.close() copy_data('external_file_14.txt','data_file.txt') write_config_file(mode='insert',reuse_flag='true',file='data_file.txt',log_errors=True,", "with fast match\" drop_tables() copy_data('external_file_04.txt','data_file.txt') write_config_file(mode='update',reuse_flag='true',fast_match='true',file='data_file.txt') self.doTest(30) def test_31_gpload_reuse_table_update_mode_with_fast_match_and_different_columns_number(self): \"31", "fields\" file = mkpath('setup.sql') runfile(file) copy_data('external_file_04.txt','data_file.txt') write_config_file(mode='insert',reuse_flag='false',fast_match='false',file='data_file.txt',table='texttable1', error_limit='1000', fill=True) self.doTest(39)", "gpload formatOpts escape E'\\\\\\\\' with reuse\" copy_data('external_file_01.txt','data_file.txt') write_config_file(reuse_flag='true',formatOpts='text',file='data_file.txt',table='texttable',escape=\"E'\\\\\\\\'\") self.doTest(21) #", "texttable where n2=222;'\") f.close() copy_data('external_file_05.txt','data_file.txt') write_config_file(mode='update',reuse_flag='false',file='data_file.txt') self.doTest(9) def test_10_gpload_reuse_table_merge_mode_with_reuse(self): \"10", "write_config_file('merge','false',file='data_file.txt') self.doTest(11) def test_12_gpload_reuse_table_merge_mode_with_different_columns_number_in_file(self): \"12 gpload merge mode with reuse", ") dfile = diffFile( f1, outputPath = outputPath ) #", "Global Values \"\"\" MYD = os.path.abspath(os.path.dirname(__file__)) mkpath = lambda *x:", "== 0: return os.path.splitext( fname )[0] + ext else: filename", "if it is configured\" file = mkpath('setup.sql') runfile(file) f =", "\"+externalSchema) if preload: f.write(\"\\n PRELOAD:\") f.write(\"\\n - REUSE_TABLES: \"+reuse_flag) f.write(\"\\n", "f = open(mkpath('query27.sql'),'a') f.write(\"\\! psql -d reuse_gptest -c 'select count(*)", "in f.xreadlines(): match = re.search('port=\\d+',line) if match: match1 = re.search('\\d+',", "time import socket import fileinput import platform import re try:", "fast match and encoding GBK\" file = mkpath('setup.sql') runfile(file) copy_data('external_file_04.txt','data_file.txt')", "f.write(\"\\! gpload -f \"+mkpath('config/config_file')+ \" -d reuse_gptest\\n\"+\"\\! gpload -f \"+mkpath('config/config_file')+", "\"/\" )] if os.path.exists(suitePath + \"/init_file\"): (ok, out) = run('../gpdiff.pl", "outputPath) self.assertTrue(result, \"query resulted in diff:\\n{}\".format(diff)) return True def doTest(self,", "different from the expected result. 
''' pass class GPLoad_FormatOpts_TestCase(unittest.TestCase): def", "default, diffs are unified with 3 lines of context (i.e.", "copy_data('external_file_02.txt','data_file.txt') write_config_file(reuse_flag='true',formatOpts='text',file='data_file.txt',table='texttable',delimiter=\"'\\t'\") self.doTest(2) def test_03_gpload_formatOpts_delimiter(self): \"3 gpload formatOpts delimiter E'\\t'", "moon\",3000.00\\n') f.close() copy_data('large_file.csv','data_file.csv') write_config_file(reuse_flag='true',formatOpts='csv',file='data_file.csv',table='csvtable',format='csv',delimiter=\"','\",error_table=\"err_table\",error_limit='90000000') self.doTest(23) def test_24_gpload_error_count(self): \"24 gpload error", "- MATCH_COLUMNS:\") f.write(\"\\n - n1\") f.write(\"\\n - s1\") f.write(\"\\n -", "PARAMETERS: ifile: the name of the .sql file whose actual", "n2 is null;'\") f.close() copy_data('external_file_14.txt','data_file.txt') write_config_file(mode='insert',reuse_flag='true',file='data_file.txt',log_errors=True, error_limit='100') self.doTest(29) def test_30_gpload_reuse_table_update_mode_with_fast_match(self):", "name to add init_file suitePath = f1[0:f1.rindex( \"/\" )] if", "'staging_gpload_reusable%';\"\"\" resultList = db.query(queryString.encode('utf-8')).getresult() return resultList def drop_tables(): try: db", "\"\", outputPath = \"\", myinitfile = \"\"): LMYD = os.path.abspath(os.path.dirname(__file__))", "\"15 gpload merge mode with different columns' order \" copy_data('external_file_10.txt','data/data_file.tbl')", "if log_errors: f.write(\"\\n - LOG_ERRORS: true\") f.write(\"\\n - ERROR_LIMIT: \"", "= os.path.abspath(mkpath('..')) if UPD not in sys.path: sys.path.append(UPD) DBNAME =", "== '-a': arg = '-f ' + ifile else: raise", "= os.environ['GPHOME'], mdd=os.environ['MASTER_DATA_DIRECTORY'],port = os.environ['PGPORT']): master_pattern = \"Context:\\s*-1\\s*Value:\\s*\\d+\" command =", "f.write(\"\\n - SOURCE:\") f.write(\"\\n LOCAL_HOSTNAME:\") f.write(\"\\n - \"+hostNameAddrs) if portNum:", "function is backwards compatible). Yes, this is passed to the", "mkpath('setup.sql') runfile(file) copy_data('external_file_04.txt','data_file.txt') write_config_file(mode='merge',reuse_flag='true',fast_match='true',file='data_file.txt',encoding='GBK') self.doTest(34) def test_35_gpload_reuse_table_merge_mode_with_fast_match_default_encoding(self): \"35 gpload does", "self.doTest(9) def test_10_gpload_reuse_table_merge_mode_with_reuse(self): \"10 gpload merge mode with reuse \"", "\" (with the blank spaces) to ignore blank lines. By", "sent to server arg = '-e < ' + ifile", "def drop_tables(): try: db = pg.DB(dbname='reuse_gptest' ,host='localhost' ,port=int(PGPORT) ) except", "= mkpath('setup.sql') runfile(file) copy_data('external_file_04.txt','data_file.txt') write_config_file(mode='merge',reuse_flag='true',fast_match='true',file='data_file.txt',externalSchema='test') self.doTest(33) def test_34_gpload_reuse_table_merge_mode_with_fast_match_and_encoding(self): \"34 gpload", "if it is configured with dot\" file = mkpath('setup.sql') runfile(file)", "table is false and fast match is true\" drop_tables() copy_data('external_file_08.txt','data_file.txt')", "f = open(mkpath('query26.sql'),'a') f.write(\"\\! psql -d reuse_gptest -c 'select count(*)", "'-f ' + ifile else: raise PSQLError('missing cmd and ifile')", "f = open(mkpath('query28.sql'),'a') f.write(\"\\! psql -d reuse_gptest -c 'select count(*)", "= open(mkpath('query25.sql'),'a') f.write(\"\\! 
psql -d reuse_gptest -c 'select count(*) from", ") - 1] )[0] return outputPath + \"/\" + fname", "open(mkpath('query24.sql'),'a') f.write(\"\\! psql -d reuse_gptest -c 'select count(*) from csvtable;'\")", "not reuse table when encoding is setted from GBK to", "test_18_gpload_formatOpts_quote(self): \"18 gpload formatOpts quote E'\\\\x26'(&) with reuse\" copy_data('external_file_12.csv','data_file.csv') write_config_file(reuse_flag='true',formatOpts='csv',file='data_file.csv',table='csvtable',format='csv',delimiter=\"','\",quote=\"E'\\x26'\")", "def test_39_gpload_fill_missing_fields(self): \"39 gpload fill missing fields\" file = mkpath('setup.sql')", "-B \" (with the blank spaces) to ignore blank lines.", "mkpath('setup.sql') runfile(file) write_config_file(reuse_flag='true',formatOpts='text',file='data_file.txt',table='texttable',escape='\\\\') self.doTest(19) def test_20_gpload_formatOpts_escape(self): \"20 gpload formatOpts escape", "= open(mkpath('query9.sql'),'a') f.write(\"\\! psql -d reuse_gptest -c 'select count(*) from", "= i[0] match = re.search('ext_gpload',name) if match: queryString = \"DROP", "to database: ' + errorMessage list = get_table_name() for i", "username=username, PGOPTIONS=PGOPTIONS, host = host, port = port) else: (ok,out)", "self.check_result(file) def test_01_gpload_formatOpts_delimiter(self): \"1 gpload formatOpts delimiter '|' with reuse", "n7: s_n7\") f.write(\"\\n n8: s_n8\") f.write(\"\\n n9: s_n9\") if externalSchema:", "for diff. For example, pass \" -B \" (with the", "' + ifile if flag == '-a': arg = '-f", "def write_config_file(mode='insert', reuse_flag='',columns_flag='0',mapping='0',portNum='8081',database='reuse_gptest',host='localhost',formatOpts='text',file='data/external_file_01.txt',table='texttable',format='text',delimiter=\"'|'\",escape='',quote='',truncate='False',log_errors=None, error_limit='0',error_table=None,externalSchema=None,staging_table=None,fast_match='false', encoding=None, preload=True, fill=False): f = open(mkpath('config/config_file'),'w')", "write_config_file(mode='update',reuse_flag='false',file='data_file.txt') self.doTest(9) def test_10_gpload_reuse_table_merge_mode_with_reuse(self): \"10 gpload merge mode with reuse", "\"9 gpload update mode without reuse\" f = open(mkpath('query9.sql'),'a') f.write(\"\\!", "\" preTest = mkpath('pre_test_13.sql') psql_run(preTest, dbname='reuse_gptest') copy_data('external_file_09.txt','data_file.txt') write_config_file('merge','true',file='data_file.txt') self.doTest(13) def", "copy_data('external_file_01.txt','data_file.txt') write_config_file(reuse_flag='true',formatOpts='text',file='data_file.txt',table='texttable',escape= '\\x5C') self.doTest(20) def test_21_gpload_formatOpts_escape(self): \"21 gpload formatOpts escape", "'%s %s > %s 2>&1' % (LMYD, suitePath, f1, f2,", "dfile)) else: (ok, out) = run( '../gpdiff.pl -w ' +", "(LMYD, suitePath, f1, f2, dfile)) else: if os.path.exists(myinitfile): (ok, out)", "= host, port = port) else: (ok,out) = psql_run(ifile =ifile,", "if not os.path.exists(d): os.mkdir(d) def write_config_file(mode='insert', reuse_flag='',columns_flag='0',mapping='0',portNum='8081',database='reuse_gptest',host='localhost',formatOpts='text',file='data/external_file_01.txt',table='texttable',format='text',delimiter=\"'|'\",escape='',quote='',truncate='False',log_errors=None, error_limit='0',error_table=None,externalSchema=None,staging_table=None,fast_match='false', encoding=None, preload=True,", "configured with externalschema\" file = mkpath('setup.sql') 
runfile(file) f = open(mkpath('query27.sql'),'a')", "if encoding: f.write(\"\\n - ENCODING: \"+encoding) if escape: f.write(\"\\n -", "out: out = line.split('\\n') for line in out: if re.search(master_pattern,", "+ error_limit) if delimiter: f.write(\"\\n - DELIMITER: \"+delimiter) if encoding:", "\"\"): LMYD = os.path.abspath(os.path.dirname(__file__)) if not os.access( f1, os.R_OK ):", "the blank spaces) to ignore blank lines. By default, diffs", "with reuse (RERUN with different columns number in DB table)", "%s as user %s\" % (host, user)) for line in", "%s %s %s -U %s %s %s %s' % (PGOPTIONS,", "file = mkpath('query%d.sql' % num) runfile(file) self.check_result(file) def test_00_gpload_formatOpts_setup(self): \"0", "PRELOAD:\") f.write(\"\\n - REUSE_TABLES: \"+reuse_flag) f.write(\"\\n - FAST_MATCH: \"+fast_match) if", "ext def isFileEqual( f1, f2, optionalFlags = \"\", outputPath =", "to connect to segment server %s as user %s\" %", "f.write(\"\\n - n2\") f.write(\"\\n - MATCH_COLUMNS:\") f.write(\"\\n - n1\") f.write(\"\\n", "'\\\\' with reuse\" copy_data('external_file_01.txt','data_file.txt') write_config_file(reuse_flag='true',formatOpts='text',file='data_file.txt',table='texttable',escape= '\\x5C') self.doTest(20) def test_21_gpload_formatOpts_escape(self): \"21", "reuse ext_staging_table if it is configured with dot\" file =", "self.doTest(3) def test_04_gpload_formatOpts_delimiter(self): \"4 gpload formatOpts delimiter E'\\u0009' with reuse\"", "f.write(\"\\n n2: s_n2\") f.write(\"\\n n3: s_n3\") f.write(\"\\n n4: s_n4\") f.write(\"\\n", "= '-e',dbname = None, username = None, PGOPTIONS = None,", "returns its contents as a string. \"\"\" dfile = diffFile(ifile,", "lines. By default, diffs are unified with 3 lines of", "= mkpath('pre_test_13.sql') psql_run(preTest, dbname='reuse_gptest') copy_data('external_file_09.txt','data_file.txt') write_config_file('merge','true',file='data_file.txt') self.doTest(13) def test_14_gpload_reuse_table_update_mode_with_reuse_RERUN(self): \"14", "is configured with externalschema\" file = mkpath('setup.sql') runfile(file) f =", "segment server %s as user %s\" % (host, user)) for", "username = None, PGOPTIONS = None, host = None, port", "not user: user = os.environ.get('USER') if os.path.isfile(file): for line in", "count(*) from csvtable;'\") f.close() copy_data('external_file_13.csv','data_file.csv') write_config_file(reuse_flag='true',formatOpts='csv',file='data_file.csv',table='csvtable',format='csv',delimiter=\"','\",log_errors=True,error_limit='10',staging_table='staging_table') self.doTest(25) def test_26_gpload_ext_staging_table_with_externalschema(self): \"26", "- LOG_ERRORS: true\") f.write(\"\\n - ERROR_LIMIT: \" + error_limit) if", "test_01_gpload_formatOpts_delimiter(self): \"1 gpload formatOpts delimiter '|' with reuse \" copy_data('external_file_01.txt','data_file.txt')", "\"31 gpload update mode with fast match and differenct columns", "ofile = '2>&1' elif not ofile: ofile = '> /dev/null", "else: f.write('1997,Ford,E350,\"ac, abs, moon\",3000.00\\n') f.close() copy_data('large_file.csv','data_file.csv') write_config_file(reuse_flag='true',formatOpts='csv',file='data_file.csv',table='csvtable',format='csv',delimiter=\"','\",error_table=\"err_table\",error_limit='90000000') self.doTest(23) def test_24_gpload_error_count(self):", "with reuse\" copy_data('external_file_12.csv','data_file.csv') write_config_file(reuse_flag='true',formatOpts='csv',file='data_file.csv',table='csvtable',format='csv',delimiter=\"','\",quote=\"E'\\x26'\") 
#!/usr/bin/env python

import unittest
import sys
import os
import string
import time
import socket
import fileinput
import platform
import re
try:
    import subprocess32 as subprocess
except:
    import subprocess
import pg

def get_port_from_conf():
    file = os.environ.get('MASTER_DATA_DIRECTORY')+'/postgresql.conf'
    if os.path.isfile(file):
        with open(file) as f:
            for line in f.xreadlines():
                match = re.search('port=\d+',line)
                if match:
                    match1 = re.search('\d+', match.group())
                    if match1:
                        return match1.group()

def get_port():
    port = os.environ['PGPORT']
    if not port:
        port = get_port_from_conf()
    return port if port else 5432

def get_ip(hostname=None):
    if hostname is None:
        hostname = socket.gethostname()
    hostinfo = socket.getaddrinfo(hostname, None)
    ipaddrlist = list(set([(ai[4][0]) for ai in hostinfo]))
    for myip in ipaddrlist:
        if myip.find(":") > 0:
            ipv6 = myip
            return ipv6
        elif myip.find(".") > 0:
            ipv4 = myip
            return ipv4

def getPortMasterOnly(host = 'localhost', master_value = None,
                      user = os.environ.get('USER'),
                      gphome = os.environ['GPHOME'],
                      mdd = os.environ['MASTER_DATA_DIRECTORY'],
                      port = os.environ['PGPORT']):
    master_pattern = "Context:\s*-1\s*Value:\s*\d+"
    command = "gpconfig -s %s" % ( "port" )
    cmd = "source %s/greenplum_path.sh; export MASTER_DATA_DIRECTORY=%s; export PGPORT=%s; %s" \
          % (gphome, mdd, port, command)
    (ok,out) = run(cmd)
    if not ok:
        raise Exception("Unable to connect to segment server %s as user %s" % (host, user))
    for line in out:
        out = line.split('\n')
    for line in out:
        if re.search(master_pattern, line):
            master_value = int(line.split()[3].strip())
    if master_value is None:
        error_msg = "".join(out)
        raise Exception(error_msg)
    return str(master_value)

"""
Global Values
"""
MYD = os.path.abspath(os.path.dirname(__file__))
mkpath = lambda *x: os.path.join(MYD, *x)
UPD = os.path.abspath(mkpath('..'))
if UPD not in sys.path:
    sys.path.append(UPD)

DBNAME = "postgres"
USER = os.environ.get("LOGNAME")
HOST = socket.gethostname()
GPHOME = os.getenv("GPHOME")
PGPORT = get_port()
PGUSER = os.environ.get("PGUSER")
if PGUSER is None:
    PGUSER = USER
PGHOST = os.environ.get("PGHOST")
if PGHOST is None:
    PGHOST = HOST

d = mkpath('config')
if not os.path.exists(d):
    os.mkdir(d)

def write_config_file(mode='insert', reuse_flag='', columns_flag='0', mapping='0',
                      portNum='8081', database='reuse_gptest', host='localhost',
                      formatOpts='text', file='data/external_file_01.txt',
                      table='texttable', format='text', delimiter="'|'",
                      escape='', quote='', truncate='False', log_errors=None,
                      error_limit='0', error_table=None, externalSchema=None,
                      staging_table=None, fast_match='false', encoding=None,
                      preload=True, fill=False):

    f = open(mkpath('config/config_file'),'w')
    f.write("VERSION: 1.0.0.1")
    if database:
        f.write("\nDATABASE: "+database)
    f.write("\nUSER: "+os.environ.get('USER'))
    f.write("\nHOST: "+hostNameAddrs)
    f.write("\nPORT: "+masterPort)
    f.write("\nGPLOAD:")
    f.write("\n   INPUT:")
    f.write("\n    - SOURCE:")
    f.write("\n         LOCAL_HOSTNAME:")
    f.write("\n            - "+hostNameAddrs)
    if portNum:
        f.write("\n         PORT: "+portNum)
    f.write("\n         FILE:")
    f.write("\n            - "+mkpath(file))
    if columns_flag == '1':
        f.write("\n    - COLUMNS:")
        f.write("\n           - s_s1: text")
        f.write("\n           - s_s2: text")
        f.write("\n           - s_dt: timestamp")
        f.write("\n           - s_s3: text")
        f.write("\n           - s_n1: smallint")
        f.write("\n           - s_n2: integer")
        f.write("\n           - s_n3: bigint")
        f.write("\n           - s_n4: decimal")
        f.write("\n           - s_n5: numeric")
        f.write("\n           - s_n6: real")
        f.write("\n           - s_n7: double precision")
        f.write("\n           - s_n8: text")
        f.write("\n           - s_n9: text")
    if format:
        f.write("\n    - FORMAT: "+format)
    if log_errors:
        f.write("\n    - LOG_ERRORS: true")
        f.write("\n    - ERROR_LIMIT: " + error_limit)
    if error_table:
        f.write("\n    - ERROR_TABLE: " + error_table)
        f.write("\n    - ERROR_LIMIT: " + error_limit)
    if delimiter:
        f.write("\n    - DELIMITER: "+delimiter)
    if encoding:
        f.write("\n    - ENCODING: "+encoding)
    if escape:
        f.write("\n    - ESCAPE: "+escape)
    if quote:
        f.write("\n    - QUOTE: "+quote)
    if fill:
        f.write("\n    - FILL_MISSING_FIELDS: true")
    f.write("\n   OUTPUT:")
    f.write("\n    - TABLE: "+table)
    if mode:
        if mode == 'insert':
            f.write("\n    - MODE: "+'insert')
        if mode == 'update':
            f.write("\n    - MODE: "+'update')
        if mode == 'merge':
            f.write("\n    - MODE: "+'merge')
    f.write("\n    - UPDATE_COLUMNS:")
    f.write("\n           - n2")
    f.write("\n    - MATCH_COLUMNS:")
    f.write("\n           - n1")
    f.write("\n           - s1")
    f.write("\n           - s2")
    if mapping == '1':
        f.write("\n    - MAPPING:")
        f.write("\n           s1: s_s1")
        f.write("\n           s2: s_s2")
        f.write("\n           dt: s_dt")
        f.write("\n           s3: s_s3")
        f.write("\n           n1: s_n1")
        f.write("\n           n2: s_n2")
        f.write("\n           n3: s_n3")
        f.write("\n           n4: s_n4")
        f.write("\n           n5: s_n5")
        f.write("\n           n6: s_n6")
        f.write("\n           n7: s_n7")
        f.write("\n           n8: s_n8")
        f.write("\n           n9: s_n9")
    if externalSchema:
        f.write("\n   EXTERNAL:")
        f.write("\n    - SCHEMA: "+externalSchema)
    if preload:
        f.write("\n   PRELOAD:")
        f.write("\n    - REUSE_TABLES: "+reuse_flag)
        f.write("\n    - FAST_MATCH: "+fast_match)
        if staging_table:
            f.write("\n    - STAGING_TABLE: "+staging_table)
    f.write("\n")
    f.close()

def runfile(ifile, flag='', dbname=None, outputPath="", outputFile="",
            username=None, PGOPTIONS=None, host=None, port=None):
    if len(outputFile) == 0:
        (ok, out) = psql_run(ifile=ifile, ofile=outFile(ifile, outputPath), flag=flag,
                             dbname=dbname, username=username,
                             PGOPTIONS=PGOPTIONS, host=host, port=port)
    else:
        (ok, out) = psql_run(ifile=ifile, ofile=outFile(outputFile, outputPath), flag=flag,
                             dbname=dbname, username=username,
                             PGOPTIONS=PGOPTIONS, host=host, port=port)
    return (ok, out)

def psql_run(ifile = None, ofile = None, cmd = None,
             flag = '-e', dbname = None,
             username = None,
             PGOPTIONS = None, host = None, port = None):
    '''
    Run a command or file against psql. Return True if OK.
    @param dbname: database name
    @param ifile: input file
    @param cmd: command line
    @param flag: -e Run SQL with no comments (default)
                 -a Run SQL with comments and psql notice
    @param username: psql user
    @param host: to connect to a different host
    @param port: port where gpdb is running
    @param PGOPTIONS: connects to postgres via utility mode
    '''
    if dbname is None:
        dbname = DBNAME
    if username is None:
        username = PGUSER  # Use the default login user

    if PGOPTIONS is None:
        PGOPTIONS = ""
    else:
        PGOPTIONS = "PGOPTIONS='%s'" % PGOPTIONS

    if host is None:
        host = "-h %s" % PGHOST
    else:
        host = "-h %s" % host

    if port is None:
        port = ""
    else:
        port = "-p %s" % port

    if cmd:
        arg = '-c "%s"' % cmd
    elif ifile:
        arg = ' < ' + ifile
        if not (flag == '-q'):  # Don't echo commands sent to server
            arg = '-e < ' + ifile
        if flag == '-a':
            arg = '-f ' + ifile
    else:
        raise PSQLError('missing cmd and ifile')

    if ofile == '-':
        ofile = '2>&1'
    elif not ofile:
        ofile = '> /dev/null 2>&1'
    else:
        ofile = '> %s 2>&1' % ofile

    return run('%s psql -d %s %s %s -U %s %s %s %s' %
               (PGOPTIONS, dbname, host, port, username, flag, arg, ofile))

def run(cmd):
    """
    Run a shell command. Return (True, [result]) if OK, or (False, []) otherwise.
    @params cmd: The command to run at the shell.
    """
    p = subprocess.Popen(cmd, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
    out = p.communicate()[0]
    ret = []
    ret.append(out)
    rc = False if p.wait() else True
    return (rc, ret)

def outFile(fname, outputPath = ''):
    return changeExtFile(fname, ".out", outputPath)

def diffFile( fname, outputPath = "" ):
    return changeExtFile( fname, ".diff", outputPath )

def changeExtFile( fname, ext = ".diff", outputPath = "" ):
    if len( outputPath ) == 0:
        return os.path.splitext( fname )[0] + ext
    else:
        filename = fname.split( "/" )
        fname = os.path.splitext( filename[len( filename ) - 1] )[0]
        return outputPath + "/" + fname + ext

def gpdbAnsFile(fname):
    ext = '.ans'
    return os.path.splitext(fname)[0] + ext

def isFileEqual( f1, f2, optionalFlags = "", outputPath = "", myinitfile = ""):
    LMYD = os.path.abspath(os.path.dirname(__file__))
    if not os.access( f1, os.R_OK ):
        raise Exception( 'Error: cannot find file %s' % f1 )
    if not os.access( f2, os.R_OK ):
        raise Exception( 'Error: cannot find file %s' % f2 )
    dfile = diffFile( f1, outputPath = outputPath )
    # Gets the suitePath name to add init_file
    suitePath = f1[0:f1.rindex( "/" )]
    if os.path.exists(suitePath + "/init_file"):
        (ok, out) = run('../gpdiff.pl -w ' + optionalFlags + \
                        ' -I NOTICE: -I HINT: -I CONTEXT: -I GP_IGNORE: --gp_init_file=%s/global_init_file --gp_init_file=%s/init_file '
                        '%s %s > %s 2>&1' % (LMYD, suitePath, f1, f2, dfile))
    else:
        if os.path.exists(myinitfile):
            (ok, out) = run('../gpdiff.pl -w ' + optionalFlags + \
                            ' -I NOTICE: -I HINT: -I CONTEXT: -I GP_IGNORE: --gp_init_file=%s/global_init_file --gp_init_file=%s '
                            '%s %s > %s 2>&1' % (LMYD, myinitfile, f1, f2, dfile))
        else:
            (ok, out) = run( '../gpdiff.pl -w ' + optionalFlags + \
                             ' -I NOTICE: -I HINT: -I CONTEXT: -I GP_IGNORE: --gp_init_file=%s/global_init_file '
                             '%s %s > %s 2>&1' % ( LMYD, f1, f2, dfile ) )
    if ok:
        os.unlink( dfile )
    return ok

def read_diff(ifile, outputPath):
    """
    Opens the diff file that is associated with the given input file and
    returns its contents as a string.
    """
    dfile = diffFile(ifile, outputPath)
    with open(dfile, 'r') as diff:
        return diff.read()

def modify_sql_file(num):
    file = mkpath('query%d.sql' % num)
    if os.path.isfile(file):
        for line in fileinput.FileInput(file, inplace=1):
            line = line.replace("gpload.py ", "gpload ")
            print str(re.sub('\n', '', line))

def copy_data(source='', target=''):
    cmd = 'cp ' + mkpath('data/' + source) + ' ' + mkpath(target)
    p = subprocess.Popen(cmd, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
    return p.communicate()

hostNameAddrs = get_ip(HOST)
masterPort = getPortMasterOnly()

def get_table_name():
    try:
        db = pg.DB(dbname='reuse_gptest'
                  ,host='localhost'
                  ,port=int(PGPORT)
                  )
    except Exception,e:
        errorMessage = str(e)
        print 'could not connect to database: ' + errorMessage
    queryString = """SELECT relname
                     from pg_class
                     WHERE relname like 'ext_gpload_reusable%'
                     OR relname like 'staging_gpload_reusable%';"""
    resultList = db.query(queryString.encode('utf-8')).getresult()
    return resultList

def drop_tables():
    try:
        db = pg.DB(dbname='reuse_gptest'
                  ,host='localhost'
                  ,port=int(PGPORT)
                  )
    except Exception,e:
        errorMessage = str(e)
        print 'could not connect to database: ' + errorMessage
    list = get_table_name()
    for i in list:
        name = i[0]
        match = re.search('ext_gpload', name)
        if match:
            queryString = "DROP EXTERNAL TABLE %s" % name
            db.query(queryString.encode('utf-8'))
        else:
            queryString = "DROP TABLE %s" % name
            db.query(queryString.encode('utf-8'))

class PSQLError(Exception):
    '''
    PSQLError is the base class for exceptions in this module.
    http://docs.python.org/tutorial/errors.html
    We want to raise an error and not a failure. The reason for an error
    might be a program error, a file not found, etc.
    Failure is defined as a test case failure, when the output is different
    from the expected result.
    '''
    pass

class GPLoad_FormatOpts_TestCase(unittest.TestCase):

    def check_result(self, ifile, optionalFlags = "-U3", outputPath = ""):
        """
        PURPOSE: compare the actual and expected output files and report an
            error if they don't match.
        PARAMETERS:
            ifile: the name of the .sql file whose actual and expected outputs
                we want to compare. You may include the path as well as the
                filename. This function will process this file name to
                figure out the proper names of the .out and .ans files.
            optionalFlags: command-line options (if any) for diff.
                For example, pass " -B " (with the blank spaces) to ignore
                blank lines. By default, diffs are unified with 3 lines of
                context (i.e. optionalFlags is "-U3").
        """
        f1 = gpdbAnsFile(ifile)
        f2 = outFile(ifile, outputPath=outputPath)

        result = isFileEqual(f1, f2, optionalFlags, outputPath=outputPath)
        diff = None if result else read_diff(ifile, outputPath)
        self.assertTrue(result, "query resulted in diff:\n{}".format(diff))

        return True

    def doTest(self, num):
        file = mkpath('query%d.diff' % num)
        if os.path.isfile(file):
            run("rm -f" + " " + file)
        modify_sql_file(num)
        file = mkpath('query%d.sql' % num)
        runfile(file)
        self.check_result(file)

    def test_00_gpload_formatOpts_setup(self):
        "0 gpload setup"
        for num in range(1,40):
            f = open(mkpath('query%d.sql' % num),'w')
            f.write("\! gpload -f "+mkpath('config/config_file')+ " -d reuse_gptest\n"+"\! gpload -f "+mkpath('config/config_file')+ " -d reuse_gptest\n")
            f.close()
        file = mkpath('setup.sql')
        runfile(file)
        self.check_result(file)

    def test_01_gpload_formatOpts_delimiter(self):
        "1 gpload formatOpts delimiter '|' with reuse "
        copy_data('external_file_01.txt','data_file.txt')
        write_config_file(reuse_flag='true',formatOpts='text',file='data_file.txt',table='texttable',delimiter="'|'")
        self.doTest(1)

    def test_02_gpload_formatOpts_delimiter(self):
        "2 gpload formatOpts delimiter '\t' with reuse"
        copy_data('external_file_02.txt','data_file.txt')
        write_config_file(reuse_flag='true',formatOpts='text',file='data_file.txt',table='texttable',delimiter="'\t'")
        self.doTest(2)

    def test_03_gpload_formatOpts_delimiter(self):
        "3 gpload formatOpts delimiter E'\t' with reuse"
        copy_data('external_file_02.txt','data_file.txt')
        write_config_file(reuse_flag='true',formatOpts='text',file='data_file.txt',table='texttable',delimiter="E'\\t'")
        self.doTest(3)

    def test_04_gpload_formatOpts_delimiter(self):
        "4 gpload formatOpts delimiter E'\u0009' with reuse"
        copy_data('external_file_02.txt','data_file.txt')
        write_config_file(reuse_flag='true',formatOpts='text',file='data_file.txt',table='texttable',delimiter="E'\u0009'")
        self.doTest(4)

    def test_05_gpload_formatOpts_delimiter(self):
        "5 gpload formatOpts delimiter E'\\'' with reuse"
        copy_data('external_file_03.txt','data_file.txt')
        write_config_file(reuse_flag='true',formatOpts='text',file='data_file.txt',table='texttable',delimiter="E'\''")
        self.doTest(5)

    def test_06_gpload_formatOpts_delimiter(self):
        "6 gpload formatOpts delimiter \"'\" with reuse"
        copy_data('external_file_03.txt','data_file.txt')
        write_config_file(reuse_flag='true',formatOpts='text',file='data_file.txt',table='texttable',delimiter="\"'\"")
        self.doTest(6)

    def test_07_gpload_reuse_table_insert_mode_without_reuse(self):
        "7 gpload insert mode without reuse"
        runfile(mkpath('setup.sql'))
        f = open(mkpath('query7.sql'),'a')
        f.write("\! psql -d reuse_gptest -c 'select count(*) from texttable;'")
        f.close()
        write_config_file(mode='insert',reuse_flag='false')
        self.doTest(7)

    def test_08_gpload_reuse_table_update_mode_with_reuse(self):
        "8 gpload update mode with reuse"
        drop_tables()
        copy_data('external_file_04.txt','data_file.txt')
        write_config_file(mode='update',reuse_flag='true',file='data_file.txt')
        self.doTest(8)

    def test_09_gpload_reuse_table_update_mode_without_reuse(self):
        "9 gpload update mode without reuse"
        f = open(mkpath('query9.sql'),'a')
        f.write("\! psql -d reuse_gptest -c 'select count(*) from texttable;'\n"+"\! psql -d reuse_gptest -c 'select * from texttable where n2=222;'")
        f.close()
        copy_data('external_file_05.txt','data_file.txt')
        write_config_file(mode='update',reuse_flag='false',file='data_file.txt')
        self.doTest(9)

    def test_10_gpload_reuse_table_merge_mode_with_reuse(self):
        "10 gpload merge mode with reuse "
        drop_tables()
        copy_data('external_file_06.txt','data_file.txt')
        write_config_file('merge','true',file='data_file.txt')
        self.doTest(10)

    def test_11_gpload_reuse_table_merge_mode_without_reuse(self):
        "11 gpload merge mode without reuse "
        copy_data('external_file_07.txt','data_file.txt')
        write_config_file('merge','false',file='data_file.txt')
        self.doTest(11)

    def test_12_gpload_reuse_table_merge_mode_with_different_columns_number_in_file(self):
        "12 gpload merge mode with reuse (RERUN with different columns number in file) "
        psql_run(cmd="ALTER TABLE texttable ADD column n8 text",dbname='reuse_gptest')
        copy_data('external_file_08.txt','data_file.txt')
        write_config_file('merge','true',file='data_file.txt')
        self.doTest(12)

    def test_13_gpload_reuse_table_merge_mode_with_different_columns_number_in_DB(self):
        "13 gpload merge mode with reuse (RERUN with different columns number in DB table) "
        preTest = mkpath('pre_test_13.sql')
        psql_run(preTest, dbname='reuse_gptest')
        copy_data('external_file_09.txt','data_file.txt')
        write_config_file('merge','true',file='data_file.txt')
        self.doTest(13)

    def test_14_gpload_reuse_table_update_mode_with_reuse_RERUN(self):
        "14 gpload update mode with reuse (RERUN) "
        write_config_file('update','true',file='data_file.txt')
        self.doTest(14)

    def test_15_gpload_reuse_table_merge_mode_with_different_columns_order(self):
        "15 gpload merge mode with different columns order "
        copy_data('external_file_10.txt','data/data_file.tbl')
        write_config_file('merge','true',file='data/data_file.tbl',columns_flag='1',mapping='1')
        self.doTest(15)

    def test_16_gpload_formatOpts_quote(self):
        "16 gpload formatOpts quote unspecified in CSV with reuse "
        copy_data('external_file_11.csv','data_file.csv')
        write_config_file(reuse_flag='true',formatOpts='csv',file='data_file.csv',table='csvtable',format='csv',delimiter="','")
        self.doTest(16)

    def test_17_gpload_formatOpts_quote(self):
        "17 gpload formatOpts quote '\\x26'(&) with reuse"
        copy_data('external_file_12.csv','data_file.csv')
        write_config_file(reuse_flag='true',formatOpts='csv',file='data_file.csv',table='csvtable',format='csv',delimiter="','",quote="'\x26'")
        self.doTest(17)

    def test_18_gpload_formatOpts_quote(self):
        "18 gpload formatOpts quote E'\\x26'(&) with reuse"
        copy_data('external_file_12.csv','data_file.csv')
        write_config_file(reuse_flag='true',formatOpts='csv',file='data_file.csv',table='csvtable',format='csv',delimiter="','",quote="E'\x26'")
        self.doTest(18)

    def test_19_gpload_formatOpts_escape(self):
        "19 gpload formatOpts escape '\\' with reuse"
        copy_data('external_file_01.txt','data_file.txt')
        file = mkpath('setup.sql')
        runfile(file)
        write_config_file(reuse_flag='true',formatOpts='text',file='data_file.txt',table='texttable',escape='\\')
        self.doTest(19)

    def test_20_gpload_formatOpts_escape(self):
        "20 gpload formatOpts escape '\\' with reuse"
        copy_data('external_file_01.txt','data_file.txt')
        write_config_file(reuse_flag='true',formatOpts='text',file='data_file.txt',table='texttable',escape= '\x5C')
        self.doTest(20)

    def test_21_gpload_formatOpts_escape(self):
        "21 gpload formatOpts escape E'\\\\' with reuse"
        copy_data('external_file_01.txt','data_file.txt')
        write_config_file(reuse_flag='true',formatOpts='text',file='data_file.txt',table='texttable',escape="E'\\\\'")
        self.doTest(21)

    # Case 22 is flaky on Concourse. It may report
    # "Fatal Python error: GC object already tracked" during testing.
    # We can't reproduce it locally, so we disable it in order not to block others.
    #def test_22_gpload_error_count(self):
    #    "22 gpload error count"
    #    f = open(mkpath('query22.sql'),'a')
    #    f.write("\! psql -d reuse_gptest -c 'select count(*) from csvtable;'")
    #    f.close()
    #    f = open(mkpath('data/large_file.csv'),'w')
    #    for i in range(0, 10000):
    #        if i % 2 == 0:
    #            f.write('1997,Ford,E350,"ac, abs, moon",3000.00,a\n')
    #        else:
    #            f.write('1997,Ford,E350,"ac, abs, moon",3000.00\n')
    #    f.close()
    #    copy_data('large_file.csv','data_file.csv')
    #    write_config_file(reuse_flag='true',formatOpts='csv',file='data_file.csv',table='csvtable',format='csv',delimiter="','",log_errors=True,error_limit='90000000')
    #    self.doTest(22)

    def test_23_gpload_error_count(self):
        "23 gpload error_table"
        file = mkpath('setup.sql')
        runfile(file)
        f = open(mkpath('query23.sql'),'a')
        f.write("\! psql -d reuse_gptest -c 'select count(*) from csvtable;'")
        f.close()
        f = open(mkpath('data/large_file.csv'),'w')
        for i in range(0, 10000):
            if i % 2 == 0:
                f.write('1997,Ford,E350,"ac, abs, moon",3000.00,a\n')
            else:
                f.write('1997,Ford,E350,"ac, abs, moon",3000.00\n')
        f.close()
        copy_data('large_file.csv','data_file.csv')
        write_config_file(reuse_flag='true',formatOpts='csv',file='data_file.csv',table='csvtable',format='csv',delimiter="','",error_table="err_table",error_limit='90000000')
        self.doTest(23)

    def test_24_gpload_error_count_with_externalSchema(self):
        "24 gpload error count with externalSchema"
        f = open(mkpath('query24.sql'),'a')
        f.write("\! psql -d reuse_gptest -c 'select count(*) from csvtable;'")
        f.close()
        f = open(mkpath('data/large_file.csv'),'w')
        for i in range(0, 10000):
            if i % 2 == 0:
                f.write('1997,Ford,E350,"ac, abs, moon",3000.00,a\n')
            else:
                f.write('1997,Ford,E350,"ac, abs, moon",3000.00\n')
        f.close()
        copy_data('large_file.csv','data_file.csv')
        write_config_file(reuse_flag='true',formatOpts='csv',file='data_file.csv',table='csvtable',format='csv',delimiter="','",log_errors=True,error_limit='90000000',externalSchema='test')
        self.doTest(24)

    def test_25_gpload_ext_staging_table(self):
        "25 gpload reuse ext_staging_table if it is configured"
        file = mkpath('setup.sql')
        runfile(file)
        f = open(mkpath('query25.sql'),'a')
        f.write("\! psql -d reuse_gptest -c 'select count(*) from csvtable;'")
        f.close()
        copy_data('external_file_13.csv','data_file.csv')
        write_config_file(reuse_flag='true',formatOpts='csv',file='data_file.csv',table='csvtable',format='csv',delimiter="','",log_errors=True,error_limit='10',staging_table='staging_table')
        self.doTest(25)

    def test_26_gpload_ext_staging_table_with_externalschema(self):
        "26 gpload reuse ext_staging_table if it is configured with externalschema"
        file = mkpath('setup.sql')
        runfile(file)
        f = open(mkpath('query26.sql'),'a')
        f.write("\! psql -d reuse_gptest -c 'select count(*) from csvtable;'")
        f.close()
        copy_data('external_file_13.csv','data_file.csv')
        write_config_file(reuse_flag='true',formatOpts='csv',file='data_file.csv',table='csvtable',format='csv',delimiter="','",log_errors=True,error_limit='10',staging_table='staging_table',externalSchema='test')
        self.doTest(26)

    def test_27_gpload_ext_staging_table_with_externalschema(self):
        "27 gpload reuse ext_staging_table if it is configured with externalschema"
        file = mkpath('setup.sql')
        runfile(file)
        f = open(mkpath('query27.sql'),'a')
        f.write("\! psql -d reuse_gptest -c 'select count(*) from test.csvtable;'")
        f.close()
        copy_data('external_file_13.csv','data_file.csv')
        write_config_file(reuse_flag='true',formatOpts='csv',file='data_file.csv',table='test.csvtable',format='csv',delimiter="','",log_errors=True,error_limit='10',staging_table='staging_table',externalSchema="'%'")
        self.doTest(27)

    def test_28_gpload_ext_staging_table_with_dot(self):
        "28 gpload reuse ext_staging_table if it is configured with dot"
        file = mkpath('setup.sql')
        runfile(file)
        f = open(mkpath('query28.sql'),'a')
        f.write("\! psql -d reuse_gptest -c 'select count(*) from test.csvtable;'")
        f.close()
        copy_data('external_file_13.csv','data_file.csv')
        write_config_file(reuse_flag='true',formatOpts='csv',file='data_file.csv',table='test.csvtable',format='csv',delimiter="','",log_errors=True,error_limit='10',staging_table='t.staging_table')
        self.doTest(28)

    def test_29_gpload_reuse_table_insert_mode_with_reuse_and_null(self):
        "29 gpload insert mode with reuse and null"
        runfile(mkpath('setup.sql'))
        f = open(mkpath('query29.sql'),'a')
        f.write("\! psql -d reuse_gptest -c 'select count(*) from texttable where n2 is null;'")
        f.close()
        copy_data('external_file_14.txt','data_file.txt')
        write_config_file(mode='insert',reuse_flag='true',file='data_file.txt',log_errors=True, error_limit='100')
        self.doTest(29)

    def test_30_gpload_reuse_table_update_mode_with_fast_match(self):
        "30 gpload update mode with fast match"
        drop_tables()
        copy_data('external_file_04.txt','data_file.txt')
        write_config_file(mode='update',reuse_flag='true',fast_match='true',file='data_file.txt')
        self.doTest(30)

    def test_31_gpload_reuse_table_update_mode_with_fast_match_and_different_columns_number(self):
        "31 gpload update mode with fast match and different columns number "
        psql_run(cmd="ALTER TABLE texttable ADD column n8 text",dbname='reuse_gptest')
        copy_data('external_file_08.txt','data_file.txt')
        write_config_file(mode='update',reuse_flag='true',fast_match='true',file='data_file.txt')
        self.doTest(31)

    def test_32_gpload_update_mode_without_reuse_table_with_fast_match(self):
        "32 gpload update mode when reuse table is false and fast match is true"
        drop_tables()
        copy_data('external_file_08.txt','data_file.txt')
        write_config_file(mode='update',reuse_flag='false',fast_match='true',file='data_file.txt')
        self.doTest(32)

    def test_33_gpload_reuse_table_merge_mode_with_fast_match_and_external_schema(self):
        "33 gpload merge mode with fast match and external schema"
        file = mkpath('setup.sql')
        runfile(file)
        copy_data('external_file_04.txt','data_file.txt')
        write_config_file(mode='merge',reuse_flag='true',fast_match='true',file='data_file.txt',externalSchema='test')
        self.doTest(33)

    def test_34_gpload_reuse_table_merge_mode_with_fast_match_and_encoding(self):
        "34 gpload merge mode with fast match and encoding GBK"
        file = mkpath('setup.sql')
        runfile(file)
        copy_data('external_file_04.txt','data_file.txt')
        write_config_file(mode='merge',reuse_flag='true',fast_match='true',file='data_file.txt',encoding='GBK')
        self.doTest(34)

    def test_35_gpload_reuse_table_merge_mode_with_fast_match_default_encoding(self):
        "35 gpload does not reuse table when encoding is set from GBK to empty"
        write_config_file(mode='merge',reuse_flag='true',fast_match='true',file='data_file.txt')
        self.doTest(35)

    def test_36_gpload_reuse_table_merge_mode_default_encoding(self):
        "36 gpload merge mode with encoding GBK"
        file = mkpath('setup.sql')
        runfile(file)
        copy_data('external_file_04.txt','data_file.txt')
        write_config_file(mode='merge',reuse_flag='true',fast_match='false',file='data_file.txt',encoding='GBK')
        self.doTest(36)

    def test_37_gpload_reuse_table_merge_mode_invalid_encoding(self):
        "37 gpload merge mode with invalid encoding"
        file = mkpath('setup.sql')
        runfile(file)
        copy_data('external_file_04.txt','data_file.txt')
        write_config_file(mode='merge',reuse_flag='true',fast_match='false',file='data_file.txt',encoding='xxxx')
        self.doTest(37)

    def test_38_gpload_without_preload(self):
        "38 gpload insert mode without preload"
        file = mkpath('setup.sql')
        runfile(file)
        copy_data('external_file_04.txt','data_file.txt')
        write_config_file(mode='insert',reuse_flag='true',fast_match='false',file='data_file.txt',error_table="err_table",error_limit='1000',preload=False)
        self.doTest(38)

    def test_39_gpload_fill_missing_fields(self):
        "39 gpload fill missing fields"
        file = mkpath('setup.sql')
        runfile(file)
        copy_data('external_file_04.txt','data_file.txt')
        write_config_file(mode='insert',reuse_flag='false',fast_match='false',file='data_file.txt',table='texttable1',
                          error_limit='1000', fill=True)
        self.doTest(39)

if __name__ == '__main__':
    suite = unittest.TestLoader().loadTestsFromTestCase(GPLoad_FormatOpts_TestCase)
    runner = unittest.TextTestRunner(verbosity=2)
    ret = not runner.run(suite).wasSuccessful()
    sys.exit(ret)
It may report: Fatal Python error: GC object already", "< ' + ifile if not (flag == '-q'): #", "dfile = diffFile( f1, outputPath = outputPath ) # Gets", "else: filename = fname.split( \"/\" ) fname = os.path.splitext( filename[len(", "f = open(mkpath('query29.sql'),'a') f.write(\"\\! psql -d reuse_gptest -c 'select count(*)", "None): if len(outputFile) == 0: (ok, out) = psql_run(ifile =", "psql notice @param username: psql user @param host : to", "optionalFlags, outputPath=outputPath) diff = None if result else read_diff(ifile, outputPath)", "f.write(\"\\nGPLOAD:\") f.write(\"\\n INPUT:\") f.write(\"\\n - SOURCE:\") f.write(\"\\n LOCAL_HOSTNAME:\") f.write(\"\\n -", "and null\" runfile(mkpath('setup.sql')) f = open(mkpath('query29.sql'),'a') f.write(\"\\! psql -d reuse_gptest", "outputPath ) == 0: return os.path.splitext( fname )[0] + ext", "the proper names of the .out and .ans files. optionalFlags:", "raise PSQLError('missing cmd and ifile') if ofile == '-': ofile", "test_34_gpload_reuse_table_merge_mode_with_fast_match_and_encoding(self): \"34 gpload merge mode with fast match and encoding", "encoding GBK\" file = mkpath('setup.sql') runfile(file) copy_data('external_file_04.txt','data_file.txt') write_config_file(mode='merge',reuse_flag='true',fast_match='false',file='data_file.txt',encoding='GBK') self.doTest(36) def", "This is seldom issue. we can't reproduce it locally, so", "\"\"\" MYD = os.path.abspath(os.path.dirname(__file__)) mkpath = lambda *x: os.path.join(MYD, *x)", "GC object already tracked during testing. # This is seldom", "f.close() copy_data('external_file_13.csv','data_file.csv') write_config_file(reuse_flag='true',formatOpts='csv',file='data_file.csv',table='test.csvtable',format='csv',delimiter=\"','\",log_errors=True,error_limit='10',staging_table='t.staging_table') self.doTest(28) def test_29_gpload_reuse_table_insert_mode_with_reuse_and_null(self): \"29 gpload insert mode", "to the open() function, so you can theoretically pass any", "0: (ok, out) = psql_run(ifile = ifile,ofile = outFile(ifile, outputPath),flag", "n2: s_n2\") f.write(\"\\n n3: s_n3\") f.write(\"\\n n4: s_n4\") f.write(\"\\n n5:", "f.write(\"\\n - \"+mkpath(file)) if columns_flag=='1': f.write(\"\\n - COLUMNS:\") f.write(\"\\n -", "already tracked during testing. # This is seldom issue. 
we", "\".diff\", outputPath = \"\" ): if len( outputPath ) ==", "== 0: # f.write('1997,Ford,E350,\"ac, abs, moon\",3000.00,a\\n') # else: # f.write('1997,Ford,E350,\"ac,", "texttable ADD column n8 text\",dbname='reuse_gptest') copy_data('external_file_08.txt','data_file.txt') write_config_file(mode='update',reuse_flag='true',fast_match='true',file='data_file.txt') self.doTest(31) def test_32_gpload_update_mode_without_reuse_table_with_fast_match(self):", "print 'could not connect to database: ' + errorMessage list", "text\") f.write(\"\\n - s_s2: text\") f.write(\"\\n - s_dt: timestamp\") f.write(\"\\n", "mode: if mode == 'insert': f.write(\"\\n - MODE: \"+'insert') if", "diff:\\n{}\".format(diff)) return True def doTest(self, num): file = mkpath('query%d.diff' %", "count(*) from csvtable;'\") f.close() f = open(mkpath('data/large_file.csv'),'w') for i in", "for i in list: name = i[0] match = re.search('ext_gpload',name)", "user %s\" % (host, user)) for line in out: out", "host = \"-h %s\" % host if port is None:", "in fileinput.FileInput(file,inplace=1): line = line.replace(\"gpload.py \",\"gpload \") print str(re.sub('\\n','',line)) def", "outputPath): \"\"\" Opens the diff file that is assocated with", "else read_diff(ifile, outputPath) self.assertTrue(result, \"query resulted in diff:\\n{}\".format(diff)) return True", "def test_00_gpload_formatOpts_setup(self): \"0 gpload setup\" for num in range(1,40): f", "assocated with the given input file and returns its contents", "gpload insert mode without reuse\" runfile(mkpath('setup.sql')) f = open(mkpath('query7.sql'),'a') f.write(\"\\!", "-d reuse_gptest -c 'select count(*) from csvtable;'\") # f.close() #", "'select count(*) from test.csvtable;'\") f.close() copy_data('external_file_13.csv','data_file.csv') write_config_file(reuse_flag='true',formatOpts='csv',file='data_file.csv',table='test.csvtable',format='csv',delimiter=\"','\",log_errors=True,error_limit='10',staging_table='t.staging_table') self.doTest(28) def test_29_gpload_reuse_table_insert_mode_with_reuse_and_null(self):", "filename[len( filename ) - 1] )[0] return outputPath + \"/\"", "self.doTest(14) def test_15_gpload_reuse_table_merge_mode_with_different_columns_order(self): \"15 gpload merge mode with different columns'", "abs, moon\",3000.00,a\\n') else: f.write('1997,Ford,E350,\"ac, abs, moon\",3000.00\\n') f.close() copy_data('large_file.csv','data_file.csv') write_config_file(reuse_flag='true',formatOpts='csv',file='data_file.csv',table='csvtable',format='csv',delimiter=\"','\",error_table=\"err_table\",error_limit='90000000') self.doTest(23)", "= None, flag = '-e',dbname = None, username = None,", "for i in range(0, 10000): # if i % 2", "\"7 gpload insert mode without reuse\" runfile(mkpath('setup.sql')) f = open(mkpath('query7.sql'),'a')", "ipaddrlist = list(set([(ai[4][0]) for ai in hostinfo])) for myip in", ") if not os.access( f2, os.R_OK ): raise Exception( 'Error:", "f.write(\"\\! psql -d reuse_gptest -c 'select count(*) from csvtable;'\") f.close()", "file = mkpath('setup.sql') runfile(file) f = open(mkpath('query25.sql'),'a') f.write(\"\\! 
psql -d", "n6: s_n6\") f.write(\"\\n n7: s_n7\") f.write(\"\\n n8: s_n8\") f.write(\"\\n n9:", "mode without reuse \" copy_data('external_file_07.txt','data_file.txt') write_config_file('merge','false',file='data_file.txt') self.doTest(11) def test_12_gpload_reuse_table_merge_mode_with_different_columns_number_in_file(self): \"12", "\\ % (gphome, mdd, port, command) (ok,out) = run(cmd) if", "gpload -f \"+mkpath('config/config_file')+ \" -d reuse_gptest\\n\") f.close() file = mkpath('setup.sql')", "gpload formatOpts delimiter E'\\t' with reuse\" copy_data('external_file_02.txt','data_file.txt') write_config_file(reuse_flag='true',formatOpts='text',file='data_file.txt',table='texttable',delimiter=\"E'\\\\t'\") self.doTest(3) def", "count(*) from texttable;'\") f.close() write_config_file(mode='insert',reuse_flag='false') self.doTest(7) def test_08_gpload_reuse_table_update_mode_with_reuse(self): \"8 gpload", "schema\" file = mkpath('setup.sql') runfile(file) f = open(mkpath('query24.sql'),'a') f.write(\"\\! psql", "= None, port = None): ''' Run a command or", "\"\".join(out) raise Exception(error_msg) return str(master_value) \"\"\" Global Values \"\"\" MYD", "f = open(mkpath('query7.sql'),'a') f.write(\"\\! psql -d reuse_gptest -c 'select count(*)", "write_config_file(mode='update',reuse_flag='true',fast_match='true',file='data_file.txt') self.doTest(30) def test_31_gpload_reuse_table_update_mode_with_fast_match_and_different_columns_number(self): \"31 gpload update mode with fast", "copy_data('external_file_12.csv','data_file.csv') write_config_file(reuse_flag='true',formatOpts='csv',file='data_file.csv',table='csvtable',format='csv',delimiter=\"','\",quote=\"'\\x26'\") self.doTest(17) def test_18_gpload_formatOpts_quote(self): \"18 gpload formatOpts quote E'\\\\x26'(&)", "# else: # f.write('1997,Ford,E350,\"ac, abs, moon\",3000.00\\n') # f.close() # copy_data('large_file.csv','data_file.csv')", "psql -d reuse_gptest -c 'select count(*) from texttable where n2", "its contents as a string. \"\"\" dfile = diffFile(ifile, outputPath)", "insert mode without reuse\" runfile(mkpath('setup.sql')) f = open(mkpath('query7.sql'),'a') f.write(\"\\! psql", "for num in range(1,40): f = open(mkpath('query%d.sql' % num),'w') f.write(\"\\!", "Values \"\"\" MYD = os.path.abspath(os.path.dirname(__file__)) mkpath = lambda *x: os.path.join(MYD,", "from texttable where n2=222;'\") f.close() copy_data('external_file_05.txt','data_file.txt') write_config_file(mode='update',reuse_flag='false',file='data_file.txt') self.doTest(9) def test_10_gpload_reuse_table_merge_mode_with_reuse(self):", "f.write(\"\\n n6: s_n6\") f.write(\"\\n n7: s_n7\") f.write(\"\\n n8: s_n8\") f.write(\"\\n", "bigint\") f.write(\"\\n - s_n4: decimal\") f.write(\"\\n - s_n5: numeric\") f.write(\"\\n", "else: if os.path.exists(myinitfile): (ok, out) = run('../gpdiff.pl -w ' +", "= \"\", myinitfile = \"\"): LMYD = os.path.abspath(os.path.dirname(__file__)) if not", "@param cmd: command line @param flag: -e Run SQL with", "or file against psql. Return True if OK. 
@param dbname:", "escape E'\\\\\\\\' with reuse\" copy_data('external_file_01.txt','data_file.txt') write_config_file(reuse_flag='true',formatOpts='text',file='data_file.txt',table='texttable',escape=\"E'\\\\\\\\'\") self.doTest(21) # case 22", "f.write(\"\\n - MATCH_COLUMNS:\") f.write(\"\\n - n1\") f.write(\"\\n - s1\") f.write(\"\\n", "write_config_file(reuse_flag='true',formatOpts='text',file='data_file.txt',table='texttable',delimiter=\"E'\\u0009'\") self.doTest(4) def test_05_gpload_formatOpts_delimiter(self): \"5 gpload formatOpts delimiter E'\\\\'' with", "reuse (RERUN with different columns number in file) \" psql_run(cmd=\"ALTER", "is None: PGHOST = HOST d = mkpath('config') if not", "- UPDATE_COLUMNS:\") f.write(\"\\n - n2\") f.write(\"\\n - MATCH_COLUMNS:\") f.write(\"\\n -", "as test case failures, when the output is different from", "reuse \" drop_tables() copy_data('external_file_06.txt','data_file.txt') write_config_file('merge','true',file='data_file.txt') self.doTest(10) def test_11_gpload_reuse_table_merge_mode_without_reuse(self): \"11 gpload", "a failure. The reason for an error might be program", "dfile ) ) if ok: os.unlink( dfile ) return ok", "socket import fileinput import platform import re try: import subprocess32", "diff: return diff.read() def modify_sql_file(num): file = mkpath('query%d.sql' % num)", "p = subprocess.Popen(cmd,shell=True,stdout=subprocess.PIPE,stderr=subprocess.PIPE) out = p.communicate()[0] ret = [] ret.append(out)", "contents as a string. \"\"\" dfile = diffFile(ifile, outputPath) with", "blank spaces) to ignore blank lines. By default, diffs are", "By default, diffs are unified with 3 lines of context", "PGUSER # Use the default login user if PGOPTIONS is", "oFile: an optional output file. mode: What to do if", "= open(mkpath('query22.sql'),'a') # f.write(\"\\! psql -d reuse_gptest -c 'select count(*)", "f.write('1997,Ford,E350,\"ac, abs, moon\",3000.00\\n') f.close() copy_data('large_file.csv','data_file.csv') write_config_file(reuse_flag='true',formatOpts='csv',file='data_file.csv',table='csvtable',format='csv',delimiter=\"','\",error_table=\"err_table\",error_limit='90000000') self.doTest(23) def test_24_gpload_error_count(self): \"24", "def test_21_gpload_formatOpts_escape(self): \"21 gpload formatOpts escape E'\\\\\\\\' with reuse\" copy_data('external_file_01.txt','data_file.txt')", "def get_table_name(): try: db = pg.DB(dbname='reuse_gptest' ,host='localhost' ,port=int(PGPORT) ) except", "# if i % 2 == 0: # f.write('1997,Ford,E350,\"ac, abs,", "\"\"\" p = subprocess.Popen(cmd,shell=True,stdout=subprocess.PIPE,stderr=subprocess.PIPE) out = p.communicate()[0] ret = []", "init_file suitePath = f1[0:f1.rindex( \"/\" )] if os.path.exists(suitePath + \"/init_file\"):", "is define as test case failures, when the output is", "if result else read_diff(ifile, outputPath) self.assertTrue(result, \"query resulted in diff:\\n{}\".format(diff))", "return (ok, out) def psql_run(ifile = None, ofile = None,", "file = os.environ.get('MASTER_DATA_DIRECTORY')+'/postgresql.conf' if os.path.isfile(file): with open(file) as f: for", "dot\" file = mkpath('setup.sql') runfile(file) f = open(mkpath('query28.sql'),'a') f.write(\"\\! 
psql", "return (rc,ret) def outFile(fname,outputPath = ''): return changeExtFile(fname, \".out\", outputPath)", "= mkpath('setup.sql') runfile(file) copy_data('external_file_04.txt','data_file.txt') write_config_file(mode='merge',reuse_flag='true',fast_match='true',file='data_file.txt',encoding='GBK') self.doTest(34) def test_35_gpload_reuse_table_merge_mode_with_fast_match_default_encoding(self): \"35 gpload", "open(mkpath('config/config_file'),'w') f.write(\"VERSION: 1.0.0.1\") if database: f.write(\"\\nDATABASE: \"+database) f.write(\"\\nUSER: \"+os.environ.get('USER')) f.write(\"\\nHOST:", "pass \" -B \" (with the blank spaces) to ignore", "f.write(\"\\n INPUT:\") f.write(\"\\n - SOURCE:\") f.write(\"\\n LOCAL_HOSTNAME:\") f.write(\"\\n - \"+hostNameAddrs)", "optional output file. mode: What to do if the output", "*x: os.path.join(MYD, *x) UPD = os.path.abspath(mkpath('..')) if UPD not in", "ifile: arg = ' < ' + ifile if not", "self.doTest(4) def test_05_gpload_formatOpts_delimiter(self): \"5 gpload formatOpts delimiter E'\\\\'' with reuse\"", "- MODE: \"+'insert') if mode == 'update': f.write(\"\\n - MODE:", "len(outputFile) == 0: (ok, out) = psql_run(ifile = ifile,ofile =", "= None, username = None, PGOPTIONS = None, host =", "mode ''' if dbname is None: dbname = DBNAME if", "write_config_file(mode='update',reuse_flag='true',fast_match='true',file='data_file.txt') self.doTest(31) def test_32_gpload_update_mode_without_reuse_table_with_fast_match(self): \"32 gpload update mode when reuse", "os.path.isfile(file): run(\"rm -f\" + \" \" + file) modify_sql_file(num) file", "self.doTest(13) def test_14_gpload_reuse_table_update_mode_with_reuse_RERUN(self): \"14 gpload update mode with reuse (RERUN)", "differenct columns number) \" psql_run(cmd=\"ALTER TABLE texttable ADD column n8", "formatOpts delimiter E'\\u0009' with reuse\" copy_data('external_file_02.txt','data_file.txt') write_config_file(reuse_flag='true',formatOpts='text',file='data_file.txt',table='texttable',delimiter=\"E'\\u0009'\") self.doTest(4) def test_05_gpload_formatOpts_delimiter(self):", "to connect to a different host @param port : port", "None: dbname = DBNAME if username is None: username =", "user = os.environ.get('USER'),gphome = os.environ['GPHOME'], mdd=os.environ['MASTER_DATA_DIRECTORY'],port = os.environ['PGPORT']): master_pattern =", "is different from the expected result. 
''' pass class GPLoad_FormatOpts_TestCase(unittest.TestCase):", "- DELIMITER: \"+delimiter) if encoding: f.write(\"\\n - ENCODING: \"+encoding) if", "columns number in DB table) \" preTest = mkpath('pre_test_13.sql') psql_run(preTest,", "isFileEqual( f1, f2, optionalFlags = \"\", outputPath = \"\", myinitfile", "\" \" + file) modify_sql_file(num) file = mkpath('query%d.sql' % num)", "None, port = None): if len(outputFile) == 0: (ok, out)", "doTest(self, num): file = mkpath('query%d.diff' % num) if os.path.isfile(file): run(\"rm", "mdd=os.environ['MASTER_DATA_DIRECTORY'],port = os.environ['PGPORT']): master_pattern = \"Context:\\s*-1\\s*Value:\\s*\\d+\" command = \"gpconfig -s", "2>&1' % (LMYD, myinitfile, f1, f2, dfile)) else: (ok, out)", "locally, so we disable it, in order to not blocking", "outputPath = \"\", myinitfile = \"\"): LMYD = os.path.abspath(os.path.dirname(__file__)) if", "test_36_gpload_reuse_table_merge_mode_default_encoding(self): \"36 gpload merge mode with encoding GBK\" file =", "'../gpdiff.pl -w ' + optionalFlags + \\ ' -I NOTICE:", "is None: hostname = socket.gethostname() else: hostname = hostname hostinfo", "file = mkpath('query%d.diff' % num) if os.path.isfile(file): run(\"rm -f\" +", "command to run at the shell. oFile: an optional output", "\"23 gpload error_table\" file = mkpath('setup.sql') runfile(file) f = open(mkpath('query23.sql'),'a')", "write_config_file(mode='merge',reuse_flag='true',fast_match='true',file='data_file.txt',externalSchema='test') self.doTest(33) def test_34_gpload_reuse_table_merge_mode_with_fast_match_and_encoding(self): \"34 gpload merge mode with fast", "n2=222;'\") f.close() copy_data('external_file_05.txt','data_file.txt') write_config_file(mode='update',reuse_flag='false',file='data_file.txt') self.doTest(9) def test_10_gpload_reuse_table_merge_mode_with_reuse(self): \"10 gpload merge", "mode with different columns' order \" copy_data('external_file_10.txt','data/data_file.tbl') write_config_file('merge','true',file='data/data_file.tbl',columns_flag='1',mapping='1') self.doTest(15) def", "-d reuse_gptest -c 'select count(*) from csvtable;'\") f.close() copy_data('external_file_13.csv','data_file.csv') write_config_file(reuse_flag='true',formatOpts='csv',file='data_file.csv',table='csvtable',format='csv',delimiter=\"','\",log_errors=True,error_limit='10',staging_table='staging_table')", "write_config_file(reuse_flag='true',formatOpts='text',file='data_file.txt',table='texttable',delimiter=\"'\\t'\") self.doTest(2) def test_03_gpload_formatOpts_delimiter(self): \"3 gpload formatOpts delimiter E'\\t' with", "if columns_flag=='1': f.write(\"\\n - COLUMNS:\") f.write(\"\\n - s_s1: text\") f.write(\"\\n", "result. ''' pass class GPLoad_FormatOpts_TestCase(unittest.TestCase): def check_result(self,ifile, optionalFlags = \"-U3\",", "number) \" psql_run(cmd=\"ALTER TABLE texttable ADD column n8 text\",dbname='reuse_gptest') copy_data('external_file_08.txt','data_file.txt')", "self.check_result(file) def test_00_gpload_formatOpts_setup(self): \"0 gpload setup\" for num in range(1,40):", "None, flag = '-e',dbname = None, username = None, PGOPTIONS", "file = mkpath('setup.sql') runfile(file) f = open(mkpath('query23.sql'),'a') f.write(\"\\! 
psql -d", "f.write(\"\\n - s1\") f.write(\"\\n - s2\") if mapping=='1': f.write(\"\\n -", "= subprocess.Popen(cmd,shell=True,stdout=subprocess.PIPE,stderr=subprocess.PIPE) out = p.communicate()[0] ret = [] ret.append(out) rc", "example, pass \" -B \" (with the blank spaces) to", "None) ipaddrlist = list(set([(ai[4][0]) for ai in hostinfo])) for myip", "valid for the second parameter of open(). \"\"\" p =", "error_limit='1000', fill=True) self.doTest(39) if __name__ == '__main__': suite = unittest.TestLoader().loadTestsFromTestCase(GPLoad_FormatOpts_TestCase)" ]
[ "4 }, # 0. start with 'blank' { 'd': 2,", "}, # 3. 'digit' after 'dot' { 'd': 3 },", "5. 'e' { 'd': 7 }, # 6. 'sign' after", "'digit' after 'dot' (‘blank’ before 'dot') { 's': 6, 'd':", "'e' { ' ': 8 } # 8. end with", "''' 请实现一个函数用来判断字符串是否表示数值(包括整数和小数)。例如,字符串\"+100\"、\"5e2\"、\"-123\"、\"3.1416\"、\"-1E-16\"、\"0123\"都表示数值,但\"12e\"、\"1a3.14\"、\"1.2.3\"、\"+-5\"及\"12e+5.4\"都不是。 来源:力扣(LeetCode) 链接:https://leetcode-cn.com/problems/biao-shi-shu-zhi-de-zi-fu-chuan-lcof ''' class Solution: def isNumber(self, s:", "str) -> bool: states = [ { ' ': 0,", "'dot') { 's': 6, 'd': 7 }, # 5. 'e'", "'e': 5, ' ': 8 }, # 2. 'digit' before", "2, '.': 4 } , # 1. 'sign' before 'e'", "8 }, # 7. 'digit' after 'e' { ' ':", "after 'e' { 'd': 7, ' ': 8 }, #", "dot, blank else: t = '?' # unknown if t", "def isNumber(self, s: str) -> bool: states = [ {", "# 5. 'e' { 'd': 7 }, # 6. 'sign'", "# e or E elif c in \". \": t", "E elif c in \". \": t = c #", ", # 1. 'sign' before 'e' { 'd': 2, '.':", "5, ' ': 8 }, # 2. 'digit' before 'dot'", "7 }, # 5. 'e' { 'd': 7 }, #", "'s' # sign elif c in \"eE\": t = 'e'", "{ 'd': 3, 'e': 5, ' ': 8 }, #", "'blank' ] p = 0 # start with state 0", "states[p]: return False p = states[p][t] return p in (2,", "0. start with 'blank' { 'd': 2, '.': 4 }", "6, 'd': 7 }, # 5. 'e' { 'd': 7", "unknown if t not in states[p]: return False p =", "before 'dot' { 'd': 3, 'e': 5, ' ': 8", "<= c <= '9': t = 'd' # digit elif", "'d': 3, 'e': 5, ' ': 8 }, # 3.", "'.': 3, 'e': 5, ' ': 8 }, # 2.", "2. 'digit' before 'dot' { 'd': 3, 'e': 5, '", "'d': 2, '.': 4 }, # 0. start with 'blank'", "not in states[p]: return False p = states[p][t] return p", "'d': 7 }, # 6. 'sign' after 'e' { 'd':", "8 } # 8. end with 'blank' ] p =", "state 0 for c in s: if '0' <= c", "s: if '0' <= c <= '9': t = 'd'", "c # dot, blank else: t = '?' # unknown", "] p = 0 # start with state 0 for", "= [ { ' ': 0, 's': 1, 'd': 2,", "elif c in \"eE\": t = 'e' # e or", "3, 'e': 5, ' ': 8 }, # 3. 'digit'", "sign elif c in \"eE\": t = 'e' # e", "start with 'blank' { 'd': 2, '.': 4 } ,", "= 's' # sign elif c in \"eE\": t =", "'d': 3 }, # 4. 'digit' after 'dot' (‘blank’ before", "in states[p]: return False p = states[p][t] return p in", "3 }, # 4. 'digit' after 'dot' (‘blank’ before 'dot')", "'e' { 'd': 7 }, # 6. 'sign' after 'e'", "'e' # e or E elif c in \". \":", "' ': 0, 's': 1, 'd': 2, '.': 4 },", "8 }, # 2. 'digit' before 'dot' { 'd': 3,", "# unknown if t not in states[p]: return False p", "{ 'd': 3 }, # 4. 'digit' after 'dot' (‘blank’", "return False p = states[p][t] return p in (2, 3,", "# 6. 'sign' after 'e' { 'd': 7, ' ':", "0 # start with state 0 for c in s:", "'d': 2, '.': 4 } , # 1. 'sign' before", "}, # 4. 'digit' after 'dot' (‘blank’ before 'dot') {", "t = 'e' # e or E elif c in", "in \"+-\": t = 's' # sign elif c in", "e or E elif c in \". \": t =", "' ': 8 }, # 2. 'digit' before 'dot' {", "# 3. 'digit' after 'dot' { 'd': 3 }, #", "-> bool: states = [ { ' ': 0, 's':", "# 7. 'digit' after 'e' { ' ': 8 }", "= 'e' # e or E elif c in \".", "in s: if '0' <= c <= '9': t =", "# dot, blank else: t = '?' # unknown if", "\"eE\": t = 'e' # e or E elif c", "' ': 8 } # 8. end with 'blank' ]", "p = states[p][t] return p in (2, 3, 7, 8)", "after 'dot' (‘blank’ before 'dot') { 's': 6, 'd': 7", "states = [ { ' ': 0, 's': 1, 'd':", "c in \"eE\": t = 'e' # e or E", "3. 
'digit' after 'dot' { 'd': 3 }, # 4.", "# digit elif c in \"+-\": t = 's' #", "s: str) -> bool: states = [ { ' ':", "{ ' ': 0, 's': 1, 'd': 2, '.': 4", "end with 'blank' ] p = 0 # start with", "}, # 5. 'e' { 'd': 7 }, # 6.", "6. 'sign' after 'e' { 'd': 7, ' ': 8", "c in s: if '0' <= c <= '9': t", "': 8 } # 8. end with 'blank' ] p", "'sign' after 'e' { 'd': 7, ' ': 8 },", "': 8 }, # 2. 'digit' before 'dot' { 'd':", "'digit' after 'e' { ' ': 8 } # 8.", "'d': 7 }, # 5. 'e' { 'd': 7 },", "(‘blank’ before 'dot') { 's': 6, 'd': 7 }, #", "= c # dot, blank else: t = '?' #", "}, # 0. start with 'blank' { 'd': 2, '.':", "start with state 0 for c in s: if '0'", "elif c in \"+-\": t = 's' # sign elif", "0, 's': 1, 'd': 2, '.': 4 }, # 0.", "if '0' <= c <= '9': t = 'd' #", "class Solution: def isNumber(self, s: str) -> bool: states =", "after 'dot' { 'd': 3 }, # 4. 'digit' after", "8. end with 'blank' ] p = 0 # start", "{ 'd': 2, '.': 3, 'e': 5, ' ': 8", "'e' { 'd': 2, '.': 3, 'e': 5, ' ':", "bool: states = [ { ' ': 0, 's': 1,", "'digit' before 'dot' { 'd': 3, 'e': 5, ' ':", "with 'blank' ] p = 0 # start with state", "= 'd' # digit elif c in \"+-\": t =", "}, # 7. 'digit' after 'e' { ' ': 8", "}, # 2. 'digit' before 'dot' { 'd': 3, 'e':", "{ ' ': 8 } # 8. end with 'blank'", "'dot' (‘blank’ before 'dot') { 's': 6, 'd': 7 },", "': 0, 's': 1, 'd': 2, '.': 4 }, #", "'?' # unknown if t not in states[p]: return False", "7, ' ': 8 }, # 7. 'digit' after 'e'", "8 }, # 3. 'digit' after 'dot' { 'd': 3", "# start with state 0 for c in s: if", "blank else: t = '?' # unknown if t not", "2, '.': 4 }, # 0. start with 'blank' {", "t not in states[p]: return False p = states[p][t] return", "elif c in \". \": t = c # dot,", "'d' # digit elif c in \"+-\": t = 's'", "else: t = '?' # unknown if t not in", "t = 's' # sign elif c in \"eE\": t", "1. 'sign' before 'e' { 'd': 2, '.': 3, 'e':", "链接:https://leetcode-cn.com/problems/biao-shi-shu-zhi-de-zi-fu-chuan-lcof ''' class Solution: def isNumber(self, s: str) -> bool:", "[ { ' ': 0, 's': 1, 'd': 2, '.':", "{ 'd': 7 }, # 6. 'sign' after 'e' {", "# 1. 'sign' before 'e' { 'd': 2, '.': 3,", "} , # 1. 'sign' before 'e' { 'd': 2,", "or E elif c in \". \": t = c", "False p = states[p][t] return p in (2, 3, 7,", "digit elif c in \"+-\": t = 's' # sign", "c in \". \": t = c # dot, blank", "# 4. 'digit' after 'dot' (‘blank’ before 'dot') { 's':", "with state 0 for c in s: if '0' <=", "'e' { 'd': 7, ' ': 8 }, # 7.", "t = '?' # unknown if t not in states[p]:", "' ': 8 }, # 3. 'digit' after 'dot' {", "'.': 4 }, # 0. start with 'blank' { 'd':", "p = 0 # start with state 0 for c", "'s': 1, 'd': 2, '.': 4 }, # 0. start", "'d': 2, '.': 3, 'e': 5, ' ': 8 },", "2, '.': 3, 'e': 5, ' ': 8 }, #", "\". \": t = c # dot, blank else: t", "if t not in states[p]: return False p = states[p][t]", "': 8 }, # 3. 'digit' after 'dot' { 'd':", "''' class Solution: def isNumber(self, s: str) -> bool: states", "t = c # dot, blank else: t = '?'", "5, ' ': 8 }, # 3. 'digit' after 'dot'", "t = 'd' # digit elif c in \"+-\": t", "'d': 7, ' ': 8 }, # 7. 'digit' after", "'dot' { 'd': 3 }, # 4. 'digit' after 'dot'", "} # 8. end with 'blank' ] p = 0", "}, # 6. 'sign' after 'e' { 'd': 7, '", "# 0. start with 'blank' { 'd': 2, '.': 4", "# 8. end with 'blank' ] p = 0 #", "' ': 8 }, # 7. 'digit' after 'e' {", "'digit' after 'dot' { 'd': 3 }, # 4. 'digit'", "with 'blank' { 'd': 2, '.': 4 } , #", "3, 'e': 5, ' ': 8 }, # 2. 
'digit'", "before 'dot') { 's': 6, 'd': 7 }, # 5.", "for c in s: if '0' <= c <= '9':", "{ 'd': 2, '.': 4 } , # 1. 'sign'", "\"+-\": t = 's' # sign elif c in \"eE\":", "{ 'd': 7, ' ': 8 }, # 7. 'digit'", "<filename>code_week19_831_96/biao_shi_shu_zi.py ''' 请实现一个函数用来判断字符串是否表示数值(包括整数和小数)。例如,字符串\"+100\"、\"5e2\"、\"-123\"、\"3.1416\"、\"-1E-16\"、\"0123\"都表示数值,但\"12e\"、\"1a3.14\"、\"1.2.3\"、\"+-5\"及\"12e+5.4\"都不是。 来源:力扣(LeetCode) 链接:https://leetcode-cn.com/problems/biao-shi-shu-zhi-de-zi-fu-chuan-lcof ''' class Solution: def isNumber(self,", "Solution: def isNumber(self, s: str) -> bool: states = [", "'s': 6, 'd': 7 }, # 5. 'e' { 'd':", "after 'e' { ' ': 8 } # 8. end", "= '?' # unknown if t not in states[p]: return", "before 'e' { 'd': 2, '.': 3, 'e': 5, '", "= 0 # start with state 0 for c in", "# sign elif c in \"eE\": t = 'e' #", "'0' <= c <= '9': t = 'd' # digit", "# 2. 'digit' before 'dot' { 'd': 3, 'e': 5,", "7 }, # 6. 'sign' after 'e' { 'd': 7,", "': 8 }, # 7. 'digit' after 'e' { '", "c <= '9': t = 'd' # digit elif c", "7. 'digit' after 'e' { ' ': 8 } #", "1, 'd': 2, '.': 4 }, # 0. start with", "来源:力扣(LeetCode) 链接:https://leetcode-cn.com/problems/biao-shi-shu-zhi-de-zi-fu-chuan-lcof ''' class Solution: def isNumber(self, s: str) ->", "'sign' before 'e' { 'd': 2, '.': 3, 'e': 5,", "0 for c in s: if '0' <= c <=", "4 } , # 1. 'sign' before 'e' { 'd':", "'.': 4 } , # 1. 'sign' before 'e' {", "{ 's': 6, 'd': 7 }, # 5. 'e' {", "'blank' { 'd': 2, '.': 4 } , # 1.", "<= '9': t = 'd' # digit elif c in", "'dot' { 'd': 3, 'e': 5, ' ': 8 },", "'9': t = 'd' # digit elif c in \"+-\":", "c in \"+-\": t = 's' # sign elif c", "in \"eE\": t = 'e' # e or E elif", "4. 'digit' after 'dot' (‘blank’ before 'dot') { 's': 6,", "\": t = c # dot, blank else: t =", "in \". \": t = c # dot, blank else:", "isNumber(self, s: str) -> bool: states = [ { '", "'e': 5, ' ': 8 }, # 3. 'digit' after", "请实现一个函数用来判断字符串是否表示数值(包括整数和小数)。例如,字符串\"+100\"、\"5e2\"、\"-123\"、\"3.1416\"、\"-1E-16\"、\"0123\"都表示数值,但\"12e\"、\"1a3.14\"、\"1.2.3\"、\"+-5\"及\"12e+5.4\"都不是。 来源:力扣(LeetCode) 链接:https://leetcode-cn.com/problems/biao-shi-shu-zhi-de-zi-fu-chuan-lcof ''' class Solution: def isNumber(self, s: str)" ]
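# A quick self-check of the DFA against the examples in the docstring above.
# This harness is an addition for illustration; it assumes only the Solution
# class defined in this file.
if __name__ == '__main__':
    sol = Solution()
    for good in ["+100", "5e2", "-123", "3.1416", "-1E-16", "0123"]:
        assert sol.isNumber(good), good       # all of these must be accepted
    for bad in ["12e", "1a3.14", "1.2.3", "+-5", "12e+5.4"]:
        assert not sol.isNumber(bad), bad     # all of these must be rejected
    print("all docstring examples pass")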
[ "[], u'admin_state_up': True, u'network_id': u'NETWORK1', u'tenant_id': u'TENANTID', u'extra_dhcp_opts': [], u'binding:vif_type':", "u'10.0.0.1', u'cidr': u'10.0.0.0/24', u'id': u'SUBNET1' } SUBNET2_RESPONSE = { u'name':", "Rackspace, Inc. Licensed under the Apache License, Version 2.0 (the", "} ], u'id': u'PORT2', u'security_groups': [u'SECGRP'], u'device_id': u'' } SUBNET1_RESPONSE", "test_attach_client_exception(self): exc = neutron_exceptions.NeutronException() self.neutron_mock.create_port.side_effect = exc self.assertRaises(self.provider.NetworkProviderException, self.provider.attach, 'mac_address',", "u'id': u'SUBNET1' } SUBNET2_RESPONSE = { u'name': u'public-subnet', u'enable_dhcp': False,", "self.neutron_mock.list_ports.assert_called_with(mac_address='a:b:c:d') def test_detach_specific_network(self): ports = {'ports': [PORT1_RESPONSE]} self.neutron_mock.list_ports.return_value = ports", "]) SERIALIZED_PORT1 = collections.OrderedDict([ ('id', u'PORT1'), ('name', u''), ('status', u'ACTIVE'),", "u'host_routes': [], u'ip_version': 4, u'gateway_ip': u'10.0.0.1', u'cidr': u'10.0.0.0/24', u'id': u'SUBNET1'", "u'provider:segmentation_id': None } NETWORK2_RESPONSE = { u'status': u'ACTIVE', u'subnets': [u'SUBNET2'],", "as keystone_exceptions from keystoneclient.v2_0 import client as keystone_client from neutronclient.common", "u'SUBNET1', u'ip_address': u'10.0.0.3' } ], u'id': u'PORT1', u'security_groups': [], u'device_id':", "neutronclient.common import exceptions as neutron_exceptions from neutronclient.neutron import client as", "], u'id': u'PORT2', u'security_groups': [u'SECGRP'], u'device_id': u'' } SUBNET1_RESPONSE =", "True, u'tenant_id': u'TENANTID', u'provider:network_type': u'local', u'router:external': True, u'shared': False, u'id':", "[ { u'subnet_id': u'SUBNET2', u'ip_address': u'192.168.27.3' } ], u'id': u'PORT2',", "u'security_groups': [], u'device_id': u'' } PORT2_RESPONSE = { u'status': u'DOWN',", "SUBNET1_RESPONSE} self.neutron_mock.show_subnet.return_value = subnet self.provider.detach('a:b:c:d', 'network_id') self.neutron_mock.delete_port.assert_called_with(PORT1_RESPONSE['id']) self.neutron_mock.list_ports.assert_called_with( mac_address='a:b:c:d', network_id='network_id')", "self.assertRaises(self.provider.NetworkProviderException, self.provider.attach, 'mac_address', 'network_id') def test_detatch(self): ports = {'ports': [PORT1_RESPONSE]}", "NETWORK1_RESPONSE} self.neutron_mock.show_network.return_value = network subnet = {'subnet': SUBNET1_RESPONSE} self.neutron_mock.show_subnet.return_value =", "u'admin_state_up': True, u'tenant_id': u'TENANTID', u'provider:network_type': u'local', u'router:external': True, u'shared': False,", "test_get_default_networks(self): network_ids = self.provider.get_default_networks() self.assertEqual(network_ids, [self.config.NEUTRON_PUBLIC_NETWORK, self.config.NEUTRON_PRIVATE_NETWORK]) def test_get_service_network(self): network_id", "u'SUBNET2' } SERIALIZED_NETWORK1 = collections.OrderedDict([ ('id', u'NETWORK1'), ('name', u'private'), ('status',", "u'local', u'router:external': False, u'shared': False, u'id': u'NETWORK1', u'provider:segmentation_id': None }", "self.add_mock(keystone_client, 'Client') self.keystone_client_mock.return_value.auth_token = '<PASSWORD>' self.provider = neutron.NeutronProvider(self.config) def test_get_auth_token(self):", "Apache License, Version 2.0 (the \"License\"); you may not use", "self.neutron_mock.show_network.return_value = network subnet = {'subnet': SUBNET1_RESPONSE} 
self.neutron_mock.show_subnet.return_value = subnet", "= self.provider._get_auth_token() self.assertEqual(t, 'auth_token') self.keystone_client_mock.assert_called_with( username='user', password='<PASSWORD>', tenant_id='tenant', auth_url='auth_url' )", "u'public-subnet', u'enable_dhcp': False, u'network_id': u'NETWORK2', u'tenant_id': u'TENANTID', u'dns_nameservers': [], u'allocation_pools':", "IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either", "u'PORT1'), ('name', u''), ('status', u'ACTIVE'), ('mac_address', u'fa:16:3e:e0:d4:63'), ('fixed_ips', [ {", "u'fa:16:3e:e0:d4:63'), ('fixed_ips', [ { u'subnet_id': u'SUBNET1', u'ip_address': u'10.0.0.3' } ]),", "of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law", "exceptions as keystone_exceptions from keystoneclient.v2_0 import client as keystone_client from", "collections.OrderedDict([ ('id', u'NETWORK1'), ('name', u'private'), ('status', u'ACTIVE'), ('subnets', [ collections.OrderedDict([", "} SERIALIZED_NETWORK1 = collections.OrderedDict([ ('id', u'NETWORK1'), ('name', u'private'), ('status', u'ACTIVE'),", "u'name': u'private-subnet', u'enable_dhcp': True, u'network_id': u'NETWORK1', u'tenant_id': u'TENANTID', u'dns_nameservers': [],", "\"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,", "results) def test_list_networks_empty(self): self.neutron_mock.list_networks.return_value = {'networks': []} networks = self.provider.list_networks()", "distributed on an \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS", "} PORT1_RESPONSE = { u'status': u'ACTIVE', u'binding:host_id': u'precise64', u'name': u'',", "u'public', u'provider:physical_network': None, u'admin_state_up': True, u'tenant_id': u'TENANTID', u'provider:network_type': u'local', u'router:external':", "test_get_auth_token(self): t = self.provider._get_auth_token() self.assertEqual(t, 'auth_token') self.keystone_client_mock.assert_called_with( username='user', password='<PASSWORD>', tenant_id='tenant',", "'d6b32008-1432-4299-81c7-cbe3128ba13f', 'NEUTRON_PRIVATE_NETWORK': '2afa16d6-7b84-484f-a642-af243b0e5b10', 'NEUTRON_SERVICE_NETWORK': '2afa16d6-7b84-484f-a642-af243b0e5b10', }) self.neutron_client_mock = self.add_mock(neutron_client, 'Client')", "the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or", "client as keystone_client from neutronclient.common import exceptions as neutron_exceptions from", "u'dns_nameservers': [], u'allocation_pools': [ { u'start': u'192.168.27.1', u'end': u'192.168.27.1' },", "port = self.provider.attach('a:b:c:d', 'network_id') self.neutron_mock.create_port.assert_called_with({ 'port': { 'network_id': 'network_id', 'admin_state_up':", "ANY KIND, either express or implied. 
See the License for", "'port': { 'network_id': 'network_id', 'admin_state_up': True, 'mac_address': 'a:b:c:d' } })", "http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in", "may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless", "[ { u'start': u'10.0.0.2', u'end': u'10.0.0.254' } ], u'host_routes': [],", "NETWORK1_RESPONSE = { u'status': u'ACTIVE', u'subnets': [u'SUBNET1'], u'name': u'private', u'provider:physical_network':", "network = self.provider.get_network_info('NETWORK1') self.assertEqual(network.serialize(), SERIALIZED_NETWORK1) self.neutron_mock.show_network.assert_called_with('NETWORK1') def test_get_network_info_does_not_exist(self): exc =", "u'10.0.0.254' } ], u'host_routes': [], u'ip_version': 4, u'gateway_ip': u'10.0.0.1', u'cidr':", "neutronclient.neutron import client as neutron_client NETWORK1_RESPONSE = { u'status': u'ACTIVE',", "} SUBNET1_RESPONSE = { u'name': u'private-subnet', u'enable_dhcp': True, u'network_id': u'NETWORK1',", "} SUBNET2_RESPONSE = { u'name': u'public-subnet', u'enable_dhcp': False, u'network_id': u'NETWORK2',", "[], u'binding:vif_type': u'unbound', u'device_owner': u'', u'binding:capabilities': {u'port_filter': False}, u'mac_address': u'00:09:7b:3e:18:ca',", "u'subnet_id': u'SUBNET2', u'ip_address': u'192.168.27.3' } ], u'id': u'PORT2', u'security_groups': [u'SECGRP'],", "u'10.0.0.0/24', u'id': u'SUBNET1' } SUBNET2_RESPONSE = { u'name': u'public-subnet', u'enable_dhcp':", "under the License is distributed on an \"AS IS\" BASIS,", "True, u'network_id': u'NETWORK1', u'tenant_id': u'TENANTID', u'extra_dhcp_opts': [], u'binding:vif_type': u'ovs', u'device_owner':", "SERIALIZED_PORT1 = collections.OrderedDict([ ('id', u'PORT1'), ('name', u''), ('status', u'ACTIVE'), ('mac_address',", "exc self.assertRaises(self.provider.NetworkProviderException, self.provider._get_neutron_client) def test_list_networks(self): networks = {'networks': [NETWORK1_RESPONSE, NETWORK2_RESPONSE]}", "= exc self.assertRaises(self.provider.NetworkProviderException, self.provider.get_network_info, 'NETWORK1') def test_list_ports(self): ports = {'ports':", "('cidr', u'10.0.0.0/24'), ('enable_dhcp', True) ]) ]) ]) SERIALIZED_NETWORK2 = collections.OrderedDict([", "u'tenant_id': u'TENANTID', u'dns_nameservers': [], u'allocation_pools': [ { u'start': u'192.168.27.1', u'end':", "self.keystone_client_mock.side_effect = exc self.assertRaises(self.provider.NetworkProviderException, self.provider._get_auth_token) def test_get_neutron_client(self): self.provider._get_neutron_client() self.neutron_client_mock.assert_called_with( '2.0',", "self.neutron_mock.list_networks.assert_called() self.assertEqual(networks, []) def test_list_networks_client_exception(self): exc = neutron_exceptions.NeutronException() self.neutron_mock.list_networks.side_effect =", "= {'subnet': SUBNET1_RESPONSE} self.neutron_mock.show_subnet.return_value = subnet self.provider.detach('a:b:c:d') self.neutron_mock.delete_port.assert_called_with(PORT1_RESPONSE['id']) self.neutron_mock.list_ports.assert_called_with(mac_address='a:b:c:d') def", "License. 
\"\"\" import collections from teeth_overlord import config from teeth_overlord.networks", "as keystone_client from neutronclient.common import exceptions as neutron_exceptions from neutronclient.neutron", "networks = {'networks': [NETWORK1_RESPONSE, NETWORK2_RESPONSE]} self.neutron_mock.list_networks.return_value = networks self.neutron_mock.show_subnet.side_effect =", "'NEUTRON_PRIVATE_NETWORK': '2afa16d6-7b84-484f-a642-af243b0e5b10', 'NEUTRON_SERVICE_NETWORK': '2afa16d6-7b84-484f-a642-af243b0e5b10', }) self.neutron_client_mock = self.add_mock(neutron_client, 'Client') self.neutron_mock", "self.neutron_mock.show_network.return_value = network self.neutron_mock.show_subnet.side_effect = [ {'subnet': SUBNET1_RESPONSE} ] network", "u'00:09:7b:3e:18:ca', u'fixed_ips': [ { u'subnet_id': u'SUBNET2', u'ip_address': u'192.168.27.3' } ],", "self.config = config.LazyConfig(config={ 'KEYSTONE_USER': 'user', 'KEYSTONE_PASS': '<PASSWORD>', 'KEYSTONE_TENANT_ID': 'tenant', 'KEYSTONE_AUTH_URL':", "this file except in compliance with the License. You may", "NETWORK1_RESPONSE} self.neutron_mock.show_network.return_value = network self.neutron_mock.show_subnet.side_effect = [ {'subnet': SUBNET1_RESPONSE} ]", "= neutron_exceptions.NeutronException() self.neutron_client_mock.side_effect = exc self.assertRaises(self.provider.NetworkProviderException, self.provider._get_neutron_client) def test_list_networks(self): networks", "= exc self.assertRaises(self.provider.NetworkProviderException, self.provider._get_neutron_client) def test_list_networks(self): networks = {'networks': [NETWORK1_RESPONSE,", "('id', u'PORT1'), ('name', u''), ('status', u'ACTIVE'), ('mac_address', u'fa:16:3e:e0:d4:63'), ('fixed_ips', [", "SERIALIZED_NETWORK1) ]) class TestNeutronProvider(tests.TeethMockTestUtilities): def setUp(self): super(TestNeutronProvider, self).setUp() self.config =", "= neutron_exceptions.NeutronException() exc.message = '404 Not Found' self.neutron_mock.show_network.side_effect = exc", "u'security_groups': [u'SECGRP'], u'device_id': u'' } SUBNET1_RESPONSE = { u'name': u'private-subnet',", "False, u'network_id': u'NETWORK2', u'tenant_id': u'TENANTID', u'dns_nameservers': [], u'allocation_pools': [ {", "'Client') self.keystone_client_mock.return_value.auth_token = '<PASSWORD>' self.provider = neutron.NeutronProvider(self.config) def test_get_auth_token(self): t", ") def test_get_neutron_client_exception(self): exc = neutron_exceptions.NeutronException() self.neutron_client_mock.side_effect = exc self.assertRaises(self.provider.NetworkProviderException,", "}, { u'start': u'192.168.27.3', u'end': u'192.168.27.254' } ], u'host_routes': [],", "exc = neutron_exceptions.NeutronException() self.neutron_mock.create_port.side_effect = exc self.assertRaises(self.provider.NetworkProviderException, self.provider.attach, 'mac_address', 'network_id')", "test_detach_client_exception(self): ports = {'ports': [PORT1_RESPONSE]} self.neutron_mock.list_ports.return_value = ports network =", "= {'subnet': SUBNET1_RESPONSE} self.neutron_mock.show_subnet.return_value = subnet exc = neutron_exceptions.NeutronException() self.neutron_mock.delete_port.side_effect", "= {'ports': [PORT1_RESPONSE]} self.neutron_mock.list_ports.return_value = ports network = {'network': NETWORK1_RESPONSE}", "u'status': u'ACTIVE', u'binding:host_id': u'precise64', u'name': u'', u'allowed_address_pairs': [], u'admin_state_up': True,", "'tenant', 'KEYSTONE_AUTH_URL': 'auth_url', 'NEUTRON_VERSION': '2.0', 'NEUTRON_URL': 'neutron_url', 'NEUTRON_PUBLIC_NETWORK': 
'd6b32008-1432-4299-81c7-cbe3128ba13f', 'NEUTRON_PRIVATE_NETWORK':", "as neutron_exceptions from neutronclient.neutron import client as neutron_client NETWORK1_RESPONSE =", "test_get_network_info_client_exception(self): exc = neutron_exceptions.NeutronException() self.neutron_mock.show_network.side_effect = exc self.assertRaises(self.provider.NetworkProviderException, self.provider.get_network_info, 'NETWORK1')", "test_get_neutron_client(self): self.provider._get_neutron_client() self.neutron_client_mock.assert_called_with( '2.0', endpoint_url='neutron_url', token='auth_token' ) def test_get_neutron_client_exception(self): exc", "[], u'binding:vif_type': u'ovs', u'device_owner': u'network:dhcp', u'binding:capabilities': {u'port_filter': True}, u'mac_address': u'fa:16:3e:e0:d4:63',", "u'router:external': False, u'shared': False, u'id': u'NETWORK1', u'provider:segmentation_id': None } NETWORK2_RESPONSE", "= subnet ports = self.provider.list_ports('a:b:c:d') self.assertEqual([p.serialize() for p in ports],", "in ports], [SERIALIZED_PORT1]) self.neutron_mock.list_ports.assert_called_with(mac_address='a:b:c:d') def test_attach(self): port = {'port': PORT1_RESPONSE}", "file except in compliance with the License. You may obtain", "'NETWORK1') def test_list_ports(self): ports = {'ports': [PORT1_RESPONSE]} self.neutron_mock.list_ports.return_value = ports", "self.provider.list_networks) def test_get_network_info(self): network = {'network': NETWORK1_RESPONSE} self.neutron_mock.show_network.return_value = network", "('enable_dhcp', True) ]) ]) ]) SERIALIZED_NETWORK2 = collections.OrderedDict([ ('id', u'NETWORK2'),", "self.neutron_client_mock.return_value self.keystone_client_mock = self.add_mock(keystone_client, 'Client') self.keystone_client_mock.return_value.auth_token = '<PASSWORD>' self.provider =", "= neutron.NeutronProvider(self.config) def test_get_auth_token(self): t = self.provider._get_auth_token() self.assertEqual(t, 'auth_token') self.keystone_client_mock.assert_called_with(", "token='auth_token' ) def test_get_neutron_client_exception(self): exc = neutron_exceptions.NeutronException() self.neutron_client_mock.side_effect = exc", "u'gateway_ip': u'192.168.27.2', u'cidr': u'192.168.27.0/24', u'id': u'SUBNET2' } SERIALIZED_NETWORK1 = collections.OrderedDict([", "u'TENANTID', u'dns_nameservers': [], u'allocation_pools': [ { u'start': u'192.168.27.1', u'end': u'192.168.27.1'", "self.provider.get_network_info('NETWORK1') self.assertEqual(network.serialize(), SERIALIZED_NETWORK1) self.neutron_mock.show_network.assert_called_with('NETWORK1') def test_get_network_info_does_not_exist(self): exc = neutron_exceptions.NeutronException() exc.message", "u'device_owner': u'', u'binding:capabilities': {u'port_filter': False}, u'mac_address': u'00:09:7b:3e:18:ca', u'fixed_ips': [ {", "OR CONDITIONS OF ANY KIND, either express or implied. 
See", "= neutron_exceptions.NeutronException() self.neutron_mock.show_network.side_effect = exc self.assertRaises(self.provider.NetworkProviderException, self.provider.get_network_info, 'NETWORK1') def test_list_ports(self):", "]) ]) ]) SERIALIZED_PORT1 = collections.OrderedDict([ ('id', u'PORT1'), ('name', u''),", "u'ip_address': u'192.168.27.3' } ], u'id': u'PORT2', u'security_groups': [u'SECGRP'], u'device_id': u''", "u'SUBNET2', u'ip_address': u'192.168.27.3' } ], u'id': u'PORT2', u'security_groups': [u'SECGRP'], u'device_id':", "self.provider._get_auth_token) def test_get_neutron_client(self): self.provider._get_neutron_client() self.neutron_client_mock.assert_called_with( '2.0', endpoint_url='neutron_url', token='auth_token' ) def", "= exc self.assertRaises(self.provider.NetworkDoesNotExist, self.provider.get_network_info, 'NETWORK1') def test_get_network_info_client_exception(self): exc = neutron_exceptions.NeutronException()", "= exc self.assertRaises(self.provider.NetworkProviderException, self.provider.detach, 'a:b:c:d') def test_get_default_networks(self): network_ids = self.provider.get_default_networks()", "u'private-subnet', u'enable_dhcp': True, u'network_id': u'NETWORK1', u'tenant_id': u'TENANTID', u'dns_nameservers': [], u'allocation_pools':", "under the Apache License, Version 2.0 (the \"License\"); you may", "= collections.OrderedDict([ ('id', u'PORT1'), ('name', u''), ('status', u'ACTIVE'), ('mac_address', u'fa:16:3e:e0:d4:63'),", "= [ {'subnet': SUBNET1_RESPONSE}, {'subnet': SUBNET2_RESPONSE} ] networks = self.provider.list_networks()", "subnet = {'subnet': SUBNET1_RESPONSE} self.neutron_mock.show_subnet.return_value = subnet self.provider.detach('a:b:c:d') self.neutron_mock.delete_port.assert_called_with(PORT1_RESPONSE['id']) self.neutron_mock.list_ports.assert_called_with(mac_address='a:b:c:d')", "u'provider:segmentation_id': None } PORT1_RESPONSE = { u'status': u'ACTIVE', u'binding:host_id': u'precise64',", "= self.add_mock(neutron_client, 'Client') self.neutron_mock = self.neutron_client_mock.return_value self.keystone_client_mock = self.add_mock(keystone_client, 'Client')", "u'ACTIVE'), ('subnets', [ collections.OrderedDict([ ('id', u'SUBNET2'), ('name', u'public-subnet'), ('ip_version', 4),", "as neutron_client NETWORK1_RESPONSE = { u'status': u'ACTIVE', u'subnets': [u'SUBNET1'], u'name':", "= {'network': NETWORK1_RESPONSE} self.neutron_mock.show_network.return_value = network self.neutron_mock.show_subnet.side_effect = [ {'subnet':", "self.neutron_mock.show_subnet.return_value = subnet port = self.provider.attach('a:b:c:d', 'network_id') self.neutron_mock.create_port.assert_called_with({ 'port': {", "neutron_exceptions from neutronclient.neutron import client as neutron_client NETWORK1_RESPONSE = {", "u'admin_state_up': True, u'network_id': u'NETWORK2', u'tenant_id': u'TENANTID', u'extra_dhcp_opts': [], u'binding:vif_type': u'unbound',", "('fixed_ips', [ { u'subnet_id': u'SUBNET1', u'ip_address': u'10.0.0.3' } ]), ('network',", "'mac_address': 'a:b:c:d' } }) self.assertEqual(port.serialize(), SERIALIZED_PORT1) def test_attach_client_exception(self): exc =", "[], u'admin_state_up': True, u'network_id': u'NETWORK2', u'tenant_id': u'TENANTID', u'extra_dhcp_opts': [], u'binding:vif_type':", "neutron from teeth_overlord import tests from keystoneclient.apiclient import exceptions as", "WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.", "self).setUp() self.config = config.LazyConfig(config={ 'KEYSTONE_USER': 'user', 'KEYSTONE_PASS': '<PASSWORD>', 
'KEYSTONE_TENANT_ID': 'tenant',", "See the License for the specific language governing permissions and", "self.neutron_mock.delete_port.assert_called_with(PORT1_RESPONSE['id']) self.neutron_mock.list_ports.assert_called_with(mac_address='a:b:c:d') def test_detach_specific_network(self): ports = {'ports': [PORT1_RESPONSE]} self.neutron_mock.list_ports.return_value =", "False) ]) ]) ]) SERIALIZED_PORT1 = collections.OrderedDict([ ('id', u'PORT1'), ('name',", "test_list_ports(self): ports = {'ports': [PORT1_RESPONSE]} self.neutron_mock.list_ports.return_value = ports network =", "def test_attach_client_exception(self): exc = neutron_exceptions.NeutronException() self.neutron_mock.create_port.side_effect = exc self.assertRaises(self.provider.NetworkProviderException, self.provider.attach,", "} ], u'host_routes': [], u'ip_version': 4, u'gateway_ip': u'10.0.0.1', u'cidr': u'10.0.0.0/24',", "test_get_neutron_client_exception(self): exc = neutron_exceptions.NeutronException() self.neutron_client_mock.side_effect = exc self.assertRaises(self.provider.NetworkProviderException, self.provider._get_neutron_client) def", "{'ports': [PORT1_RESPONSE]} self.neutron_mock.list_ports.return_value = ports network = {'network': NETWORK1_RESPONSE} self.neutron_mock.show_network.return_value", "4, u'gateway_ip': u'192.168.27.2', u'cidr': u'192.168.27.0/24', u'id': u'SUBNET2' } SERIALIZED_NETWORK1 =", "= { u'name': u'public-subnet', u'enable_dhcp': False, u'network_id': u'NETWORK2', u'tenant_id': u'TENANTID',", "{ 'network_id': 'network_id', 'admin_state_up': True, 'mac_address': 'a:b:c:d' } }) self.assertEqual(port.serialize(),", "u'public'), ('status', u'ACTIVE'), ('subnets', [ collections.OrderedDict([ ('id', u'SUBNET2'), ('name', u'public-subnet'),", "u'binding:capabilities': {u'port_filter': True}, u'mac_address': u'fa:16:3e:e0:d4:63', u'fixed_ips': [ { u'subnet_id': u'SUBNET1',", "self.neutron_mock.list_ports.assert_called_with( mac_address='a:b:c:d', network_id='network_id') def test_detach_client_exception(self): ports = {'ports': [PORT1_RESPONSE]} self.neutron_mock.list_ports.return_value", "in writing, software distributed under the License is distributed on", "{ u'subnet_id': u'SUBNET1', u'ip_address': u'10.0.0.3' } ], u'id': u'PORT1', u'security_groups':", "u''), ('status', u'ACTIVE'), ('mac_address', u'fa:16:3e:e0:d4:63'), ('fixed_ips', [ { u'subnet_id': u'SUBNET1',", "required by applicable law or agreed to in writing, software", "self.provider._get_auth_token() self.assertEqual(t, 'auth_token') self.keystone_client_mock.assert_called_with( username='user', password='<PASSWORD>', tenant_id='tenant', auth_url='auth_url' ) def", "exc = neutron_exceptions.NeutronException() exc.message = '404 Not Found' self.neutron_mock.show_network.side_effect =", "PORT2_RESPONSE = { u'status': u'DOWN', u'binding:host_id': u'', u'name': u'', u'allowed_address_pairs':", "True, u'network_id': u'NETWORK1', u'tenant_id': u'TENANTID', u'dns_nameservers': [], u'allocation_pools': [ {", "network subnet = {'subnet': SUBNET1_RESPONSE} self.neutron_mock.show_subnet.return_value = subnet port =", "self.provider.detach('a:b:c:d', 'network_id') self.neutron_mock.delete_port.assert_called_with(PORT1_RESPONSE['id']) self.neutron_mock.list_ports.assert_called_with( mac_address='a:b:c:d', network_id='network_id') def test_detach_client_exception(self): ports =", "u'binding:vif_type': u'unbound', u'device_owner': u'', u'binding:capabilities': {u'port_filter': False}, u'mac_address': u'00:09:7b:3e:18:ca', u'fixed_ips':", "u'ip_version': 
4, u'gateway_ip': u'192.168.27.2', u'cidr': u'192.168.27.0/24', u'id': u'SUBNET2' } SERIALIZED_NETWORK1", "u'NETWORK2', u'tenant_id': u'TENANTID', u'dns_nameservers': [], u'allocation_pools': [ { u'start': u'192.168.27.1',", "u'NETWORK1', u'tenant_id': u'TENANTID', u'dns_nameservers': [], u'allocation_pools': [ { u'start': u'10.0.0.2',", "self.provider.attach, 'mac_address', 'network_id') def test_detatch(self): ports = {'ports': [PORT1_RESPONSE]} self.neutron_mock.list_ports.return_value", "= {'subnet': SUBNET1_RESPONSE} self.neutron_mock.show_subnet.return_value = subnet self.provider.detach('a:b:c:d', 'network_id') self.neutron_mock.delete_port.assert_called_with(PORT1_RESPONSE['id']) self.neutron_mock.list_ports.assert_called_with(", "'Client') self.neutron_mock = self.neutron_client_mock.return_value self.keystone_client_mock = self.add_mock(keystone_client, 'Client') self.keystone_client_mock.return_value.auth_token =", "'NEUTRON_URL': 'neutron_url', 'NEUTRON_PUBLIC_NETWORK': 'd6b32008-1432-4299-81c7-cbe3128ba13f', 'NEUTRON_PRIVATE_NETWORK': '2afa16d6-7b84-484f-a642-af243b0e5b10', 'NEUTRON_SERVICE_NETWORK': '2afa16d6-7b84-484f-a642-af243b0e5b10', }) self.neutron_client_mock", "'auth_token') self.keystone_client_mock.assert_called_with( username='user', password='<PASSWORD>', tenant_id='tenant', auth_url='auth_url' ) def test_get_auth_token_client_exception(self): exc", "('subnets', [ collections.OrderedDict([ ('id', u'SUBNET1'), ('name', u'private-subnet'), ('ip_version', 4), ('gateway_ip',", "self.provider.list_ports('a:b:c:d') self.assertEqual([p.serialize() for p in ports], [SERIALIZED_PORT1]) self.neutron_mock.list_ports.assert_called_with(mac_address='a:b:c:d') def test_attach(self):", "{'subnet': SUBNET1_RESPONSE}, {'subnet': SUBNET2_RESPONSE} ] networks = self.provider.list_networks() results =", "= exc self.assertRaises(self.provider.NetworkProviderException, self.provider.attach, 'mac_address', 'network_id') def test_detatch(self): ports =", "u'192.168.27.254' } ], u'host_routes': [], u'ip_version': 4, u'gateway_ip': u'192.168.27.2', u'cidr':", "test_list_networks_client_exception(self): exc = neutron_exceptions.NeutronException() self.neutron_mock.list_networks.side_effect = exc self.assertRaises(self.provider.NetworkProviderException, self.provider.list_networks) def", "software distributed under the License is distributed on an \"AS", "distributed under the License is distributed on an \"AS IS\"", "keystoneclient.v2_0 import client as keystone_client from neutronclient.common import exceptions as", "] networks = self.provider.list_networks() results = [ SERIALIZED_NETWORK1, SERIALIZED_NETWORK2 ]", "self.assertEqual([n.serialize() for n in networks], results) def test_list_networks_empty(self): self.neutron_mock.list_networks.return_value =", "False, u'shared': False, u'id': u'NETWORK1', u'provider:segmentation_id': None } NETWORK2_RESPONSE =", "SUBNET1_RESPONSE = { u'name': u'private-subnet', u'enable_dhcp': True, u'network_id': u'NETWORK1', u'tenant_id':", "u'private', u'provider:physical_network': None, u'admin_state_up': True, u'tenant_id': u'TENANTID', u'provider:network_type': u'local', u'router:external':", "def test_list_ports(self): ports = {'ports': [PORT1_RESPONSE]} self.neutron_mock.list_ports.return_value = ports network", "CONDITIONS OF ANY KIND, either express or implied. 
See the", "]) ]) SERIALIZED_PORT1 = collections.OrderedDict([ ('id', u'PORT1'), ('name', u''), ('status',", "u'10.0.0.3' } ], u'id': u'PORT1', u'security_groups': [], u'device_id': u'' }", "Version 2.0 (the \"License\"); you may not use this file", "= {'subnet': SUBNET1_RESPONSE} self.neutron_mock.show_subnet.return_value = subnet ports = self.provider.list_ports('a:b:c:d') self.assertEqual([p.serialize()", "networks self.neutron_mock.show_subnet.side_effect = [ {'subnet': SUBNET1_RESPONSE}, {'subnet': SUBNET2_RESPONSE} ] networks", "neutron_exceptions.NeutronException() self.neutron_mock.create_port.side_effect = exc self.assertRaises(self.provider.NetworkProviderException, self.provider.attach, 'mac_address', 'network_id') def test_detatch(self):", "u'extra_dhcp_opts': [], u'binding:vif_type': u'unbound', u'device_owner': u'', u'binding:capabilities': {u'port_filter': False}, u'mac_address':", "]) class TestNeutronProvider(tests.TeethMockTestUtilities): def setUp(self): super(TestNeutronProvider, self).setUp() self.config = config.LazyConfig(config={", "u'name': u'', u'allowed_address_pairs': [], u'admin_state_up': True, u'network_id': u'NETWORK2', u'tenant_id': u'TENANTID',", "'NEUTRON_PUBLIC_NETWORK': 'd6b32008-1432-4299-81c7-cbe3128ba13f', 'NEUTRON_PRIVATE_NETWORK': '2afa16d6-7b84-484f-a642-af243b0e5b10', 'NEUTRON_SERVICE_NETWORK': '2afa16d6-7b84-484f-a642-af243b0e5b10', }) self.neutron_client_mock = self.add_mock(neutron_client,", "network_ids = self.provider.get_default_networks() self.assertEqual(network_ids, [self.config.NEUTRON_PUBLIC_NETWORK, self.config.NEUTRON_PRIVATE_NETWORK]) def test_get_service_network(self): network_id =", "not use this file except in compliance with the License.", "2.0 (the \"License\"); you may not use this file except", "copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable", "u'', u'binding:capabilities': {u'port_filter': False}, u'mac_address': u'00:09:7b:3e:18:ca', u'fixed_ips': [ { u'subnet_id':", "('name', u'private-subnet'), ('ip_version', 4), ('gateway_ip', u'10.0.0.1'), ('cidr', u'10.0.0.0/24'), ('enable_dhcp', True)", "Inc. Licensed under the Apache License, Version 2.0 (the \"License\");", "= {'networks': []} networks = self.provider.list_networks() self.neutron_mock.list_networks.assert_called() self.assertEqual(networks, []) def", "self.assertEqual(network.serialize(), SERIALIZED_NETWORK1) self.neutron_mock.show_network.assert_called_with('NETWORK1') def test_get_network_info_does_not_exist(self): exc = neutron_exceptions.NeutronException() exc.message =", "self.neutron_mock.create_port.assert_called_with({ 'port': { 'network_id': 'network_id', 'admin_state_up': True, 'mac_address': 'a:b:c:d' }", "4), ('gateway_ip', u'10.0.0.1'), ('cidr', u'10.0.0.0/24'), ('enable_dhcp', True) ]) ]) ])", "you may not use this file except in compliance with", "neutron_exceptions.NeutronException() self.neutron_mock.delete_port.side_effect = exc self.assertRaises(self.provider.NetworkProviderException, self.provider.detach, 'a:b:c:d') def test_get_default_networks(self): network_ids", "exc self.assertRaises(self.provider.NetworkDoesNotExist, self.provider.get_network_info, 'NETWORK1') def test_get_network_info_client_exception(self): exc = neutron_exceptions.NeutronException() self.neutron_mock.show_network.side_effect", "import collections from teeth_overlord import config from teeth_overlord.networks import neutron", "is distributed on an \"AS IS\" BASIS, WITHOUT WARRANTIES OR", "the License. 
You may obtain a copy of the License", "teeth_overlord import config from teeth_overlord.networks import neutron from teeth_overlord import", "None, u'admin_state_up': True, u'tenant_id': u'TENANTID', u'provider:network_type': u'local', u'router:external': True, u'shared':", "u'network_id': u'NETWORK1', u'tenant_id': u'TENANTID', u'dns_nameservers': [], u'allocation_pools': [ { u'start':", "Found' self.neutron_mock.show_network.side_effect = exc self.assertRaises(self.provider.NetworkDoesNotExist, self.provider.get_network_info, 'NETWORK1') def test_get_network_info_client_exception(self): exc", "('name', u'public'), ('status', u'ACTIVE'), ('subnets', [ collections.OrderedDict([ ('id', u'SUBNET2'), ('name',", "setUp(self): super(TestNeutronProvider, self).setUp() self.config = config.LazyConfig(config={ 'KEYSTONE_USER': 'user', 'KEYSTONE_PASS': '<PASSWORD>',", "'404 Not Found' self.neutron_mock.show_network.side_effect = exc self.assertRaises(self.provider.NetworkDoesNotExist, self.provider.get_network_info, 'NETWORK1') def", "use this file except in compliance with the License. You", "self.assertRaises(self.provider.NetworkProviderException, self.provider.get_network_info, 'NETWORK1') def test_list_ports(self): ports = {'ports': [PORT1_RESPONSE]} self.neutron_mock.list_ports.return_value", "self.assertRaises(self.provider.NetworkProviderException, self.provider.list_networks) def test_get_network_info(self): network = {'network': NETWORK1_RESPONSE} self.neutron_mock.show_network.return_value =", "u'' } PORT2_RESPONSE = { u'status': u'DOWN', u'binding:host_id': u'', u'name':", "[], u'device_id': u'' } PORT2_RESPONSE = { u'status': u'DOWN', u'binding:host_id':", "False, u'id': u'NETWORK2', u'provider:segmentation_id': None } PORT1_RESPONSE = { u'status':", "'auth_url', 'NEUTRON_VERSION': '2.0', 'NEUTRON_URL': 'neutron_url', 'NEUTRON_PUBLIC_NETWORK': 'd6b32008-1432-4299-81c7-cbe3128ba13f', 'NEUTRON_PRIVATE_NETWORK': '2afa16d6-7b84-484f-a642-af243b0e5b10', 'NEUTRON_SERVICE_NETWORK':", "('subnets', [ collections.OrderedDict([ ('id', u'SUBNET2'), ('name', u'public-subnet'), ('ip_version', 4), ('gateway_ip',", "u'id': u'NETWORK1', u'provider:segmentation_id': None } NETWORK2_RESPONSE = { u'status': u'ACTIVE',", "for n in networks], results) def test_list_networks_empty(self): self.neutron_mock.list_networks.return_value = {'networks':", "[]} networks = self.provider.list_networks() self.neutron_mock.list_networks.assert_called() self.assertEqual(networks, []) def test_list_networks_client_exception(self): exc", "True) ]) ]) ]) SERIALIZED_NETWORK2 = collections.OrderedDict([ ('id', u'NETWORK2'), ('name',", "u'192.168.27.1', u'end': u'192.168.27.1' }, { u'start': u'192.168.27.3', u'end': u'192.168.27.254' }", "self.neutron_mock.show_subnet.return_value = subnet self.provider.detach('a:b:c:d', 'network_id') self.neutron_mock.delete_port.assert_called_with(PORT1_RESPONSE['id']) self.neutron_mock.list_ports.assert_called_with( mac_address='a:b:c:d', network_id='network_id') def", "collections from teeth_overlord import config from teeth_overlord.networks import neutron from", "'KEYSTONE_PASS': '<PASSWORD>', 'KEYSTONE_TENANT_ID': 'tenant', 'KEYSTONE_AUTH_URL': 'auth_url', 'NEUTRON_VERSION': '2.0', 'NEUTRON_URL': 'neutron_url',", "exc self.assertRaises(self.provider.NetworkProviderException, self.provider.detach, 'a:b:c:d') def test_get_default_networks(self): network_ids = self.provider.get_default_networks() self.assertEqual(network_ids,", "u'TENANTID', u'extra_dhcp_opts': [], u'binding:vif_type': 
u'ovs', u'device_owner': u'network:dhcp', u'binding:capabilities': {u'port_filter': True},", "keystone_exceptions from keystoneclient.v2_0 import client as keystone_client from neutronclient.common import", "u'', u'allowed_address_pairs': [], u'admin_state_up': True, u'network_id': u'NETWORK1', u'tenant_id': u'TENANTID', u'extra_dhcp_opts':", "= exc self.assertRaises(self.provider.NetworkProviderException, self.provider.list_networks) def test_get_network_info(self): network = {'network': NETWORK1_RESPONSE}", "exc = neutron_exceptions.NeutronException() self.neutron_mock.show_network.side_effect = exc self.assertRaises(self.provider.NetworkProviderException, self.provider.get_network_info, 'NETWORK1') def", "SUBNET1_RESPONSE} self.neutron_mock.show_subnet.return_value = subnet port = self.provider.attach('a:b:c:d', 'network_id') self.neutron_mock.create_port.assert_called_with({ 'port':", "u'10.0.0.0/24'), ('enable_dhcp', True) ]) ]) ]) SERIALIZED_NETWORK2 = collections.OrderedDict([ ('id',", "u'provider:network_type': u'local', u'router:external': False, u'shared': False, u'id': u'NETWORK1', u'provider:segmentation_id': None", "password='<PASSWORD>', tenant_id='tenant', auth_url='auth_url' ) def test_get_auth_token_client_exception(self): exc = keystone_exceptions.ClientException self.keystone_client_mock.side_effect", "[ {'subnet': SUBNET1_RESPONSE} ] network = self.provider.get_network_info('NETWORK1') self.assertEqual(network.serialize(), SERIALIZED_NETWORK1) self.neutron_mock.show_network.assert_called_with('NETWORK1')", "u'cidr': u'192.168.27.0/24', u'id': u'SUBNET2' } SERIALIZED_NETWORK1 = collections.OrderedDict([ ('id', u'NETWORK1'),", "= network self.neutron_mock.show_subnet.side_effect = [ {'subnet': SUBNET1_RESPONSE} ] network =", "subnet = {'subnet': SUBNET1_RESPONSE} self.neutron_mock.show_subnet.return_value = subnet exc = neutron_exceptions.NeutronException()", "SUBNET1_RESPONSE} self.neutron_mock.show_subnet.return_value = subnet exc = neutron_exceptions.NeutronException() self.neutron_mock.delete_port.side_effect = exc", "import exceptions as neutron_exceptions from neutronclient.neutron import client as neutron_client", "u'gateway_ip': u'10.0.0.1', u'cidr': u'10.0.0.0/24', u'id': u'SUBNET1' } SUBNET2_RESPONSE = {", "the License. 
\"\"\" import collections from teeth_overlord import config from", "Not Found' self.neutron_mock.show_network.side_effect = exc self.assertRaises(self.provider.NetworkDoesNotExist, self.provider.get_network_info, 'NETWORK1') def test_get_network_info_client_exception(self):", "u'end': u'192.168.27.254' } ], u'host_routes': [], u'ip_version': 4, u'gateway_ip': u'192.168.27.2',", "('gateway_ip', u'10.0.0.1'), ('cidr', u'10.0.0.0/24'), ('enable_dhcp', True) ]) ]) ]) SERIALIZED_NETWORK2", "[PORT1_RESPONSE]} self.neutron_mock.list_ports.return_value = ports network = {'network': NETWORK1_RESPONSE} self.neutron_mock.show_network.return_value =", "= { u'name': u'private-subnet', u'enable_dhcp': True, u'network_id': u'NETWORK1', u'tenant_id': u'TENANTID',", "keystone_client from neutronclient.common import exceptions as neutron_exceptions from neutronclient.neutron import", "u'admin_state_up': True, u'tenant_id': u'TENANTID', u'provider:network_type': u'local', u'router:external': False, u'shared': False,", "networks = self.provider.list_networks() results = [ SERIALIZED_NETWORK1, SERIALIZED_NETWORK2 ] self.assertEqual([n.serialize()", "from teeth_overlord import config from teeth_overlord.networks import neutron from teeth_overlord", "(the \"License\"); you may not use this file except in", "('enable_dhcp', False) ]) ]) ]) SERIALIZED_PORT1 = collections.OrderedDict([ ('id', u'PORT1'),", "], u'host_routes': [], u'ip_version': 4, u'gateway_ip': u'10.0.0.1', u'cidr': u'10.0.0.0/24', u'id':", "= subnet self.provider.detach('a:b:c:d') self.neutron_mock.delete_port.assert_called_with(PORT1_RESPONSE['id']) self.neutron_mock.list_ports.assert_called_with(mac_address='a:b:c:d') def test_detach_specific_network(self): ports = {'ports':", "} ], u'host_routes': [], u'ip_version': 4, u'gateway_ip': u'192.168.27.2', u'cidr': u'192.168.27.0/24',", "from teeth_overlord import tests from keystoneclient.apiclient import exceptions as keystone_exceptions", "exc = neutron_exceptions.NeutronException() self.neutron_client_mock.side_effect = exc self.assertRaises(self.provider.NetworkProviderException, self.provider._get_neutron_client) def test_list_networks(self):", "self.neutron_mock.show_subnet.side_effect = [ {'subnet': SUBNET1_RESPONSE} ] network = self.provider.get_network_info('NETWORK1') self.assertEqual(network.serialize(),", "} ]), ('network', SERIALIZED_NETWORK1) ]) class TestNeutronProvider(tests.TeethMockTestUtilities): def setUp(self): super(TestNeutronProvider,", "You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0", "subnet = {'subnet': SUBNET1_RESPONSE} self.neutron_mock.show_subnet.return_value = subnet ports = self.provider.list_ports('a:b:c:d')", "def test_get_auth_token(self): t = self.provider._get_auth_token() self.assertEqual(t, 'auth_token') self.keystone_client_mock.assert_called_with( username='user', password='<PASSWORD>',", "'network_id': 'network_id', 'admin_state_up': True, 'mac_address': 'a:b:c:d' } }) self.assertEqual(port.serialize(), SERIALIZED_PORT1)", "u'status': u'ACTIVE', u'subnets': [u'SUBNET2'], u'name': u'public', u'provider:physical_network': None, u'admin_state_up': True,", "] self.assertEqual([n.serialize() for n in networks], results) def test_list_networks_empty(self): self.neutron_mock.list_networks.return_value", "u'mac_address': u'fa:16:3e:e0:d4:63', u'fixed_ips': [ { u'subnet_id': u'SUBNET1', u'ip_address': u'10.0.0.3' }", "the Apache License, Version 2.0 (the \"License\"); you may not", "or implied. 
See the License for the specific language governing", "under the License. \"\"\" import collections from teeth_overlord import config", "ports], [SERIALIZED_PORT1]) self.neutron_mock.list_ports.assert_called_with(mac_address='a:b:c:d') def test_attach(self): port = {'port': PORT1_RESPONSE} self.neutron_mock.create_port.return_value", "KIND, either express or implied. See the License for the", "to in writing, software distributed under the License is distributed", "u'subnets': [u'SUBNET2'], u'name': u'public', u'provider:physical_network': None, u'admin_state_up': True, u'tenant_id': u'TENANTID',", "port = {'port': PORT1_RESPONSE} self.neutron_mock.create_port.return_value = port network = {'network':", "[], u'allocation_pools': [ { u'start': u'192.168.27.1', u'end': u'192.168.27.1' }, {", "u'provider:network_type': u'local', u'router:external': True, u'shared': False, u'id': u'NETWORK2', u'provider:segmentation_id': None", "u'NETWORK1', u'provider:segmentation_id': None } NETWORK2_RESPONSE = { u'status': u'ACTIVE', u'subnets':", "law or agreed to in writing, software distributed under the", "True, u'network_id': u'NETWORK2', u'tenant_id': u'TENANTID', u'extra_dhcp_opts': [], u'binding:vif_type': u'unbound', u'device_owner':", "'2afa16d6-7b84-484f-a642-af243b0e5b10', }) self.neutron_client_mock = self.add_mock(neutron_client, 'Client') self.neutron_mock = self.neutron_client_mock.return_value self.keystone_client_mock", "}) self.neutron_client_mock = self.add_mock(neutron_client, 'Client') self.neutron_mock = self.neutron_client_mock.return_value self.keystone_client_mock =", "SERIALIZED_NETWORK1) self.neutron_mock.show_network.assert_called_with('NETWORK1') def test_get_network_info_does_not_exist(self): exc = neutron_exceptions.NeutronException() exc.message = '404", "('id', u'SUBNET2'), ('name', u'public-subnet'), ('ip_version', 4), ('gateway_ip', u'192.168.27.2'), ('cidr', u'192.168.27.0/24'),", "u'TENANTID', u'extra_dhcp_opts': [], u'binding:vif_type': u'unbound', u'device_owner': u'', u'binding:capabilities': {u'port_filter': False},", "self.assertEqual([p.serialize() for p in ports], [SERIALIZED_PORT1]) self.neutron_mock.list_ports.assert_called_with(mac_address='a:b:c:d') def test_attach(self): port", "exc = neutron_exceptions.NeutronException() self.neutron_mock.delete_port.side_effect = exc self.assertRaises(self.provider.NetworkProviderException, self.provider.detach, 'a:b:c:d') def", "= collections.OrderedDict([ ('id', u'NETWORK1'), ('name', u'private'), ('status', u'ACTIVE'), ('subnets', [", "{'network': NETWORK1_RESPONSE} self.neutron_mock.show_network.return_value = network subnet = {'subnet': SUBNET1_RESPONSE} self.neutron_mock.show_subnet.return_value", "on an \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF", "= { u'status': u'DOWN', u'binding:host_id': u'', u'name': u'', u'allowed_address_pairs': [],", "u'network_id': u'NETWORK2', u'tenant_id': u'TENANTID', u'extra_dhcp_opts': [], u'binding:vif_type': u'unbound', u'device_owner': u'',", "self.assertEqual(network_ids, [self.config.NEUTRON_PUBLIC_NETWORK, self.config.NEUTRON_PRIVATE_NETWORK]) def test_get_service_network(self): network_id = self.provider.get_service_network() self.assertEqual(network_id, self.config.NEUTRON_SERVICE_NETWORK)", "subnet = {'subnet': SUBNET1_RESPONSE} self.neutron_mock.show_subnet.return_value = subnet port = self.provider.attach('a:b:c:d',", "def test_get_network_info_client_exception(self): exc = neutron_exceptions.NeutronException() self.neutron_mock.show_network.side_effect = exc 
self.assertRaises(self.provider.NetworkProviderException, self.provider.get_network_info,", "'NETWORK1') def test_get_network_info_client_exception(self): exc = neutron_exceptions.NeutronException() self.neutron_mock.show_network.side_effect = exc self.assertRaises(self.provider.NetworkProviderException,", "u'192.168.27.0/24'), ('enable_dhcp', False) ]) ]) ]) SERIALIZED_PORT1 = collections.OrderedDict([ ('id',", "u'subnet_id': u'SUBNET1', u'ip_address': u'10.0.0.3' } ]), ('network', SERIALIZED_NETWORK1) ]) class", "exc = neutron_exceptions.NeutronException() self.neutron_mock.list_networks.side_effect = exc self.assertRaises(self.provider.NetworkProviderException, self.provider.list_networks) def test_get_network_info(self):", "self.provider = neutron.NeutronProvider(self.config) def test_get_auth_token(self): t = self.provider._get_auth_token() self.assertEqual(t, 'auth_token')", "self.neutron_mock.list_networks.return_value = networks self.neutron_mock.show_subnet.side_effect = [ {'subnet': SUBNET1_RESPONSE}, {'subnet': SUBNET2_RESPONSE}", "[u'SECGRP'], u'device_id': u'' } SUBNET1_RESPONSE = { u'name': u'private-subnet', u'enable_dhcp':", "self.keystone_client_mock = self.add_mock(keystone_client, 'Client') self.keystone_client_mock.return_value.auth_token = '<PASSWORD>' self.provider = neutron.NeutronProvider(self.config)", "for the specific language governing permissions and limitations under the", "u'10.0.0.1'), ('cidr', u'10.0.0.0/24'), ('enable_dhcp', True) ]) ]) ]) SERIALIZED_NETWORK2 =", "def test_detach_specific_network(self): ports = {'ports': [PORT1_RESPONSE]} self.neutron_mock.list_ports.return_value = ports network", "self.neutron_mock.delete_port.assert_called_with(PORT1_RESPONSE['id']) self.neutron_mock.list_ports.assert_called_with( mac_address='a:b:c:d', network_id='network_id') def test_detach_client_exception(self): ports = {'ports': [PORT1_RESPONSE]}", "[ { u'subnet_id': u'SUBNET1', u'ip_address': u'10.0.0.3' } ]), ('network', SERIALIZED_NETWORK1)", "'2.0', 'NEUTRON_URL': 'neutron_url', 'NEUTRON_PUBLIC_NETWORK': 'd6b32008-1432-4299-81c7-cbe3128ba13f', 'NEUTRON_PRIVATE_NETWORK': '2afa16d6-7b84-484f-a642-af243b0e5b10', 'NEUTRON_SERVICE_NETWORK': '2afa16d6-7b84-484f-a642-af243b0e5b10', })", "self.neutron_mock = self.neutron_client_mock.return_value self.keystone_client_mock = self.add_mock(keystone_client, 'Client') self.keystone_client_mock.return_value.auth_token = '<PASSWORD>'", "exc self.assertRaises(self.provider.NetworkProviderException, self.provider.list_networks) def test_get_network_info(self): network = {'network': NETWORK1_RESPONSE} self.neutron_mock.show_network.return_value", "network subnet = {'subnet': SUBNET1_RESPONSE} self.neutron_mock.show_subnet.return_value = subnet exc =", "[ { u'start': u'192.168.27.1', u'end': u'192.168.27.1' }, { u'start': u'192.168.27.3',", "'NEUTRON_VERSION': '2.0', 'NEUTRON_URL': 'neutron_url', 'NEUTRON_PUBLIC_NETWORK': 'd6b32008-1432-4299-81c7-cbe3128ba13f', 'NEUTRON_PRIVATE_NETWORK': '2afa16d6-7b84-484f-a642-af243b0e5b10', 'NEUTRON_SERVICE_NETWORK': '2afa16d6-7b84-484f-a642-af243b0e5b10',", "} NETWORK2_RESPONSE = { u'status': u'ACTIVE', u'subnets': [u'SUBNET2'], u'name': u'public',", "u'shared': False, u'id': u'NETWORK1', u'provider:segmentation_id': None } NETWORK2_RESPONSE = {", "u'end': u'192.168.27.1' }, { u'start': u'192.168.27.3', u'end': u'192.168.27.254' } ],", "the License for the specific language governing permissions and limitations", "may not use this file except in compliance with the", "u'10.0.0.2', u'end': 
u'10.0.0.254' } ], u'host_routes': [], u'ip_version': 4, u'gateway_ip':", "implied. See the License for the specific language governing permissions", "import tests from keystoneclient.apiclient import exceptions as keystone_exceptions from keystoneclient.v2_0", "u'PORT2', u'security_groups': [u'SECGRP'], u'device_id': u'' } SUBNET1_RESPONSE = { u'name':", "def test_get_network_info_does_not_exist(self): exc = neutron_exceptions.NeutronException() exc.message = '404 Not Found'", "network subnet = {'subnet': SUBNET1_RESPONSE} self.neutron_mock.show_subnet.return_value = subnet self.provider.detach('a:b:c:d') self.neutron_mock.delete_port.assert_called_with(PORT1_RESPONSE['id'])", "def test_list_networks_empty(self): self.neutron_mock.list_networks.return_value = {'networks': []} networks = self.provider.list_networks() self.neutron_mock.list_networks.assert_called()", "WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or", "NETWORK2_RESPONSE = { u'status': u'ACTIVE', u'subnets': [u'SUBNET2'], u'name': u'public', u'provider:physical_network':", "self.provider.attach('a:b:c:d', 'network_id') self.neutron_mock.create_port.assert_called_with({ 'port': { 'network_id': 'network_id', 'admin_state_up': True, 'mac_address':", "= { u'status': u'ACTIVE', u'subnets': [u'SUBNET1'], u'name': u'private', u'provider:physical_network': None,", "= {'network': NETWORK1_RESPONSE} self.neutron_mock.show_network.return_value = network subnet = {'subnet': SUBNET1_RESPONSE}", "2013 Rackspace, Inc. Licensed under the Apache License, Version 2.0", "u'device_id': u'' } SUBNET1_RESPONSE = { u'name': u'private-subnet', u'enable_dhcp': True,", "[ collections.OrderedDict([ ('id', u'SUBNET1'), ('name', u'private-subnet'), ('ip_version', 4), ('gateway_ip', u'10.0.0.1'),", "SUBNET1_RESPONSE} ] network = self.provider.get_network_info('NETWORK1') self.assertEqual(network.serialize(), SERIALIZED_NETWORK1) self.neutron_mock.show_network.assert_called_with('NETWORK1') def test_get_network_info_does_not_exist(self):", "and limitations under the License. 
\"\"\" import collections from teeth_overlord", "u'allocation_pools': [ { u'start': u'10.0.0.2', u'end': u'10.0.0.254' } ], u'host_routes':", "u'device_id': u'' } PORT2_RESPONSE = { u'status': u'DOWN', u'binding:host_id': u'',", "exc self.assertRaises(self.provider.NetworkProviderException, self.provider._get_auth_token) def test_get_neutron_client(self): self.provider._get_neutron_client() self.neutron_client_mock.assert_called_with( '2.0', endpoint_url='neutron_url', token='auth_token'", "self.neutron_mock.delete_port.side_effect = exc self.assertRaises(self.provider.NetworkProviderException, self.provider.detach, 'a:b:c:d') def test_get_default_networks(self): network_ids =", "u'SUBNET1'), ('name', u'private-subnet'), ('ip_version', 4), ('gateway_ip', u'10.0.0.1'), ('cidr', u'10.0.0.0/24'), ('enable_dhcp',", "test_list_networks_empty(self): self.neutron_mock.list_networks.return_value = {'networks': []} networks = self.provider.list_networks() self.neutron_mock.list_networks.assert_called() self.assertEqual(networks,", "= subnet port = self.provider.attach('a:b:c:d', 'network_id') self.neutron_mock.create_port.assert_called_with({ 'port': { 'network_id':", "{ u'start': u'192.168.27.1', u'end': u'192.168.27.1' }, { u'start': u'192.168.27.3', u'end':", "'KEYSTONE_USER': 'user', 'KEYSTONE_PASS': '<PASSWORD>', 'KEYSTONE_TENANT_ID': 'tenant', 'KEYSTONE_AUTH_URL': 'auth_url', 'NEUTRON_VERSION': '2.0',", "network = {'network': NETWORK1_RESPONSE} self.neutron_mock.show_network.return_value = network self.neutron_mock.show_subnet.side_effect = [", "exc self.assertRaises(self.provider.NetworkProviderException, self.provider.attach, 'mac_address', 'network_id') def test_detatch(self): ports = {'ports':", "network subnet = {'subnet': SUBNET1_RESPONSE} self.neutron_mock.show_subnet.return_value = subnet self.provider.detach('a:b:c:d', 'network_id')", "collections.OrderedDict([ ('id', u'NETWORK2'), ('name', u'public'), ('status', u'ACTIVE'), ('subnets', [ collections.OrderedDict([", "u'end': u'10.0.0.254' } ], u'host_routes': [], u'ip_version': 4, u'gateway_ip': u'10.0.0.1',", "self.neutron_mock.create_port.return_value = port network = {'network': NETWORK1_RESPONSE} self.neutron_mock.show_network.return_value = network", "def test_get_auth_token_client_exception(self): exc = keystone_exceptions.ClientException self.keystone_client_mock.side_effect = exc self.assertRaises(self.provider.NetworkProviderException, self.provider._get_auth_token)", "{u'port_filter': False}, u'mac_address': u'00:09:7b:3e:18:ca', u'fixed_ips': [ { u'subnet_id': u'SUBNET2', u'ip_address':", "def test_get_neutron_client(self): self.provider._get_neutron_client() self.neutron_client_mock.assert_called_with( '2.0', endpoint_url='neutron_url', token='auth_token' ) def test_get_neutron_client_exception(self):", "writing, software distributed under the License is distributed on an", "limitations under the License. 
\"\"\" import collections from teeth_overlord import", "keystoneclient.apiclient import exceptions as keystone_exceptions from keystoneclient.v2_0 import client as", "subnet exc = neutron_exceptions.NeutronException() self.neutron_mock.delete_port.side_effect = exc self.assertRaises(self.provider.NetworkProviderException, self.provider.detach, 'a:b:c:d')", "collections.OrderedDict([ ('id', u'SUBNET2'), ('name', u'public-subnet'), ('ip_version', 4), ('gateway_ip', u'192.168.27.2'), ('cidr',", "'KEYSTONE_TENANT_ID': 'tenant', 'KEYSTONE_AUTH_URL': 'auth_url', 'NEUTRON_VERSION': '2.0', 'NEUTRON_URL': 'neutron_url', 'NEUTRON_PUBLIC_NETWORK': 'd6b32008-1432-4299-81c7-cbe3128ba13f',", "True, u'tenant_id': u'TENANTID', u'provider:network_type': u'local', u'router:external': False, u'shared': False, u'id':", "('id', u'NETWORK1'), ('name', u'private'), ('status', u'ACTIVE'), ('subnets', [ collections.OrderedDict([ ('id',", "u'allowed_address_pairs': [], u'admin_state_up': True, u'network_id': u'NETWORK2', u'tenant_id': u'TENANTID', u'extra_dhcp_opts': [],", "super(TestNeutronProvider, self).setUp() self.config = config.LazyConfig(config={ 'KEYSTONE_USER': 'user', 'KEYSTONE_PASS': '<PASSWORD>', 'KEYSTONE_TENANT_ID':", "= self.provider.get_default_networks() self.assertEqual(network_ids, [self.config.NEUTRON_PUBLIC_NETWORK, self.config.NEUTRON_PRIVATE_NETWORK]) def test_get_service_network(self): network_id = self.provider.get_service_network()", "[], u'allocation_pools': [ { u'start': u'10.0.0.2', u'end': u'10.0.0.254' } ],", "def test_detatch(self): ports = {'ports': [PORT1_RESPONSE]} self.neutron_mock.list_ports.return_value = ports network", "self.provider.list_networks() results = [ SERIALIZED_NETWORK1, SERIALIZED_NETWORK2 ] self.assertEqual([n.serialize() for n", "in compliance with the License. 
You may obtain a copy", "= subnet self.provider.detach('a:b:c:d', 'network_id') self.neutron_mock.delete_port.assert_called_with(PORT1_RESPONSE['id']) self.neutron_mock.list_ports.assert_called_with( mac_address='a:b:c:d', network_id='network_id') def test_detach_client_exception(self):", "exc = keystone_exceptions.ClientException self.keystone_client_mock.side_effect = exc self.assertRaises(self.provider.NetworkProviderException, self.provider._get_auth_token) def test_get_neutron_client(self):", "self.assertEqual(networks, []) def test_list_networks_client_exception(self): exc = neutron_exceptions.NeutronException() self.neutron_mock.list_networks.side_effect = exc", "from keystoneclient.apiclient import exceptions as keystone_exceptions from keystoneclient.v2_0 import client", "{'subnet': SUBNET1_RESPONSE} self.neutron_mock.show_subnet.return_value = subnet self.provider.detach('a:b:c:d') self.neutron_mock.delete_port.assert_called_with(PORT1_RESPONSE['id']) self.neutron_mock.list_ports.assert_called_with(mac_address='a:b:c:d') def test_detach_specific_network(self):", "= {'subnet': SUBNET1_RESPONSE} self.neutron_mock.show_subnet.return_value = subnet port = self.provider.attach('a:b:c:d', 'network_id')", "= config.LazyConfig(config={ 'KEYSTONE_USER': 'user', 'KEYSTONE_PASS': '<PASSWORD>', 'KEYSTONE_TENANT_ID': 'tenant', 'KEYSTONE_AUTH_URL': 'auth_url',", "agreed to in writing, software distributed under the License is", "u'shared': False, u'id': u'NETWORK2', u'provider:segmentation_id': None } PORT1_RESPONSE = {", "]) ]) ]) SERIALIZED_NETWORK2 = collections.OrderedDict([ ('id', u'NETWORK2'), ('name', u'public'),", "at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to", "u'192.168.27.0/24', u'id': u'SUBNET2' } SERIALIZED_NETWORK1 = collections.OrderedDict([ ('id', u'NETWORK1'), ('name',", "def test_list_networks_client_exception(self): exc = neutron_exceptions.NeutronException() self.neutron_mock.list_networks.side_effect = exc self.assertRaises(self.provider.NetworkProviderException, self.provider.list_networks)", "]) SERIALIZED_NETWORK2 = collections.OrderedDict([ ('id', u'NETWORK2'), ('name', u'public'), ('status', u'ACTIVE'),", "u'192.168.27.1' }, { u'start': u'192.168.27.3', u'end': u'192.168.27.254' } ], u'host_routes':", "'network_id', 'admin_state_up': True, 'mac_address': 'a:b:c:d' } }) self.assertEqual(port.serialize(), SERIALIZED_PORT1) def", "SERIALIZED_NETWORK2 = collections.OrderedDict([ ('id', u'NETWORK2'), ('name', u'public'), ('status', u'ACTIVE'), ('subnets',", "u'private'), ('status', u'ACTIVE'), ('subnets', [ collections.OrderedDict([ ('id', u'SUBNET1'), ('name', u'private-subnet'),", "p in ports], [SERIALIZED_PORT1]) self.neutron_mock.list_ports.assert_called_with(mac_address='a:b:c:d') def test_attach(self): port = {'port':", "SUBNET1_RESPONSE} self.neutron_mock.show_subnet.return_value = subnet self.provider.detach('a:b:c:d') self.neutron_mock.delete_port.assert_called_with(PORT1_RESPONSE['id']) self.neutron_mock.list_ports.assert_called_with(mac_address='a:b:c:d') def test_detach_specific_network(self): ports", "def test_detach_client_exception(self): ports = {'ports': [PORT1_RESPONSE]} self.neutron_mock.list_ports.return_value = ports network", "('id', u'SUBNET1'), ('name', u'private-subnet'), ('ip_version', 4), ('gateway_ip', u'10.0.0.1'), ('cidr', u'10.0.0.0/24'),", "= networks self.neutron_mock.show_subnet.side_effect = [ {'subnet': SUBNET1_RESPONSE}, {'subnet': SUBNET2_RESPONSE} ]", "= { u'status': u'ACTIVE', 
u'subnets': [u'SUBNET2'], u'name': u'public', u'provider:physical_network': None,", "u'tenant_id': u'TENANTID', u'provider:network_type': u'local', u'router:external': False, u'shared': False, u'id': u'NETWORK1',", "u'192.168.27.2', u'cidr': u'192.168.27.0/24', u'id': u'SUBNET2' } SERIALIZED_NETWORK1 = collections.OrderedDict([ ('id',", "{'network': NETWORK1_RESPONSE} self.neutron_mock.show_network.return_value = network self.neutron_mock.show_subnet.side_effect = [ {'subnet': SUBNET1_RESPONSE}", "either express or implied. See the License for the specific", "neutron_exceptions.NeutronException() exc.message = '404 Not Found' self.neutron_mock.show_network.side_effect = exc self.assertRaises(self.provider.NetworkDoesNotExist,", "BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express", "[ { u'subnet_id': u'SUBNET1', u'ip_address': u'10.0.0.3' } ], u'id': u'PORT1',", "u'tenant_id': u'TENANTID', u'extra_dhcp_opts': [], u'binding:vif_type': u'unbound', u'device_owner': u'', u'binding:capabilities': {u'port_filter':", "'neutron_url', 'NEUTRON_PUBLIC_NETWORK': 'd6b32008-1432-4299-81c7-cbe3128ba13f', 'NEUTRON_PRIVATE_NETWORK': '2afa16d6-7b84-484f-a642-af243b0e5b10', 'NEUTRON_SERVICE_NETWORK': '2afa16d6-7b84-484f-a642-af243b0e5b10', }) self.neutron_client_mock =", "\"License\"); you may not use this file except in compliance", "u'provider:physical_network': None, u'admin_state_up': True, u'tenant_id': u'TENANTID', u'provider:network_type': u'local', u'router:external': False,", "import neutron from teeth_overlord import tests from keystoneclient.apiclient import exceptions", "License is distributed on an \"AS IS\" BASIS, WITHOUT WARRANTIES", "Licensed under the Apache License, Version 2.0 (the \"License\"); you", "u'ip_address': u'10.0.0.3' } ]), ('network', SERIALIZED_NETWORK1) ]) class TestNeutronProvider(tests.TeethMockTestUtilities): def", "= neutron_exceptions.NeutronException() self.neutron_mock.list_networks.side_effect = exc self.assertRaises(self.provider.NetworkProviderException, self.provider.list_networks) def test_get_network_info(self): network", "def test_get_network_info(self): network = {'network': NETWORK1_RESPONSE} self.neutron_mock.show_network.return_value = network self.neutron_mock.show_subnet.side_effect", "self.neutron_mock.list_ports.assert_called_with(mac_address='a:b:c:d') def test_attach(self): port = {'port': PORT1_RESPONSE} self.neutron_mock.create_port.return_value = port", "= {'port': PORT1_RESPONSE} self.neutron_mock.create_port.return_value = port network = {'network': NETWORK1_RESPONSE}", "mac_address='a:b:c:d', network_id='network_id') def test_detach_client_exception(self): ports = {'ports': [PORT1_RESPONSE]} self.neutron_mock.list_ports.return_value =", "self.assertRaises(self.provider.NetworkProviderException, self.provider.detach, 'a:b:c:d') def test_get_default_networks(self): network_ids = self.provider.get_default_networks() self.assertEqual(network_ids, [self.config.NEUTRON_PUBLIC_NETWORK,", "u'name': u'public-subnet', u'enable_dhcp': False, u'network_id': u'NETWORK2', u'tenant_id': u'TENANTID', u'dns_nameservers': [],", "u'id': u'SUBNET2' } SERIALIZED_NETWORK1 = collections.OrderedDict([ ('id', u'NETWORK1'), ('name', u'private'),", "self.provider._get_neutron_client() self.neutron_client_mock.assert_called_with( '2.0', endpoint_url='neutron_url', token='auth_token' ) def test_get_neutron_client_exception(self): exc =", "NETWORK2_RESPONSE]} self.neutron_mock.list_networks.return_value = networks self.neutron_mock.show_subnet.side_effect = [ 
{'subnet': SUBNET1_RESPONSE}, {'subnet':", "License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed", "= self.neutron_client_mock.return_value self.keystone_client_mock = self.add_mock(keystone_client, 'Client') self.keystone_client_mock.return_value.auth_token = '<PASSWORD>' self.provider", "'network_id') def test_detatch(self): ports = {'ports': [PORT1_RESPONSE]} self.neutron_mock.list_ports.return_value = ports", "License for the specific language governing permissions and limitations under", "u'192.168.27.3' } ], u'id': u'PORT2', u'security_groups': [u'SECGRP'], u'device_id': u'' }", "'<PASSWORD>' self.provider = neutron.NeutronProvider(self.config) def test_get_auth_token(self): t = self.provider._get_auth_token() self.assertEqual(t,", "{ u'start': u'192.168.27.3', u'end': u'192.168.27.254' } ], u'host_routes': [], u'ip_version':", "neutron.NeutronProvider(self.config) def test_get_auth_token(self): t = self.provider._get_auth_token() self.assertEqual(t, 'auth_token') self.keystone_client_mock.assert_called_with( username='user',", "self.provider._get_neutron_client) def test_list_networks(self): networks = {'networks': [NETWORK1_RESPONSE, NETWORK2_RESPONSE]} self.neutron_mock.list_networks.return_value =", "{ u'status': u'ACTIVE', u'subnets': [u'SUBNET2'], u'name': u'public', u'provider:physical_network': None, u'admin_state_up':", "], u'host_routes': [], u'ip_version': 4, u'gateway_ip': u'192.168.27.2', u'cidr': u'192.168.27.0/24', u'id':", "self.keystone_client_mock.return_value.auth_token = '<PASSWORD>' self.provider = neutron.NeutronProvider(self.config) def test_get_auth_token(self): t =", "u'TENANTID', u'dns_nameservers': [], u'allocation_pools': [ { u'start': u'10.0.0.2', u'end': u'10.0.0.254'", "= self.provider.list_ports('a:b:c:d') self.assertEqual([p.serialize() for p in ports], [SERIALIZED_PORT1]) self.neutron_mock.list_ports.assert_called_with(mac_address='a:b:c:d') def", "False}, u'mac_address': u'00:09:7b:3e:18:ca', u'fixed_ips': [ { u'subnet_id': u'SUBNET2', u'ip_address': u'192.168.27.3'", "('network', SERIALIZED_NETWORK1) ]) class TestNeutronProvider(tests.TeethMockTestUtilities): def setUp(self): super(TestNeutronProvider, self).setUp() self.config", "'2afa16d6-7b84-484f-a642-af243b0e5b10', 'NEUTRON_SERVICE_NETWORK': '2afa16d6-7b84-484f-a642-af243b0e5b10', }) self.neutron_client_mock = self.add_mock(neutron_client, 'Client') self.neutron_mock =", "self.provider.get_network_info, 'NETWORK1') def test_get_network_info_client_exception(self): exc = neutron_exceptions.NeutronException() self.neutron_mock.show_network.side_effect = exc", "= collections.OrderedDict([ ('id', u'NETWORK2'), ('name', u'public'), ('status', u'ACTIVE'), ('subnets', [", "{ u'name': u'private-subnet', u'enable_dhcp': True, u'network_id': u'NETWORK1', u'tenant_id': u'TENANTID', u'dns_nameservers':", "False, u'id': u'NETWORK1', u'provider:segmentation_id': None } NETWORK2_RESPONSE = { u'status':", "('cidr', u'192.168.27.0/24'), ('enable_dhcp', False) ]) ]) ]) SERIALIZED_PORT1 = collections.OrderedDict([", "u'fixed_ips': [ { u'subnet_id': u'SUBNET1', u'ip_address': u'10.0.0.3' } ], u'id':", "test_get_network_info(self): network = {'network': NETWORK1_RESPONSE} self.neutron_mock.show_network.return_value = network self.neutron_mock.show_subnet.side_effect =", "True}, u'mac_address': u'fa:16:3e:e0:d4:63', u'fixed_ips': [ { u'subnet_id': u'SUBNET1', u'ip_address': u'10.0.0.3'", "self.assertRaises(self.provider.NetworkProviderException, 
self.provider._get_auth_token) def test_get_neutron_client(self): self.provider._get_neutron_client() self.neutron_client_mock.assert_called_with( '2.0', endpoint_url='neutron_url', token='auth_token' )", "('id', u'NETWORK2'), ('name', u'public'), ('status', u'ACTIVE'), ('subnets', [ collections.OrderedDict([ ('id',", "= port network = {'network': NETWORK1_RESPONSE} self.neutron_mock.show_network.return_value = network subnet", "t = self.provider._get_auth_token() self.assertEqual(t, 'auth_token') self.keystone_client_mock.assert_called_with( username='user', password='<PASSWORD>', tenant_id='tenant', auth_url='auth_url'", "test_attach(self): port = {'port': PORT1_RESPONSE} self.neutron_mock.create_port.return_value = port network =", "neutron_exceptions.NeutronException() self.neutron_mock.show_network.side_effect = exc self.assertRaises(self.provider.NetworkProviderException, self.provider.get_network_info, 'NETWORK1') def test_list_ports(self): ports", "u'id': u'PORT1', u'security_groups': [], u'device_id': u'' } PORT2_RESPONSE = {", "u'ip_version': 4, u'gateway_ip': u'10.0.0.1', u'cidr': u'10.0.0.0/24', u'id': u'SUBNET1' } SUBNET2_RESPONSE", "u'allocation_pools': [ { u'start': u'192.168.27.1', u'end': u'192.168.27.1' }, { u'start':", "4), ('gateway_ip', u'192.168.27.2'), ('cidr', u'192.168.27.0/24'), ('enable_dhcp', False) ]) ]) ])", "('status', u'ACTIVE'), ('mac_address', u'fa:16:3e:e0:d4:63'), ('fixed_ips', [ { u'subnet_id': u'SUBNET1', u'ip_address':", "}) self.assertEqual(port.serialize(), SERIALIZED_PORT1) def test_attach_client_exception(self): exc = neutron_exceptions.NeutronException() self.neutron_mock.create_port.side_effect =", "auth_url='auth_url' ) def test_get_auth_token_client_exception(self): exc = keystone_exceptions.ClientException self.keystone_client_mock.side_effect = exc", "def test_attach(self): port = {'port': PORT1_RESPONSE} self.neutron_mock.create_port.return_value = port network", "= '<PASSWORD>' self.provider = neutron.NeutronProvider(self.config) def test_get_auth_token(self): t = self.provider._get_auth_token()", "[u'SUBNET1'], u'name': u'private', u'provider:physical_network': None, u'admin_state_up': True, u'tenant_id': u'TENANTID', u'provider:network_type':", "u'name': u'', u'allowed_address_pairs': [], u'admin_state_up': True, u'network_id': u'NETWORK1', u'tenant_id': u'TENANTID',", "{ u'subnet_id': u'SUBNET2', u'ip_address': u'192.168.27.3' } ], u'id': u'PORT2', u'security_groups':", "def setUp(self): super(TestNeutronProvider, self).setUp() self.config = config.LazyConfig(config={ 'KEYSTONE_USER': 'user', 'KEYSTONE_PASS':", "= network subnet = {'subnet': SUBNET1_RESPONSE} self.neutron_mock.show_subnet.return_value = subnet self.provider.detach('a:b:c:d')", "except in compliance with the License. 
You may obtain a", "self.provider.get_network_info, 'NETWORK1') def test_list_ports(self): ports = {'ports': [PORT1_RESPONSE]} self.neutron_mock.list_ports.return_value =", "u'', u'allowed_address_pairs': [], u'admin_state_up': True, u'network_id': u'NETWORK2', u'tenant_id': u'TENANTID', u'extra_dhcp_opts':", "self.neutron_client_mock = self.add_mock(neutron_client, 'Client') self.neutron_mock = self.neutron_client_mock.return_value self.keystone_client_mock = self.add_mock(keystone_client,", "neutron_client NETWORK1_RESPONSE = { u'status': u'ACTIVE', u'subnets': [u'SUBNET1'], u'name': u'private',", "self.neutron_mock.create_port.side_effect = exc self.assertRaises(self.provider.NetworkProviderException, self.provider.attach, 'mac_address', 'network_id') def test_detatch(self): ports", "ports network = {'network': NETWORK1_RESPONSE} self.neutron_mock.show_network.return_value = network subnet =", "]), ('network', SERIALIZED_NETWORK1) ]) class TestNeutronProvider(tests.TeethMockTestUtilities): def setUp(self): super(TestNeutronProvider, self).setUp()", "u'tenant_id': u'TENANTID', u'provider:network_type': u'local', u'router:external': True, u'shared': False, u'id': u'NETWORK2',", "= {'networks': [NETWORK1_RESPONSE, NETWORK2_RESPONSE]} self.neutron_mock.list_networks.return_value = networks self.neutron_mock.show_subnet.side_effect = [", "True, 'mac_address': 'a:b:c:d' } }) self.assertEqual(port.serialize(), SERIALIZED_PORT1) def test_attach_client_exception(self): exc", "u'subnets': [u'SUBNET1'], u'name': u'private', u'provider:physical_network': None, u'admin_state_up': True, u'tenant_id': u'TENANTID',", "governing permissions and limitations under the License. \"\"\" import collections", "import client as keystone_client from neutronclient.common import exceptions as neutron_exceptions", "SERIALIZED_NETWORK1 = collections.OrderedDict([ ('id', u'NETWORK1'), ('name', u'private'), ('status', u'ACTIVE'), ('subnets',", "compliance with the License. You may obtain a copy of", "[], u'ip_version': 4, u'gateway_ip': u'192.168.27.2', u'cidr': u'192.168.27.0/24', u'id': u'SUBNET2' }", "neutron_exceptions.NeutronException() self.neutron_client_mock.side_effect = exc self.assertRaises(self.provider.NetworkProviderException, self.provider._get_neutron_client) def test_list_networks(self): networks =", "language governing permissions and limitations under the License. 
\"\"\" import", "u'public-subnet'), ('ip_version', 4), ('gateway_ip', u'192.168.27.2'), ('cidr', u'192.168.27.0/24'), ('enable_dhcp', False) ])", "None } PORT1_RESPONSE = { u'status': u'ACTIVE', u'binding:host_id': u'precise64', u'name':", "import client as neutron_client NETWORK1_RESPONSE = { u'status': u'ACTIVE', u'subnets':", "{ u'start': u'10.0.0.2', u'end': u'10.0.0.254' } ], u'host_routes': [], u'ip_version':", "'a:b:c:d' } }) self.assertEqual(port.serialize(), SERIALIZED_PORT1) def test_attach_client_exception(self): exc = neutron_exceptions.NeutronException()", "def test_list_networks(self): networks = {'networks': [NETWORK1_RESPONSE, NETWORK2_RESPONSE]} self.neutron_mock.list_networks.return_value = networks", "'user', 'KEYSTONE_PASS': '<PASSWORD>', 'KEYSTONE_TENANT_ID': 'tenant', 'KEYSTONE_AUTH_URL': 'auth_url', 'NEUTRON_VERSION': '2.0', 'NEUTRON_URL':", "u'binding:vif_type': u'ovs', u'device_owner': u'network:dhcp', u'binding:capabilities': {u'port_filter': True}, u'mac_address': u'fa:16:3e:e0:d4:63', u'fixed_ips':", "u'ACTIVE', u'subnets': [u'SUBNET1'], u'name': u'private', u'provider:physical_network': None, u'admin_state_up': True, u'tenant_id':", "= exc self.assertRaises(self.provider.NetworkProviderException, self.provider._get_auth_token) def test_get_neutron_client(self): self.provider._get_neutron_client() self.neutron_client_mock.assert_called_with( '2.0', endpoint_url='neutron_url',", "[ collections.OrderedDict([ ('id', u'SUBNET2'), ('name', u'public-subnet'), ('ip_version', 4), ('gateway_ip', u'192.168.27.2'),", "u'id': u'PORT2', u'security_groups': [u'SECGRP'], u'device_id': u'' } SUBNET1_RESPONSE = {", "= { u'status': u'ACTIVE', u'binding:host_id': u'precise64', u'name': u'', u'allowed_address_pairs': [],", "('ip_version', 4), ('gateway_ip', u'192.168.27.2'), ('cidr', u'192.168.27.0/24'), ('enable_dhcp', False) ]) ])", "subnet self.provider.detach('a:b:c:d') self.neutron_mock.delete_port.assert_called_with(PORT1_RESPONSE['id']) self.neutron_mock.list_ports.assert_called_with(mac_address='a:b:c:d') def test_detach_specific_network(self): ports = {'ports': [PORT1_RESPONSE]}", "self.provider.detach('a:b:c:d') self.neutron_mock.delete_port.assert_called_with(PORT1_RESPONSE['id']) self.neutron_mock.list_ports.assert_called_with(mac_address='a:b:c:d') def test_detach_specific_network(self): ports = {'ports': [PORT1_RESPONSE]} self.neutron_mock.list_ports.return_value", "'network_id') self.neutron_mock.create_port.assert_called_with({ 'port': { 'network_id': 'network_id', 'admin_state_up': True, 'mac_address': 'a:b:c:d'", "permissions and limitations under the License. \"\"\" import collections from", "u'name': u'private', u'provider:physical_network': None, u'admin_state_up': True, u'tenant_id': u'TENANTID', u'provider:network_type': u'local',", "PORT1_RESPONSE} self.neutron_mock.create_port.return_value = port network = {'network': NETWORK1_RESPONSE} self.neutron_mock.show_network.return_value =", "u'precise64', u'name': u'', u'allowed_address_pairs': [], u'admin_state_up': True, u'network_id': u'NETWORK1', u'tenant_id':", "u'ovs', u'device_owner': u'network:dhcp', u'binding:capabilities': {u'port_filter': True}, u'mac_address': u'fa:16:3e:e0:d4:63', u'fixed_ips': [", "collections.OrderedDict([ ('id', u'SUBNET1'), ('name', u'private-subnet'), ('ip_version', 4), ('gateway_ip', u'10.0.0.1'), ('cidr',", "<reponame>rackerlabs/teeth-overlord \"\"\" Copyright 2013 Rackspace, Inc. 
Licensed under the Apache", "u'enable_dhcp': True, u'network_id': u'NETWORK1', u'tenant_id': u'TENANTID', u'dns_nameservers': [], u'allocation_pools': [", "u'tenant_id': u'TENANTID', u'extra_dhcp_opts': [], u'binding:vif_type': u'ovs', u'device_owner': u'network:dhcp', u'binding:capabilities': {u'port_filter':", "self.provider.get_default_networks() self.assertEqual(network_ids, [self.config.NEUTRON_PUBLIC_NETWORK, self.config.NEUTRON_PRIVATE_NETWORK]) def test_get_service_network(self): network_id = self.provider.get_service_network() self.assertEqual(network_id,", "u'provider:physical_network': None, u'admin_state_up': True, u'tenant_id': u'TENANTID', u'provider:network_type': u'local', u'router:external': True,", "u'ACTIVE'), ('subnets', [ collections.OrderedDict([ ('id', u'SUBNET1'), ('name', u'private-subnet'), ('ip_version', 4),", "= network subnet = {'subnet': SUBNET1_RESPONSE} self.neutron_mock.show_subnet.return_value = subnet ports", "= self.provider.list_networks() self.neutron_mock.list_networks.assert_called() self.assertEqual(networks, []) def test_list_networks_client_exception(self): exc = neutron_exceptions.NeutronException()", "u'start': u'10.0.0.2', u'end': u'10.0.0.254' } ], u'host_routes': [], u'ip_version': 4,", "u'DOWN', u'binding:host_id': u'', u'name': u'', u'allowed_address_pairs': [], u'admin_state_up': True, u'network_id':", "u'TENANTID', u'provider:network_type': u'local', u'router:external': True, u'shared': False, u'id': u'NETWORK2', u'provider:segmentation_id':", "ports = {'ports': [PORT1_RESPONSE]} self.neutron_mock.list_ports.return_value = ports network = {'network':", "Copyright 2013 Rackspace, Inc. Licensed under the Apache License, Version", "('status', u'ACTIVE'), ('subnets', [ collections.OrderedDict([ ('id', u'SUBNET1'), ('name', u'private-subnet'), ('ip_version',", "ports = self.provider.list_ports('a:b:c:d') self.assertEqual([p.serialize() for p in ports], [SERIALIZED_PORT1]) self.neutron_mock.list_ports.assert_called_with(mac_address='a:b:c:d')", "u'local', u'router:external': True, u'shared': False, u'id': u'NETWORK2', u'provider:segmentation_id': None }", "} ], u'id': u'PORT1', u'security_groups': [], u'device_id': u'' } PORT2_RESPONSE", "n in networks], results) def test_list_networks_empty(self): self.neutron_mock.list_networks.return_value = {'networks': []}", "SERIALIZED_NETWORK2 ] self.assertEqual([n.serialize() for n in networks], results) def test_list_networks_empty(self):", "self.neutron_mock.show_network.side_effect = exc self.assertRaises(self.provider.NetworkDoesNotExist, self.provider.get_network_info, 'NETWORK1') def test_get_network_info_client_exception(self): exc =", "networks], results) def test_list_networks_empty(self): self.neutron_mock.list_networks.return_value = {'networks': []} networks =", "from neutronclient.common import exceptions as neutron_exceptions from neutronclient.neutron import client", "u'allowed_address_pairs': [], u'admin_state_up': True, u'network_id': u'NETWORK1', u'tenant_id': u'TENANTID', u'extra_dhcp_opts': [],", "u'fa:16:3e:e0:d4:63', u'fixed_ips': [ { u'subnet_id': u'SUBNET1', u'ip_address': u'10.0.0.3' } ],", "u'NETWORK2', u'tenant_id': u'TENANTID', u'extra_dhcp_opts': [], u'binding:vif_type': u'unbound', u'device_owner': u'', u'binding:capabilities':", "('name', u'public-subnet'), ('ip_version', 4), ('gateway_ip', u'192.168.27.2'), ('cidr', u'192.168.27.0/24'), ('enable_dhcp', False)", "in networks], results) def test_list_networks_empty(self): self.neutron_mock.list_networks.return_value = 
{'networks': []} networks", "[]) def test_list_networks_client_exception(self): exc = neutron_exceptions.NeutronException() self.neutron_mock.list_networks.side_effect = exc self.assertRaises(self.provider.NetworkProviderException,", "[ SERIALIZED_NETWORK1, SERIALIZED_NETWORK2 ] self.assertEqual([n.serialize() for n in networks], results)", "= ports network = {'network': NETWORK1_RESPONSE} self.neutron_mock.show_network.return_value = network subnet", "self.neutron_mock.list_networks.side_effect = exc self.assertRaises(self.provider.NetworkProviderException, self.provider.list_networks) def test_get_network_info(self): network = {'network':", "Unless required by applicable law or agreed to in writing,", "by applicable law or agreed to in writing, software distributed", "'mac_address', 'network_id') def test_detatch(self): ports = {'ports': [PORT1_RESPONSE]} self.neutron_mock.list_ports.return_value =", "{'subnet': SUBNET1_RESPONSE} self.neutron_mock.show_subnet.return_value = subnet self.provider.detach('a:b:c:d', 'network_id') self.neutron_mock.delete_port.assert_called_with(PORT1_RESPONSE['id']) self.neutron_mock.list_ports.assert_called_with( mac_address='a:b:c:d',", "import exceptions as keystone_exceptions from keystoneclient.v2_0 import client as keystone_client", "u'status': u'DOWN', u'binding:host_id': u'', u'name': u'', u'allowed_address_pairs': [], u'admin_state_up': True,", "subnet ports = self.provider.list_ports('a:b:c:d') self.assertEqual([p.serialize() for p in ports], [SERIALIZED_PORT1])", "= [ SERIALIZED_NETWORK1, SERIALIZED_NETWORK2 ] self.assertEqual([n.serialize() for n in networks],", "[], u'ip_version': 4, u'gateway_ip': u'10.0.0.1', u'cidr': u'10.0.0.0/24', u'id': u'SUBNET1' }", "from teeth_overlord.networks import neutron from teeth_overlord import tests from keystoneclient.apiclient", "self.assertEqual(t, 'auth_token') self.keystone_client_mock.assert_called_with( username='user', password='<PASSWORD>', tenant_id='tenant', auth_url='auth_url' ) def test_get_auth_token_client_exception(self):", "from neutronclient.neutron import client as neutron_client NETWORK1_RESPONSE = { u'status':", "SUBNET2_RESPONSE} ] networks = self.provider.list_networks() results = [ SERIALIZED_NETWORK1, SERIALIZED_NETWORK2", "('name', u''), ('status', u'ACTIVE'), ('mac_address', u'fa:16:3e:e0:d4:63'), ('fixed_ips', [ { u'subnet_id':", "{'subnet': SUBNET2_RESPONSE} ] networks = self.provider.list_networks() results = [ SERIALIZED_NETWORK1,", "self.neutron_mock.show_subnet.return_value = subnet ports = self.provider.list_ports('a:b:c:d') self.assertEqual([p.serialize() for p in", "import config from teeth_overlord.networks import neutron from teeth_overlord import tests", "u'unbound', u'device_owner': u'', u'binding:capabilities': {u'port_filter': False}, u'mac_address': u'00:09:7b:3e:18:ca', u'fixed_ips': [", "u'dns_nameservers': [], u'allocation_pools': [ { u'start': u'10.0.0.2', u'end': u'10.0.0.254' }", "u'ip_address': u'10.0.0.3' } ], u'id': u'PORT1', u'security_groups': [], u'device_id': u''", "= '404 Not Found' self.neutron_mock.show_network.side_effect = exc self.assertRaises(self.provider.NetworkDoesNotExist, self.provider.get_network_info, 'NETWORK1')", "self.keystone_client_mock.assert_called_with( username='user', password='<PASSWORD>', tenant_id='tenant', auth_url='auth_url' ) def test_get_auth_token_client_exception(self): exc =", "tests from keystoneclient.apiclient import exceptions as keystone_exceptions from keystoneclient.v2_0 import", "express or implied. 
"""
Copyright 2013 Rackspace, Inc.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import collections

from teeth_overlord import config
from teeth_overlord.networks import neutron
from teeth_overlord import tests

from keystoneclient.apiclient import exceptions as keystone_exceptions
from keystoneclient.v2_0 import client as keystone_client
from neutronclient.common import exceptions as neutron_exceptions
from neutronclient.neutron import client as neutron_client


NETWORK1_RESPONSE = {
    u'status': u'ACTIVE',
    u'subnets': [u'SUBNET1'],
    u'name': u'private',
    u'provider:physical_network': None,
    u'admin_state_up': True,
    u'tenant_id': u'TENANTID',
    u'provider:network_type': u'local',
    u'router:external': False,
    u'shared': False,
    u'id': u'NETWORK1',
    u'provider:segmentation_id': None
}

NETWORK2_RESPONSE = {
    u'status': u'ACTIVE',
    u'subnets': [u'SUBNET2'],
    u'name': u'public',
    u'provider:physical_network': None,
    u'admin_state_up': True,
    u'tenant_id': u'TENANTID',
    u'provider:network_type': u'local',
    u'router:external': True,
    u'shared': False,
    u'id': u'NETWORK2',
    u'provider:segmentation_id': None
}

PORT1_RESPONSE = {
    u'status': u'ACTIVE',
    u'binding:host_id': u'precise64',
    u'name': u'',
    u'allowed_address_pairs': [],
    u'admin_state_up': True,
    u'network_id': u'NETWORK1',
    u'tenant_id': u'TENANTID',
    u'extra_dhcp_opts': [],
    u'binding:vif_type': u'ovs',
    u'device_owner': u'network:dhcp',
    u'binding:capabilities': {u'port_filter': True},
    u'mac_address': u'fa:16:3e:e0:d4:63',
    u'fixed_ips': [
        {
            u'subnet_id': u'SUBNET1',
            u'ip_address': u'10.0.0.3'
        }
    ],
    u'id': u'PORT1',
    u'security_groups': [],
    u'device_id': u''
}

PORT2_RESPONSE = {
    u'status': u'DOWN',
    u'binding:host_id': u'',
    u'name': u'',
    u'allowed_address_pairs': [],
    u'admin_state_up': True,
    u'network_id': u'NETWORK2',
    u'tenant_id': u'TENANTID',
    u'extra_dhcp_opts': [],
    u'binding:vif_type': u'ovs',
    u'device_owner': u'',
    u'binding:capabilities': {u'port_filter': False},
    u'mac_address': u'00:09:7b:3e:18:ca',
    u'fixed_ips': [
        {
            u'subnet_id': u'SUBNET2',
            u'ip_address': u'192.168.27.3'
        }
    ],
    u'id': u'PORT2',
    u'security_groups': [],
    u'device_id': u''
}

SUBNET1_RESPONSE = {
    u'name': u'private-subnet',
    u'enable_dhcp': True,
    u'network_id': u'NETWORK1',
    u'tenant_id': u'TENANTID',
    u'dns_nameservers': [],
    u'allocation_pools': [
        {
            u'start': u'10.0.0.2',
            u'end': u'10.0.0.254'
        }
    ],
    u'host_routes': [],
    u'ip_version': 4,
    u'gateway_ip': u'10.0.0.1',
    u'cidr': u'10.0.0.0/24',
    u'id': u'SUBNET1'
}

SUBNET2_RESPONSE = {
    u'name': u'public-subnet',
    u'enable_dhcp': False,
    u'network_id': u'NETWORK2',
    u'tenant_id': u'TENANTID',
    u'dns_nameservers': [],
    u'allocation_pools': [
        {
            u'start': u'192.168.27.1',
            u'end': u'192.168.27.1'
        },
        {
            u'start': u'192.168.27.3',
            u'end': u'192.168.27.254'
        }
    ],
    u'host_routes': [],
    u'ip_version': 4,
    u'gateway_ip': u'192.168.27.2',
    u'cidr': u'192.168.27.0/24',
    u'id': u'SUBNET2'
}

SERIALIZED_NETWORK1 = collections.OrderedDict([
    ('id', u'NETWORK1'),
    ('name', u'private'),
    ('status', u'ACTIVE'),
    ('subnets', [
        collections.OrderedDict([
            ('id', u'SUBNET1'),
            ('name', u'private-subnet'),
            ('ip_version', 4),
            ('gateway_ip', u'10.0.0.1'),
            ('cidr', u'10.0.0.0/24'),
            ('enable_dhcp', True)
        ])
    ])
])

SERIALIZED_NETWORK2 = collections.OrderedDict([
    ('id', u'NETWORK2'),
    ('name', u'public'),
    ('status', u'ACTIVE'),
    ('subnets', [
        collections.OrderedDict([
            ('id', u'SUBNET2'),
            ('name', u'public-subnet'),
            ('ip_version', 4),
            ('gateway_ip', u'192.168.27.2'),
            ('cidr', u'192.168.27.0/24'),
            ('enable_dhcp', False)
        ])
    ])
])

SERIALIZED_PORT1 = collections.OrderedDict([
    ('id', u'PORT1'),
    ('name', u''),
    ('status', u'ACTIVE'),
    ('mac_address', u'fa:16:3e:e0:d4:63'),
    ('fixed_ips', [
        {
            u'subnet_id': u'SUBNET1',
            u'ip_address': u'10.0.0.3'
        }
    ]),
    ('network', SERIALIZED_NETWORK1)
])


class TestNeutronProvider(tests.TeethMockTestUtilities):

    def setUp(self):
        super(TestNeutronProvider, self).setUp()

        self.config = config.LazyConfig(config={
            'KEYSTONE_USER': 'user',
            'KEYSTONE_PASS': '<PASSWORD>',
            'KEYSTONE_TENANT_ID': 'tenant',
            'KEYSTONE_AUTH_URL': 'auth_url',
            'NEUTRON_VERSION': '2.0',
            'NEUTRON_URL': 'neutron_url',
            'NEUTRON_PUBLIC_NETWORK': 'd6b32008-1432-4299-81c7-cbe3128ba13f',
            'NEUTRON_PRIVATE_NETWORK': '2afa16d6-7b84-484f-a642-af243b0e5b10',
            'NEUTRON_SERVICE_NETWORK': '2afa16d6-7b84-484f-a642-af243b0e5b10',
        })

        self.neutron_client_mock = self.add_mock(neutron_client, 'Client')
        self.neutron_mock = self.neutron_client_mock.return_value

        self.keystone_client_mock = self.add_mock(keystone_client, 'Client')
        self.keystone_client_mock.return_value.auth_token = '<PASSWORD>'

        self.provider = neutron.NeutronProvider(self.config)

    def test_get_auth_token(self):
        self.provider._get_auth_token()

        self.keystone_client_mock.assert_called_with(
            username='user',
            password='<PASSWORD>',
            tenant_id='tenant',
            auth_url='auth_url'
        )

    def test_get_auth_token_client_exception(self):
        exc = keystone_exceptions.ClientException
        self.keystone_client_mock.side_effect = exc

        self.assertRaises(self.provider.NetworkProviderException,
                          self.provider._get_auth_token)

    def test_get_neutron_client(self):
        self.provider._get_neutron_client()

        self.neutron_client_mock.assert_called_with(
            '2.0',
            endpoint_url='neutron_url',
            token='auth_token'
        )

    def test_get_neutron_client_exception(self):
        exc = neutron_exceptions.NeutronException()
        self.neutron_client_mock.side_effect = exc

        self.assertRaises(self.provider.NetworkProviderException,
                          self.provider._get_neutron_client)

    def test_list_networks(self):
        networks = {'networks': [NETWORK1_RESPONSE, NETWORK2_RESPONSE]}
        self.neutron_mock.list_networks.return_value = networks
        self.neutron_mock.show_subnet.side_effect = [
            {'subnet': SUBNET1_RESPONSE},
            {'subnet': SUBNET2_RESPONSE}
        ]

        networks = self.provider.list_networks()

        results = [
            SERIALIZED_NETWORK1,
            SERIALIZED_NETWORK2
        ]
        self.assertEqual([n.serialize() for n in networks], results)

    def test_list_networks_empty(self):
        self.neutron_mock.list_networks.return_value = {'networks': []}

        networks = self.provider.list_networks()

        self.neutron_mock.list_networks.assert_called()
        self.assertEqual(networks, [])

    def test_list_networks_client_exception(self):
        exc = neutron_exceptions.NeutronException()
        self.neutron_mock.list_networks.side_effect = exc

        self.assertRaises(self.provider.NetworkProviderException,
                          self.provider.list_networks)

    def test_get_network_info(self):
        network = {'network': NETWORK1_RESPONSE}
        self.neutron_mock.show_network.return_value = network
        self.neutron_mock.show_subnet.side_effect = [
            {'subnet': SUBNET1_RESPONSE}
        ]

        network = self.provider.get_network_info('NETWORK1')

        self.assertEqual(network.serialize(), SERIALIZED_NETWORK1)
        self.neutron_mock.show_network.assert_called_with('NETWORK1')

    def test_get_network_info_does_not_exist(self):
        exc = neutron_exceptions.NeutronException()
        exc.message = '404 Not Found'
        self.neutron_mock.show_network.side_effect = exc

        self.assertRaises(self.provider.NetworkDoesNotExist,
                          self.provider.get_network_info,
                          'NETWORK1')

    def test_get_network_info_client_exception(self):
        exc = neutron_exceptions.NeutronException()
        self.neutron_mock.show_network.side_effect = exc

        self.assertRaises(self.provider.NetworkProviderException,
                          self.provider.get_network_info,
                          'NETWORK1')

    def test_list_ports(self):
        ports = {'ports': [PORT1_RESPONSE]}
        self.neutron_mock.list_ports.return_value = ports
        network = {'network': NETWORK1_RESPONSE}
        self.neutron_mock.show_network.return_value = network
        subnet = {'subnet': SUBNET1_RESPONSE}
        self.neutron_mock.show_subnet.return_value = subnet

        ports = self.provider.list_ports('a:b:c:d')

        self.assertEqual([p.serialize() for p in ports],
                         [SERIALIZED_PORT1])
        self.neutron_mock.list_ports.assert_called_with(mac_address='a:b:c:d')

    def test_attach(self):
        port = {'port': PORT1_RESPONSE}
        self.neutron_mock.create_port.return_value = port
        network = {'network': NETWORK1_RESPONSE}
        self.neutron_mock.show_network.return_value = network
        subnet = {'subnet': SUBNET1_RESPONSE}
        self.neutron_mock.show_subnet.return_value = subnet

        port = self.provider.attach('a:b:c:d', 'network_id')

        self.neutron_mock.create_port.assert_called_with({
            'port': {
                'network_id': 'network_id',
                'admin_state_up': True,
                'mac_address': 'a:b:c:d'
            }
        })
        self.assertEqual(port.serialize(), SERIALIZED_PORT1)

    def test_attach_client_exception(self):
        exc = neutron_exceptions.NeutronException()
        self.neutron_mock.create_port.side_effect = exc

        self.assertRaises(self.provider.NetworkProviderException,
                          self.provider.attach,
                          'mac_address', 'network_id')

    def test_detatch(self):
        ports = {'ports': [PORT1_RESPONSE]}
        self.neutron_mock.list_ports.return_value = ports
        network = {'network': NETWORK1_RESPONSE}
        self.neutron_mock.show_network.return_value = network
        subnet = {'subnet': SUBNET1_RESPONSE}
        self.neutron_mock.show_subnet.return_value = subnet

        self.provider.detach('a:b:c:d')

        self.neutron_mock.delete_port.assert_called_with(PORT1_RESPONSE['id'])
        self.neutron_mock.list_ports.assert_called_with(mac_address='a:b:c:d')

    def test_detach_specific_network(self):
        ports = {'ports': [PORT1_RESPONSE]}
        self.neutron_mock.list_ports.return_value = ports
        network = {'network': NETWORK1_RESPONSE}
        self.neutron_mock.show_network.return_value = network
        subnet = {'subnet': SUBNET1_RESPONSE}
        self.neutron_mock.show_subnet.return_value = subnet

        self.provider.detach('a:b:c:d', 'network_id')

        self.neutron_mock.delete_port.assert_called_with(PORT1_RESPONSE['id'])
        self.neutron_mock.list_ports.assert_called_with(
            mac_address='a:b:c:d',
            network_id='network_id')

    def test_detach_client_exception(self):
        ports = {'ports': [PORT1_RESPONSE]}
        self.neutron_mock.list_ports.return_value = ports
        network = {'network': NETWORK1_RESPONSE}
        self.neutron_mock.show_network.return_value = network
        subnet = {'subnet': SUBNET1_RESPONSE}
        self.neutron_mock.show_subnet.return_value = subnet

        exc = neutron_exceptions.NeutronException()
        self.neutron_mock.delete_port.side_effect = exc

        self.assertRaises(self.provider.NetworkProviderException,
                          self.provider.detach,
                          'a:b:c:d')

    def test_get_default_networks(self):
        network_ids = self.provider.get_default_networks()

        self.assertEqual(network_ids, [self.config.NEUTRON_PUBLIC_NETWORK,
                                       self.config.NEUTRON_PRIVATE_NETWORK])

    def test_get_service_network(self):
        network_id = self.provider.get_service_network()

        self.assertEqual(network_id, self.config.NEUTRON_SERVICE_NETWORK)
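# A minimal usage sketch of the provider exercised by the tests above. The config
# keys and the NeutronProvider/list_networks/attach/detach names are taken from the
# test file; running this against a live Keystone/Neutron endpoint, and the exact
# return types, are assumptions rather than something the tests themselves show.
from teeth_overlord import config
from teeth_overlord.networks import neutron

cfg = config.LazyConfig(config={
    'KEYSTONE_USER': 'user',
    'KEYSTONE_PASS': '<PASSWORD>',
    'KEYSTONE_TENANT_ID': 'tenant',
    'KEYSTONE_AUTH_URL': 'auth_url',
    'NEUTRON_VERSION': '2.0',
    'NEUTRON_URL': 'neutron_url',
    'NEUTRON_PUBLIC_NETWORK': 'd6b32008-1432-4299-81c7-cbe3128ba13f',
    'NEUTRON_PRIVATE_NETWORK': '2afa16d6-7b84-484f-a642-af243b0e5b10',
    'NEUTRON_SERVICE_NETWORK': '2afa16d6-7b84-484f-a642-af243b0e5b10',
})
provider = neutron.NeutronProvider(cfg)

# Serialize every visible network, as test_list_networks does through the mock.
for network in provider.list_networks():
    print(network.serialize())

# Attach a MAC address to a network, then detach it again.
port = provider.attach('a:b:c:d', cfg.NEUTRON_PRIVATE_NETWORK)
provider.detach('a:b:c:d')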
[ "global brightness global calibration_mode brightness = 500 calibration_mode = False", "def init(): global brightness global calibration_mode brightness = 500 calibration_mode", "init(): global brightness global calibration_mode brightness = 500 calibration_mode =" ]
[ "from django.db.models import Q, F from django.shortcuts import render from", "+ 1, uv=F('uv') + 1) elif increase_pv: Post.objects.filter(pk=self.object.id).update(pv=F('pv') + 1)", "class IndexView(CommonViewMinxin, ListView): queryset = Post.latest_posts() paginate_by = 5 context_object_name", "1, 1*60) #1分钟有效 if not cache.get(uv_key): increase_uv = True cache.set(uv_key,", "= super().get_context_data() context.update({ 'keyword': self.request.GET.get('keyword', '') }) return context def", "class CommonViewMinxin: def get_context_data(self, **kwargs): context = super().get_context_data(**kwargs) context.update({ 'sidebars':", "= Post.objects.get(id=post_id) except Post.DoesNotExist: raise Http404('Post does not exist!') context={", "def post_list(request, category_id=None, tag_id=None): tag = None category = None", "pv_key = 'pv:%s:%s' % (uid, self.request.path) uv_key = 'uv:%s:%s:%s' %", "return response def handle_visited(self): increase_pv = False increase_uv = False", "self.kwargs.get('category_id') category = get_object_or_404(Category, pk=category_id) context.update({ 'category': category, }) return", "import silk_profile from config.models import SideBar from .models import Post,", "queryset.filter(owner_id=author_id) ''' def post_list(request, category_id=None, tag_id=None): tag = None category", "silk.profiling.profiler import silk_profile from config.models import SideBar from .models import", "'category': category, }) return context def get_queryset(self): '''重写queryset,根据分类过滤''' queryset =", "get_queryset(self): queryset = super().get_queryset() author_id = self.kwargs.get('owner_id') return queryset.filter(owner_id=author_id) '''", "increase_pv: Post.objects.filter(pk=self.object.id).update(pv=F('uv') + 1) class SearchView(IndexView): def get_context_data(self): context =", "class SearchView(IndexView): def get_context_data(self): context = super().get_context_data() context.update({ 'keyword': self.request.GET.get('keyword',", "class TagView(IndexView): def get_context_data(self, **kwargs): context = super().get_context_data(**kwargs) tag_id =", "return context def get_queryset(self): '''重写queryset,根据分类过滤''' queryset = super().get_queryset() category_id =", "#1分钟有效 if not cache.get(uv_key): increase_uv = True cache.set(uv_key, 1, 24*60*60)", "= self.kwargs.get('owner_id') return queryset.filter(owner_id=author_id) ''' def post_list(request, category_id=None, tag_id=None): tag", "= Category.objects.filter(status=Category.STATUS_NORMAL) nav_categories = [] normal_categories = [] for cate", "not cache.get(uv_key): increase_uv = True cache.set(uv_key, 1, 24*60*60) if increase_pv", "= None if tag_id: post_list, tag = Post.get_by_tag(tag_id) elif category_id:", "self.kwargs.get('tag_id') return queryset.filter(tag__id=tag_id) class PostDetailView(CommonViewMinxin, DetailView): queryset = Post.latest_posts() template_name", "tag = Post.get_by_tag(tag_id) elif category_id: post_list, category=Post.get_by_category(category_id) else: post_list =", "= super().get_queryset() keyword = self.request.GET.get('keyword') if not keyword: return queryset", "Post.latest_posts() paginate_by = 5 context_object_name = 'post_list' template_name = 'blog/list.html'", "return context def get_queryset(self): '''重写queryset,根据标签过滤''' queryset = super().get_queryset() tag_id =", "else: normal_categories.append(cate) return { 'navs': nav_categories, 'categories': normal_categories, } class", "category_id=None, tag_id=None): tag = None category = None if tag_id:", "def get_sidebars(self): return 
SideBar.objects.filter(status=SideBar.STATUS_SHOW) def get_navs(self): categories = Category.objects.filter(status=Category.STATUS_NORMAL) nav_categories", "= 'blog/list.html' class CategoryView(IndexView): def get_context_data(self, **kwargs): context = super().get_context_data(**kwargs)", "context = super().get_context_data(**kwargs) category_id = self.kwargs.get('category_id') category = get_object_or_404(Category, pk=category_id)", "queryset.filter(Q(title__icontains=keyword) | Q(desc__icontains =keyword)) class AuthorView(IndexView): def get_queryset(self): queryset =", "= Post.latest_posts() context = { 'category': category, 'tag': tag, 'post_list':", "#from silk.profiling.profiler import silk_profile from config.models import SideBar from .models", "= 'post' pk_url_kwarg = 'post_id' def get(self, request, *args, **kwargs):", "and increase_uv: Post.objects.filter(pk=self.object.id).update(pv=F('pv') + 1, uv=F('uv') + 1) elif increase_pv:", "= False uid = self.request.uid pv_key = 'pv:%s:%s' % (uid,", "pk_url_kwarg = 'post_id' def get(self, request, *args, **kwargs): response =", "queryset = Post.latest_posts() paginate_by = 5 context_object_name = 'post_list' template_name", "template_name = 'blog/list.html' class CategoryView(IndexView): def get_context_data(self, **kwargs): context =", "return queryset.filter(Q(title__icontains=keyword) | Q(desc__icontains =keyword)) class AuthorView(IndexView): def get_queryset(self): queryset", "def handle_visited(self): increase_pv = False increase_uv = False uid =", "context.update({ 'tag': tag, }) return context def get_queryset(self): '''重写queryset,根据标签过滤''' queryset", "def get_queryset(self): '''重写queryset,根据分类过滤''' queryset = super().get_queryset() category_id = self.kwargs.get('category_id') return", "super().get_context_data(**kwargs) context.update({ 'sidebars': self.get_sidebars(), }) context.update(self.get_navs()) return context def get_sidebars(self):", "return queryset return queryset.filter(Q(title__icontains=keyword) | Q(desc__icontains =keyword)) class AuthorView(IndexView): def", "PostDetailView(CommonViewMinxin, DetailView): queryset = Post.latest_posts() template_name = 'blog/detail.html' context_object_name =", "super().get(request, *args, **kwargs) self.handle_visited() return response def handle_visited(self): increase_pv =", "class AuthorView(IndexView): def get_queryset(self): queryset = super().get_queryset() author_id = self.kwargs.get('owner_id')", "post_id=None): try: post = Post.objects.get(id=post_id) except Post.DoesNotExist: raise Http404('Post does", "ListView): queryset = Post.latest_posts() paginate_by = 5 context_object_name = 'post_list'", "get_queryset(self): '''重写queryset,根据标签过滤''' queryset = super().get_queryset() tag_id = self.kwargs.get('tag_id') return queryset.filter(tag__id=tag_id)", "'blog/detail.html' context_object_name = 'post' pk_url_kwarg = 'post_id' def get(self, request,", "def get_context_data(self, **kwargs): context = super().get_context_data(**kwargs) tag_id = self.kwargs.get('tag_id') tag", "= super().get_queryset() author_id = self.kwargs.get('owner_id') return queryset.filter(owner_id=author_id) ''' def post_list(request,", "post_list = Post.latest_posts() context = { 'category': category, 'tag': tag,", "uid = self.request.uid pv_key = 'pv:%s:%s' % (uid, self.request.path) uv_key", "}) context.update(self.get_navs()) return context def get_sidebars(self): return SideBar.objects.filter(status=SideBar.STATUS_SHOW) def get_navs(self):", "if not cache.get(pv_key): increase_pv = True 
cache.set(pv_key, 1, 1*60) #1分钟有效", "tag_id = self.kwargs.get('tag_id') tag = get_object_or_404(Tag, pk=tag_id) context.update({ 'tag': tag,", "def get_navs(self): categories = Category.objects.filter(status=Category.STATUS_NORMAL) nav_categories = [] normal_categories =", "None category = None if tag_id: post_list, tag = Post.get_by_tag(tag_id)", "category = None if tag_id: post_list, tag = Post.get_by_tag(tag_id) elif", "category_id = self.kwargs.get('category_id') category = get_object_or_404(Category, pk=category_id) context.update({ 'category': category,", ".models import Post, Tag, Category from comment.models import Comment class", "context = super().get_context_data() context.update({ 'keyword': self.request.GET.get('keyword', '') }) return context", "if tag_id: post_list, tag = Post.get_by_tag(tag_id) elif category_id: post_list, category=Post.get_by_category(category_id)", "get_object_or_404(Category, pk=category_id) context.update({ 'category': category, }) return context def get_queryset(self):", "super().get_context_data(**kwargs) tag_id = self.kwargs.get('tag_id') tag = get_object_or_404(Tag, pk=tag_id) context.update({ 'tag':", "[] for cate in categories: if cate.is_nav: nav_categories.append(cate) else: normal_categories.append(cate)", "get_queryset(self): '''重写queryset,根据分类过滤''' queryset = super().get_queryset() category_id = self.kwargs.get('category_id') return queryset.filter(category_id=category_id)", "Q, F from django.shortcuts import render from django.shortcuts import get_object_or_404", "django.db.models import Q, F from django.shortcuts import render from django.shortcuts", "post_list, tag = Post.get_by_tag(tag_id) elif category_id: post_list, category=Post.get_by_category(category_id) else: post_list", "= True cache.set(pv_key, 1, 1*60) #1分钟有效 if not cache.get(uv_key): increase_uv", "= 'post_list' template_name = 'blog/list.html' class CategoryView(IndexView): def get_context_data(self, **kwargs):", "24*60*60) if increase_pv and increase_uv: Post.objects.filter(pk=self.object.id).update(pv=F('pv') + 1, uv=F('uv') +", "increase_pv: Post.objects.filter(pk=self.object.id).update(pv=F('pv') + 1) elif increase_pv: Post.objects.filter(pk=self.object.id).update(pv=F('uv') + 1) class", "cache.get(pv_key): increase_pv = True cache.set(pv_key, 1, 1*60) #1分钟有效 if not", "self.request.GET.get('keyword') if not keyword: return queryset return queryset.filter(Q(title__icontains=keyword) | Q(desc__icontains", "= [] for cate in categories: if cate.is_nav: nav_categories.append(cate) else:", "% (uid, str(date.today()), self.request.path) if not cache.get(pv_key): increase_pv = True", "import SideBar from .models import Post, Tag, Category from comment.models", "return { 'navs': nav_categories, 'categories': normal_categories, } class IndexView(CommonViewMinxin, ListView):", "SideBar.objects.filter(status=SideBar.STATUS_SHOW) def get_navs(self): categories = Category.objects.filter(status=Category.STATUS_NORMAL) nav_categories = [] normal_categories", "+ 1) elif increase_pv: Post.objects.filter(pk=self.object.id).update(pv=F('uv') + 1) class SearchView(IndexView): def", "1, 24*60*60) if increase_pv and increase_uv: Post.objects.filter(pk=self.object.id).update(pv=F('pv') + 1, uv=F('uv')", "'sidebars': SideBar.get_all(), } context.update(Category.get_navs()) return render(request, 'blog/list.html', context=context) def post_detail(request,", "queryset return queryset.filter(Q(title__icontains=keyword) | Q(desc__icontains =keyword)) class AuthorView(IndexView): def 
get_queryset(self):", "= Post.latest_posts() paginate_by = 5 context_object_name = 'post_list' template_name =", "(uid, self.request.path) uv_key = 'uv:%s:%s:%s' % (uid, str(date.today()), self.request.path) if", "= self.kwargs.get('category_id') category = get_object_or_404(Category, pk=category_id) context.update({ 'category': category, })", "template_name = 'blog/detail.html' context_object_name = 'post' pk_url_kwarg = 'post_id' def", "def get_queryset(self): queryset = super().get_queryset() author_id = self.kwargs.get('owner_id') return queryset.filter(owner_id=author_id)", "'category': category, 'tag': tag, 'post_list': post_list, 'sidebars': SideBar.get_all(), } context.update(Category.get_navs())", "super().get_context_data(**kwargs) category_id = self.kwargs.get('category_id') category = get_object_or_404(Category, pk=category_id) context.update({ 'category':", "post_detail(request, post_id=None): try: post = Post.objects.get(id=post_id) except Post.DoesNotExist: raise Http404('Post", "Post.objects.filter(pk=self.object.id).update(pv=F('pv') + 1, uv=F('uv') + 1) elif increase_pv: Post.objects.filter(pk=self.object.id).update(pv=F('pv') +", "render from django.shortcuts import get_object_or_404 from django.views.generic import ListView, DetailView", "class PostDetailView(CommonViewMinxin, DetailView): queryset = Post.latest_posts() template_name = 'blog/detail.html' context_object_name", "return SideBar.objects.filter(status=SideBar.STATUS_SHOW) def get_navs(self): categories = Category.objects.filter(status=Category.STATUS_NORMAL) nav_categories = []", "import render from django.shortcuts import get_object_or_404 from django.views.generic import ListView,", "post = Post.objects.get(id=post_id) except Post.DoesNotExist: raise Http404('Post does not exist!')", "django.core.cache import cache from django.db.models import Q, F from django.shortcuts", "= self.kwargs.get('tag_id') tag = get_object_or_404(Tag, pk=tag_id) context.update({ 'tag': tag, })", "context.update({ 'category': category, }) return context def get_queryset(self): '''重写queryset,根据分类过滤''' queryset", "try: post = Post.objects.get(id=post_id) except Post.DoesNotExist: raise Http404('Post does not", "nav_categories = [] normal_categories = [] for cate in categories:", "exist!') context={ 'post': post, 'sidebars': SideBar.get_all(), } context.update(Category.get_navs()) return render(request,", "(uid, str(date.today()), self.request.path) if not cache.get(pv_key): increase_pv = True cache.set(pv_key,", "= self.kwargs.get('category_id') return queryset.filter(category_id=category_id) class TagView(IndexView): def get_context_data(self, **kwargs): context", "Q(desc__icontains =keyword)) class AuthorView(IndexView): def get_queryset(self): queryset = super().get_queryset() author_id", "context={ 'post': post, 'sidebars': SideBar.get_all(), } context.update(Category.get_navs()) return render(request, 'blog/detail.html',", "super().get_queryset() keyword = self.request.GET.get('keyword') if not keyword: return queryset return", "get_queryset(self): queryset = super().get_queryset() keyword = self.request.GET.get('keyword') if not keyword:", "'''重写queryset,根据标签过滤''' queryset = super().get_queryset() tag_id = self.kwargs.get('tag_id') return queryset.filter(tag__id=tag_id) class", "for cate in categories: if cate.is_nav: nav_categories.append(cate) else: normal_categories.append(cate) return", "+ 1) elif increase_pv: Post.objects.filter(pk=self.object.id).update(pv=F('pv') + 1) elif increase_pv: 
Post.objects.filter(pk=self.object.id).update(pv=F('uv')", "request, *args, **kwargs): response = super().get(request, *args, **kwargs) self.handle_visited() return", "self.request.path) if not cache.get(pv_key): increase_pv = True cache.set(pv_key, 1, 1*60)", "= super().get_context_data(**kwargs) context.update({ 'sidebars': self.get_sidebars(), }) context.update(self.get_navs()) return context def", "elif increase_pv: Post.objects.filter(pk=self.object.id).update(pv=F('pv') + 1) elif increase_pv: Post.objects.filter(pk=self.object.id).update(pv=F('uv') + 1)", "context.update(self.get_navs()) return context def get_sidebars(self): return SideBar.objects.filter(status=SideBar.STATUS_SHOW) def get_navs(self): categories", "CategoryView(IndexView): def get_context_data(self, **kwargs): context = super().get_context_data(**kwargs) category_id = self.kwargs.get('category_id')", "context_object_name = 'post' pk_url_kwarg = 'post_id' def get(self, request, *args,", "cache from django.db.models import Q, F from django.shortcuts import render", "render(request, 'blog/list.html', context=context) def post_detail(request, post_id=None): try: post = Post.objects.get(id=post_id)", "import date from django.core.cache import cache from django.db.models import Q,", "cache.set(pv_key, 1, 1*60) #1分钟有效 if not cache.get(uv_key): increase_uv = True", "handle_visited(self): increase_pv = False increase_uv = False uid = self.request.uid", "category, }) return context def get_queryset(self): '''重写queryset,根据分类过滤''' queryset = super().get_queryset()", "post, 'sidebars': SideBar.get_all(), } context.update(Category.get_navs()) return render(request, 'blog/detail.html', context=context) '''", "cate in categories: if cate.is_nav: nav_categories.append(cate) else: normal_categories.append(cate) return {", "*args, **kwargs): response = super().get(request, *args, **kwargs) self.handle_visited() return response", "category=Post.get_by_category(category_id) else: post_list = Post.latest_posts() context = { 'category': category,", "from datetime import date from django.core.cache import cache from django.db.models", "increase_pv = False increase_uv = False uid = self.request.uid pv_key", "Post, Tag, Category from comment.models import Comment class CommonViewMinxin: def", "get(self, request, *args, **kwargs): response = super().get(request, *args, **kwargs) self.handle_visited()", "return queryset.filter(owner_id=author_id) ''' def post_list(request, category_id=None, tag_id=None): tag = None", "= super().get_queryset() tag_id = self.kwargs.get('tag_id') return queryset.filter(tag__id=tag_id) class PostDetailView(CommonViewMinxin, DetailView):", "django.views.generic import ListView, DetailView #from silk.profiling.profiler import silk_profile from config.models", "'post_id' def get(self, request, *args, **kwargs): response = super().get(request, *args,", "super().get_queryset() category_id = self.kwargs.get('category_id') return queryset.filter(category_id=category_id) class TagView(IndexView): def get_context_data(self,", "context = super().get_context_data(**kwargs) context.update({ 'sidebars': self.get_sidebars(), }) context.update(self.get_navs()) return context", "tag_id: post_list, tag = Post.get_by_tag(tag_id) elif category_id: post_list, category=Post.get_by_category(category_id) else:", "uv_key = 'uv:%s:%s:%s' % (uid, str(date.today()), self.request.path) if not cache.get(pv_key):", "F from django.shortcuts import render from django.shortcuts import get_object_or_404 from", "**kwargs): context = 
super().get_context_data(**kwargs) tag_id = self.kwargs.get('tag_id') tag = get_object_or_404(Tag,", "tag, }) return context def get_queryset(self): '''重写queryset,根据标签过滤''' queryset = super().get_queryset()", "def get(self, request, *args, **kwargs): response = super().get(request, *args, **kwargs)", "def get_context_data(self, **kwargs): context = super().get_context_data(**kwargs) category_id = self.kwargs.get('category_id') category", "= self.kwargs.get('tag_id') return queryset.filter(tag__id=tag_id) class PostDetailView(CommonViewMinxin, DetailView): queryset = Post.latest_posts()", "import Q, F from django.shortcuts import render from django.shortcuts import", "def get_context_data(self): context = super().get_context_data() context.update({ 'keyword': self.request.GET.get('keyword', '') })", "get_context_data(self, **kwargs): context = super().get_context_data(**kwargs) context.update({ 'sidebars': self.get_sidebars(), }) context.update(self.get_navs())", "SideBar.get_all(), } context.update(Category.get_navs()) return render(request, 'blog/list.html', context=context) def post_detail(request, post_id=None):", "not cache.get(pv_key): increase_pv = True cache.set(pv_key, 1, 1*60) #1分钟有效 if", "except Post.DoesNotExist: raise Http404('Post does not exist!') context={ 'post': post,", "uv=F('uv') + 1) elif increase_pv: Post.objects.filter(pk=self.object.id).update(pv=F('pv') + 1) elif increase_pv:", "= True cache.set(uv_key, 1, 24*60*60) if increase_pv and increase_uv: Post.objects.filter(pk=self.object.id).update(pv=F('pv')", "'tag': tag, }) return context def get_queryset(self): '''重写queryset,根据标签过滤''' queryset =", "from django.core.cache import cache from django.db.models import Q, F from", "queryset = super().get_queryset() keyword = self.request.GET.get('keyword') if not keyword: return", "=keyword)) class AuthorView(IndexView): def get_queryset(self): queryset = super().get_queryset() author_id =", "tag = None category = None if tag_id: post_list, tag", "nav_categories, 'categories': normal_categories, } class IndexView(CommonViewMinxin, ListView): queryset = Post.latest_posts()", "context_object_name = 'post_list' template_name = 'blog/list.html' class CategoryView(IndexView): def get_context_data(self,", "ListView, DetailView #from silk.profiling.profiler import silk_profile from config.models import SideBar", "True cache.set(pv_key, 1, 1*60) #1分钟有效 if not cache.get(uv_key): increase_uv =", "Post.objects.get(id=post_id) except Post.DoesNotExist: raise Http404('Post does not exist!') context={ 'post':", "from django.shortcuts import render from django.shortcuts import get_object_or_404 from django.views.generic", "from config.models import SideBar from .models import Post, Tag, Category", "context = super().get_context_data(**kwargs) tag_id = self.kwargs.get('tag_id') tag = get_object_or_404(Tag, pk=tag_id)", "keyword: return queryset return queryset.filter(Q(title__icontains=keyword) | Q(desc__icontains =keyword)) class AuthorView(IndexView):", "context.update({ 'keyword': self.request.GET.get('keyword', '') }) return context def get_queryset(self): queryset", "self.kwargs.get('tag_id') tag = get_object_or_404(Tag, pk=tag_id) context.update({ 'tag': tag, }) return", "'uv:%s:%s:%s' % (uid, str(date.today()), self.request.path) if not cache.get(pv_key): increase_pv =", "DetailView #from silk.profiling.profiler import silk_profile from config.models import SideBar from", "DetailView): queryset = Post.latest_posts() template_name = 'blog/detail.html' context_object_name = 'post'", "= 
super().get(request, *args, **kwargs) self.handle_visited() return response def handle_visited(self): increase_pv", "{ 'category': category, 'tag': tag, 'post_list': post_list, 'sidebars': SideBar.get_all(), }", "get_navs(self): categories = Category.objects.filter(status=Category.STATUS_NORMAL) nav_categories = [] normal_categories = []", "context def get_sidebars(self): return SideBar.objects.filter(status=SideBar.STATUS_SHOW) def get_navs(self): categories = Category.objects.filter(status=Category.STATUS_NORMAL)", "1) elif increase_pv: Post.objects.filter(pk=self.object.id).update(pv=F('pv') + 1) elif increase_pv: Post.objects.filter(pk=self.object.id).update(pv=F('uv') +", "def get_queryset(self): queryset = super().get_queryset() keyword = self.request.GET.get('keyword') if not", "cache.set(uv_key, 1, 24*60*60) if increase_pv and increase_uv: Post.objects.filter(pk=self.object.id).update(pv=F('pv') + 1,", "else: post_list = Post.latest_posts() context = { 'category': category, 'tag':", "increase_pv and increase_uv: Post.objects.filter(pk=self.object.id).update(pv=F('pv') + 1, uv=F('uv') + 1) elif", "self.request.GET.get('keyword', '') }) return context def get_queryset(self): queryset = super().get_queryset()", "datetime import date from django.core.cache import cache from django.db.models import", "not keyword: return queryset return queryset.filter(Q(title__icontains=keyword) | Q(desc__icontains =keyword)) class", "self.kwargs.get('category_id') return queryset.filter(category_id=category_id) class TagView(IndexView): def get_context_data(self, **kwargs): context =", "import ListView, DetailView #from silk.profiling.profiler import silk_profile from config.models import", "elif category_id: post_list, category=Post.get_by_category(category_id) else: post_list = Post.latest_posts() context =", "if not cache.get(uv_key): increase_uv = True cache.set(uv_key, 1, 24*60*60) if", "context def get_queryset(self): '''重写queryset,根据标签过滤''' queryset = super().get_queryset() tag_id = self.kwargs.get('tag_id')", "= [] normal_categories = [] for cate in categories: if", "response = super().get(request, *args, **kwargs) self.handle_visited() return response def handle_visited(self):", "'categories': normal_categories, } class IndexView(CommonViewMinxin, ListView): queryset = Post.latest_posts() paginate_by", "context = { 'category': category, 'tag': tag, 'post_list': post_list, 'sidebars':", "'keyword': self.request.GET.get('keyword', '') }) return context def get_queryset(self): queryset =", "**kwargs): response = super().get(request, *args, **kwargs) self.handle_visited() return response def", "queryset = super().get_queryset() tag_id = self.kwargs.get('tag_id') return queryset.filter(tag__id=tag_id) class PostDetailView(CommonViewMinxin,", "context def get_queryset(self): '''重写queryset,根据分类过滤''' queryset = super().get_queryset() category_id = self.kwargs.get('category_id')", "Post.objects.filter(pk=self.object.id).update(pv=F('pv') + 1) elif increase_pv: Post.objects.filter(pk=self.object.id).update(pv=F('uv') + 1) class SearchView(IndexView):", "if increase_pv and increase_uv: Post.objects.filter(pk=self.object.id).update(pv=F('pv') + 1, uv=F('uv') + 1)", "= get_object_or_404(Tag, pk=tag_id) context.update({ 'tag': tag, }) return context def", "increase_pv = True cache.set(pv_key, 1, 1*60) #1分钟有效 if not cache.get(uv_key):", "**kwargs): context = super().get_context_data(**kwargs) context.update({ 'sidebars': self.get_sidebars(), }) context.update(self.get_navs()) return", "= 
super().get_context_data(**kwargs) category_id = self.kwargs.get('category_id') category = get_object_or_404(Category, pk=category_id) context.update({", "Post.get_by_tag(tag_id) elif category_id: post_list, category=Post.get_by_category(category_id) else: post_list = Post.latest_posts() context", "'pv:%s:%s' % (uid, self.request.path) uv_key = 'uv:%s:%s:%s' % (uid, str(date.today()),", "config.models import SideBar from .models import Post, Tag, Category from", "tag_id = self.kwargs.get('tag_id') return queryset.filter(tag__id=tag_id) class PostDetailView(CommonViewMinxin, DetailView): queryset =", "category = get_object_or_404(Category, pk=category_id) context.update({ 'category': category, }) return context", "self.handle_visited() return response def handle_visited(self): increase_pv = False increase_uv =", "= { 'category': category, 'tag': tag, 'post_list': post_list, 'sidebars': SideBar.get_all(),", "context def get_queryset(self): queryset = super().get_queryset() keyword = self.request.GET.get('keyword') if", "django.shortcuts import get_object_or_404 from django.views.generic import ListView, DetailView #from silk.profiling.profiler", "increase_uv: Post.objects.filter(pk=self.object.id).update(pv=F('pv') + 1, uv=F('uv') + 1) elif increase_pv: Post.objects.filter(pk=self.object.id).update(pv=F('pv')", "normal_categories = [] for cate in categories: if cate.is_nav: nav_categories.append(cate)", "silk_profile from config.models import SideBar from .models import Post, Tag,", "elif increase_pv: Post.objects.filter(pk=self.object.id).update(pv=F('uv') + 1) class SearchView(IndexView): def get_context_data(self): context", "'post' pk_url_kwarg = 'post_id' def get(self, request, *args, **kwargs): response", "= Post.latest_posts() template_name = 'blog/detail.html' context_object_name = 'post' pk_url_kwarg =", "Post.DoesNotExist: raise Http404('Post does not exist!') context={ 'post': post, 'sidebars':", "queryset = super().get_queryset() author_id = self.kwargs.get('owner_id') return queryset.filter(owner_id=author_id) ''' def", "post_list, 'sidebars': SideBar.get_all(), } context.update(Category.get_navs()) return render(request, 'blog/list.html', context=context) def", "def post_detail(request, post_id=None): try: post = Post.objects.get(id=post_id) except Post.DoesNotExist: raise", "str(date.today()), self.request.path) if not cache.get(pv_key): increase_pv = True cache.set(pv_key, 1,", "False uid = self.request.uid pv_key = 'pv:%s:%s' % (uid, self.request.path)", "} context.update(Category.get_navs()) return render(request, 'blog/list.html', context=context) def post_detail(request, post_id=None): try:", "does not exist!') context={ 'post': post, 'sidebars': SideBar.get_all(), } context.update(Category.get_navs())", "def get_queryset(self): '''重写queryset,根据标签过滤''' queryset = super().get_queryset() tag_id = self.kwargs.get('tag_id') return", "Comment class CommonViewMinxin: def get_context_data(self, **kwargs): context = super().get_context_data(**kwargs) context.update({", "return render(request, 'blog/list.html', context=context) def post_detail(request, post_id=None): try: post =", "increase_uv = False uid = self.request.uid pv_key = 'pv:%s:%s' %", "= False increase_uv = False uid = self.request.uid pv_key =", "tag = get_object_or_404(Tag, pk=tag_id) context.update({ 'tag': tag, }) return context", "= super().get_queryset() category_id = self.kwargs.get('category_id') return queryset.filter(category_id=category_id) class TagView(IndexView): def", "not exist!') context={ 'post': 
post, 'sidebars': SideBar.get_all(), } context.update(Category.get_navs()) return", "get_object_or_404 from django.views.generic import ListView, DetailView #from silk.profiling.profiler import silk_profile", "| Q(desc__icontains =keyword)) class AuthorView(IndexView): def get_queryset(self): queryset = super().get_queryset()", "raise Http404('Post does not exist!') context={ 'post': post, 'sidebars': SideBar.get_all(),", "categories = Category.objects.filter(status=Category.STATUS_NORMAL) nav_categories = [] normal_categories = [] for", "'sidebars': self.get_sidebars(), }) context.update(self.get_navs()) return context def get_sidebars(self): return SideBar.objects.filter(status=SideBar.STATUS_SHOW)", "self.request.uid pv_key = 'pv:%s:%s' % (uid, self.request.path) uv_key = 'uv:%s:%s:%s'", "Post.latest_posts() template_name = 'blog/detail.html' context_object_name = 'post' pk_url_kwarg = 'post_id'", "= 5 context_object_name = 'post_list' template_name = 'blog/list.html' class CategoryView(IndexView):", "IndexView(CommonViewMinxin, ListView): queryset = Post.latest_posts() paginate_by = 5 context_object_name =", "SideBar from .models import Post, Tag, Category from comment.models import", "} class IndexView(CommonViewMinxin, ListView): queryset = Post.latest_posts() paginate_by = 5", "'''重写queryset,根据分类过滤''' queryset = super().get_queryset() category_id = self.kwargs.get('category_id') return queryset.filter(category_id=category_id) class", "= 'uv:%s:%s:%s' % (uid, str(date.today()), self.request.path) if not cache.get(pv_key): increase_pv", "= self.request.uid pv_key = 'pv:%s:%s' % (uid, self.request.path) uv_key =", "= 'pv:%s:%s' % (uid, self.request.path) uv_key = 'uv:%s:%s:%s' % (uid,", "context.update({ 'sidebars': self.get_sidebars(), }) context.update(self.get_navs()) return context def get_sidebars(self): return", "self.get_sidebars(), }) context.update(self.get_navs()) return context def get_sidebars(self): return SideBar.objects.filter(status=SideBar.STATUS_SHOW) def", "= 'blog/detail.html' context_object_name = 'post' pk_url_kwarg = 'post_id' def get(self,", "*args, **kwargs) self.handle_visited() return response def handle_visited(self): increase_pv = False", "'blog/list.html' class CategoryView(IndexView): def get_context_data(self, **kwargs): context = super().get_context_data(**kwargs) category_id", "cache.get(uv_key): increase_uv = True cache.set(uv_key, 1, 24*60*60) if increase_pv and", "nav_categories.append(cate) else: normal_categories.append(cate) return { 'navs': nav_categories, 'categories': normal_categories, }", "Category.objects.filter(status=Category.STATUS_NORMAL) nav_categories = [] normal_categories = [] for cate in", "queryset.filter(category_id=category_id) class TagView(IndexView): def get_context_data(self, **kwargs): context = super().get_context_data(**kwargs) tag_id", "Post.latest_posts() context = { 'category': category, 'tag': tag, 'post_list': post_list,", "'tag': tag, 'post_list': post_list, 'sidebars': SideBar.get_all(), } context.update(Category.get_navs()) return render(request,", "normal_categories, } class IndexView(CommonViewMinxin, ListView): queryset = Post.latest_posts() paginate_by =", "context=context) def post_detail(request, post_id=None): try: post = Post.objects.get(id=post_id) except Post.DoesNotExist:", "1) class SearchView(IndexView): def get_context_data(self): context = super().get_context_data() context.update({ 'keyword':", "author_id = self.kwargs.get('owner_id') return queryset.filter(owner_id=author_id) ''' def 
post_list(request, category_id=None, tag_id=None):", "SearchView(IndexView): def get_context_data(self): context = super().get_context_data() context.update({ 'keyword': self.request.GET.get('keyword', '')", "get_context_data(self, **kwargs): context = super().get_context_data(**kwargs) tag_id = self.kwargs.get('tag_id') tag =", "super().get_context_data() context.update({ 'keyword': self.request.GET.get('keyword', '') }) return context def get_queryset(self):", "= super().get_context_data(**kwargs) tag_id = self.kwargs.get('tag_id') tag = get_object_or_404(Tag, pk=tag_id) context.update({", "= 'post_id' def get(self, request, *args, **kwargs): response = super().get(request,", "cate.is_nav: nav_categories.append(cate) else: normal_categories.append(cate) return { 'navs': nav_categories, 'categories': normal_categories,", "1) elif increase_pv: Post.objects.filter(pk=self.object.id).update(pv=F('uv') + 1) class SearchView(IndexView): def get_context_data(self):", "return context def get_sidebars(self): return SideBar.objects.filter(status=SideBar.STATUS_SHOW) def get_navs(self): categories =", "queryset = Post.latest_posts() template_name = 'blog/detail.html' context_object_name = 'post' pk_url_kwarg", "from django.shortcuts import get_object_or_404 from django.views.generic import ListView, DetailView #from", "TagView(IndexView): def get_context_data(self, **kwargs): context = super().get_context_data(**kwargs) tag_id = self.kwargs.get('tag_id')", "increase_uv = True cache.set(uv_key, 1, 24*60*60) if increase_pv and increase_uv:", "super().get_queryset() tag_id = self.kwargs.get('tag_id') return queryset.filter(tag__id=tag_id) class PostDetailView(CommonViewMinxin, DetailView): queryset", "self.request.path) uv_key = 'uv:%s:%s:%s' % (uid, str(date.today()), self.request.path) if not", "if cate.is_nav: nav_categories.append(cate) else: normal_categories.append(cate) return { 'navs': nav_categories, 'categories':", "if not keyword: return queryset return queryset.filter(Q(title__icontains=keyword) | Q(desc__icontains =keyword))", "= None category = None if tag_id: post_list, tag =", "normal_categories.append(cate) return { 'navs': nav_categories, 'categories': normal_categories, } class IndexView(CommonViewMinxin,", "import cache from django.db.models import Q, F from django.shortcuts import", "1, uv=F('uv') + 1) elif increase_pv: Post.objects.filter(pk=self.object.id).update(pv=F('pv') + 1) elif", "get_sidebars(self): return SideBar.objects.filter(status=SideBar.STATUS_SHOW) def get_navs(self): categories = Category.objects.filter(status=Category.STATUS_NORMAL) nav_categories =", "None if tag_id: post_list, tag = Post.get_by_tag(tag_id) elif category_id: post_list,", "post_list, category=Post.get_by_category(category_id) else: post_list = Post.latest_posts() context = { 'category':", "categories: if cate.is_nav: nav_categories.append(cate) else: normal_categories.append(cate) return { 'navs': nav_categories,", "Post.objects.filter(pk=self.object.id).update(pv=F('uv') + 1) class SearchView(IndexView): def get_context_data(self): context = super().get_context_data()", "+ 1) class SearchView(IndexView): def get_context_data(self): context = super().get_context_data() context.update({", "get_object_or_404(Tag, pk=tag_id) context.update({ 'tag': tag, }) return context def get_queryset(self):", "super().get_queryset() author_id = self.kwargs.get('owner_id') return queryset.filter(owner_id=author_id) ''' def post_list(request, category_id=None,", "get_context_data(self): context = 
super().get_context_data() context.update({ 'keyword': self.request.GET.get('keyword', '') }) return", "= Post.get_by_tag(tag_id) elif category_id: post_list, category=Post.get_by_category(category_id) else: post_list = Post.latest_posts()", "[] normal_categories = [] for cate in categories: if cate.is_nav:", "''' def post_list(request, category_id=None, tag_id=None): tag = None category =", "keyword = self.request.GET.get('keyword') if not keyword: return queryset return queryset.filter(Q(title__icontains=keyword)", "return queryset.filter(tag__id=tag_id) class PostDetailView(CommonViewMinxin, DetailView): queryset = Post.latest_posts() template_name =", "post_list(request, category_id=None, tag_id=None): tag = None category = None if", "5 context_object_name = 'post_list' template_name = 'blog/list.html' class CategoryView(IndexView): def", "pk=category_id) context.update({ 'category': category, }) return context def get_queryset(self): '''重写queryset,根据分类过滤'''", "import get_object_or_404 from django.views.generic import ListView, DetailView #from silk.profiling.profiler import", "response def handle_visited(self): increase_pv = False increase_uv = False uid", "import Comment class CommonViewMinxin: def get_context_data(self, **kwargs): context = super().get_context_data(**kwargs)", "{ 'navs': nav_categories, 'categories': normal_categories, } class IndexView(CommonViewMinxin, ListView): queryset", "from .models import Post, Tag, Category from comment.models import Comment", "'post_list' template_name = 'blog/list.html' class CategoryView(IndexView): def get_context_data(self, **kwargs): context", "'navs': nav_categories, 'categories': normal_categories, } class IndexView(CommonViewMinxin, ListView): queryset =", "category, 'tag': tag, 'post_list': post_list, 'sidebars': SideBar.get_all(), } context.update(Category.get_navs()) return", "1*60) #1分钟有效 if not cache.get(uv_key): increase_uv = True cache.set(uv_key, 1,", "category_id = self.kwargs.get('category_id') return queryset.filter(category_id=category_id) class TagView(IndexView): def get_context_data(self, **kwargs):", "date from django.core.cache import cache from django.db.models import Q, F", "'') }) return context def get_queryset(self): queryset = super().get_queryset() keyword", "'post': post, 'sidebars': SideBar.get_all(), } context.update(Category.get_navs()) return render(request, 'blog/detail.html', context=context)", "get_context_data(self, **kwargs): context = super().get_context_data(**kwargs) category_id = self.kwargs.get('category_id') category =", "True cache.set(uv_key, 1, 24*60*60) if increase_pv and increase_uv: Post.objects.filter(pk=self.object.id).update(pv=F('pv') +", "tag, 'post_list': post_list, 'sidebars': SideBar.get_all(), } context.update(Category.get_navs()) return render(request, 'blog/list.html',", "comment.models import Comment class CommonViewMinxin: def get_context_data(self, **kwargs): context =", "CommonViewMinxin: def get_context_data(self, **kwargs): context = super().get_context_data(**kwargs) context.update({ 'sidebars': self.get_sidebars(),", "django.shortcuts import render from django.shortcuts import get_object_or_404 from django.views.generic import", "def get_context_data(self, **kwargs): context = super().get_context_data(**kwargs) context.update({ 'sidebars': self.get_sidebars(), })", "return queryset.filter(category_id=category_id) class TagView(IndexView): def get_context_data(self, **kwargs): context = super().get_context_data(**kwargs)", "False increase_uv = False uid = 
self.request.uid pv_key = 'pv:%s:%s'", "from django.views.generic import ListView, DetailView #from silk.profiling.profiler import silk_profile from", "self.kwargs.get('owner_id') return queryset.filter(owner_id=author_id) ''' def post_list(request, category_id=None, tag_id=None): tag =", "= get_object_or_404(Category, pk=category_id) context.update({ 'category': category, }) return context def", "class CategoryView(IndexView): def get_context_data(self, **kwargs): context = super().get_context_data(**kwargs) category_id =", "tag_id=None): tag = None category = None if tag_id: post_list,", "context.update(Category.get_navs()) return render(request, 'blog/list.html', context=context) def post_detail(request, post_id=None): try: post", "import Post, Tag, Category from comment.models import Comment class CommonViewMinxin:", "% (uid, self.request.path) uv_key = 'uv:%s:%s:%s' % (uid, str(date.today()), self.request.path)", "paginate_by = 5 context_object_name = 'post_list' template_name = 'blog/list.html' class", "**kwargs) self.handle_visited() return response def handle_visited(self): increase_pv = False increase_uv", "Category from comment.models import Comment class CommonViewMinxin: def get_context_data(self, **kwargs):", "in categories: if cate.is_nav: nav_categories.append(cate) else: normal_categories.append(cate) return { 'navs':", "from comment.models import Comment class CommonViewMinxin: def get_context_data(self, **kwargs): context", "}) return context def get_queryset(self): '''重写queryset,根据分类过滤''' queryset = super().get_queryset() category_id", "category_id: post_list, category=Post.get_by_category(category_id) else: post_list = Post.latest_posts() context = {", "queryset = super().get_queryset() category_id = self.kwargs.get('category_id') return queryset.filter(category_id=category_id) class TagView(IndexView):", "'post_list': post_list, 'sidebars': SideBar.get_all(), } context.update(Category.get_navs()) return render(request, 'blog/list.html', context=context)", "queryset.filter(tag__id=tag_id) class PostDetailView(CommonViewMinxin, DetailView): queryset = Post.latest_posts() template_name = 'blog/detail.html'", "}) return context def get_queryset(self): queryset = super().get_queryset() keyword =", "AuthorView(IndexView): def get_queryset(self): queryset = super().get_queryset() author_id = self.kwargs.get('owner_id') return", "Http404('Post does not exist!') context={ 'post': post, 'sidebars': SideBar.get_all(), }", "Tag, Category from comment.models import Comment class CommonViewMinxin: def get_context_data(self,", "return context def get_queryset(self): queryset = super().get_queryset() keyword = self.request.GET.get('keyword')", "'blog/list.html', context=context) def post_detail(request, post_id=None): try: post = Post.objects.get(id=post_id) except", "= self.request.GET.get('keyword') if not keyword: return queryset return queryset.filter(Q(title__icontains=keyword) |", "}) return context def get_queryset(self): '''重写queryset,根据标签过滤''' queryset = super().get_queryset() tag_id", "pk=tag_id) context.update({ 'tag': tag, }) return context def get_queryset(self): '''重写queryset,根据标签过滤'''", "**kwargs): context = super().get_context_data(**kwargs) category_id = self.kwargs.get('category_id') category = get_object_or_404(Category," ]
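# The class-based views above read category_id, tag_id, post_id and owner_id out of
# self.kwargs, so they need matching URL patterns. The urls.py below is only an
# illustrative sketch: the route strings, pattern names and the "blog" app label are
# assumptions and are not part of the original module.
from django.urls import path

from blog.views import (
    IndexView, CategoryView, TagView, PostDetailView, SearchView, AuthorView,
)

urlpatterns = [
    path('', IndexView.as_view(), name='index'),
    path('category/<int:category_id>/', CategoryView.as_view(), name='category-list'),
    path('tag/<int:tag_id>/', TagView.as_view(), name='tag-list'),
    path('post/<int:post_id>.html', PostDetailView.as_view(), name='post-detail'),
    path('search/', SearchView.as_view(), name='search'),
    path('author/<int:owner_id>/', AuthorView.as_view(), name='author'),
]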
<reponame>thomasbarillot/DAQ
# -*- coding: utf-8 -*-
"""
Created on Sat May 7 11:38:18 2016

@author: thomasbarillot

VMI control
"""

from ctypes import cdll
#slib="VMIcrtl_ext.dll"
#hlib=cdll('VMIcrtl.dll')

import VMIcrtl_ext

test=VMIcrtl_ext.VMIcrtl()

#%%
print test.GetFilename()

#%%
test.setFilename('20161115_1841.dat')
print test.GetFilename()

#%%
test.StartAcquisitionPrev()

#%%
test.StopAcquisition()

#%%
img=test.RecallImagePrev()

#%%
import numpy as np
print np.shape(img)
a=np.array(img)
print a

#%%
from matplotlib import pyplot as plt

#%%
b=np.reshape(a,[400,400])
print b
plt.figure()
plt.pcolor(np.reshape(a,[400,400]))
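The session above is Python 2 (bare print statements) driven cell by cell. A minimal Python 3 sketch of the same preview-acquisition flow as one function, assuming only the VMIcrtl_ext methods exercised above and a 400x400 detector:

import numpy as np
import VMIcrtl_ext

def acquire_preview(filename, shape=(400, 400)):
    """Run one preview acquisition and return the recalled image as a 2-D array."""
    ctrl = VMIcrtl_ext.VMIcrtl()
    ctrl.setFilename(filename)
    ctrl.StartAcquisitionPrev()
    # acquire for as long as needed, then stop (interactive in the original session)
    ctrl.StopAcquisition()
    img = ctrl.RecallImagePrev()        # flat sequence of pixel values
    return np.asarray(img).reshape(shape)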
# Copyright 2013-2020 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)

import llnl.util.tty as tty

from spack import *
import spack.architecture

import os


class Openssl(Package):   # Uses Fake Autotools, should subclass Package
    """OpenSSL is an open source project that provides a robust,
       commercial-grade, and full-featured toolkit for the Transport
       Layer Security (TLS) and Secure Sockets Layer (SSL) protocols.
       It is also a general-purpose cryptography library."""
    homepage = "http://www.openssl.org"

    # URL must remain http:// so Spack can bootstrap curl
    url = "http://www.openssl.org/source/openssl-1.1.1d.tar.gz"
    list_url = "http://www.openssl.org/source/old/"
    list_depth = 1

    # The latest stable version is the 1.1.1 series. This is also our Long
    # Term Support (LTS) version, supported until 11th September 2023.
    version('1.1.1g', sha256='ddb04774f1e32f0c49751e21b67216ac87852ceb056b75209af2443400636d46')
    version('1.1.1f', sha256='186c6bfe6ecfba7a5b48c47f8a1673d0f3b0e5ba2e25602dd23b629975da3f35')
    version('1.1.1e', sha256='694f61ac11cb51c9bf73f54e771ff6022b0327a43bbdfa1b2f19de1662a6dcbe')
    version('1.1.1d', sha256='1e3a91bc1f9dfce01af26026f856e064eab4c8ee0a8f457b5ae30b40b8b711f2')
    version('1.1.1c', sha256='f6fb3079ad15076154eda9413fed42877d668e7069d9b87396d0804fdb3f4c90')
    version('1.1.1b', sha256='5c557b023230413dfb0756f3137a13e6d726838ccd1430888ad15bfb2b43ea4b')
    version('1.1.1a', sha256='fc20130f8b7cbd2fb918b2f14e2f429e109c31ddd0fb38fc5d71d9ffed3f9f41')
    version('1.1.1', sha256='2836875a0f89c03d0fdf483941512613a50cfb421d6fd94b9f41d7279d586a3d')

    # The 1.1.0 series is out of support and should not be used.
    version('1.1.0l', sha256='74a2f756c64fd7386a29184dc0344f4831192d61dc2481a93a4c5dd727f41148')
    version('1.1.0k', sha256='efa4965f4f773574d6cbda1cf874dbbe455ab1c0d4f906115f867d30444470b1')
    version('1.1.0j', sha256='31bec6c203ce1a8e93d5994f4ed304c63ccf07676118b6634edded12ad1b3246')
    version('1.1.0i', sha256='ebbfc844a8c8cc0ea5dc10b86c9ce97f401837f3fa08c17b2cdadc118253cf99')
    version('1.1.0g', sha256='de4d501267da39310905cb6dc8c6121f7a2cad45a7707f76df828fe1b85073af')
    version('1.1.0e', sha256='57be8618979d80c910728cfc99369bf97b2a1abd8f366ab6ebdee8975ad3874c')
    version('1.1.0d', sha256='7d5ebb9e89756545c156ff9c13cf2aa6214193b010a468a3bc789c3c28fe60df')
    version('1.1.0c', sha256='fc436441a2e05752d31b4e46115eb89709a28aef96d4fe786abe92409b2fd6f5')

    # The 1.0.2 series is out of support and should not be used.
    version('1.0.2u', sha256='ecd0c6ffb493dd06707d38b14bb4d8c2288bb7033735606569d8f90f89669d16')
    version('1.0.2t', sha256='14cb464efe7ac6b54799b34456bd69558a749a4931ecfd9cf9f71d7881cac7bc')
    version('1.0.2s', sha256='cabd5c9492825ce5bd23f3c3aeed6a97f8142f606d893df216411f07d1abab96')
    version('1.0.2r', sha256='ae51d08bba8a83958e894946f15303ff894d75c2b8bbd44a852b64e3fe11d0d6')
    version('1.0.2p', sha256='50a98e07b1a89eb8f6a99477f262df71c6fa7bef77df4dc83025a2845c827d00')
    version('1.0.2o', sha256='ec3f5c9714ba0fd45cb4e087301eb1336c317e0d20b575a125050470e8089e4d')
    version('1.0.2n', sha256='370babb75f278c39e0c50e8c4e7493bc0f18db6867478341a832a982fd15a8fe')
    version('1.0.2m', sha256='8c6ff15ec6b319b50788f42c7abc2890c08ba5a1cdcd3810eb9092deada37b0f')
    version('1.0.2k', sha256='6b3977c61f2aedf0f96367dcfb5c6e578cf37e7b8d913b4ecb6643c3cb88d8c0')
    version('1.0.2j', sha256='e7aff292be21c259c6af26469c7a9b3ba26e9abaaffd325e3dccc9785256c431')
    version('1.0.2i', sha256='9287487d11c9545b6efb287cdb70535d4e9b284dd10d51441d9b9963d000de6f')
    version('1.0.2h', sha256='1d4007e53aad94a5b2002fe045ee7bb0b3d98f1a47f8b2bc851dcd1c74332919')
    version('1.0.2g', sha256='b784b1b3907ce39abf4098702dade6365522a253ad1552e267a9a0e89594aa33')
    version('1.0.2f', sha256='932b4ee4def2b434f85435d9e3e19ca8ba99ce9a065a61524b429a9d5e9b2e9c')
    version('1.0.2e', sha256='e23ccafdb75cfcde782da0151731aa2185195ac745eea3846133f2e05c0e0bff')
    version('1.0.2d', sha256='671c36487785628a703374c652ad2cebea45fa920ae5681515df25d9f2c9a8c8')

    # The 1.0.1 version is out of support and should not be used.
    version('1.0.1u', sha256='4312b4ca1215b6f2c97007503d80db80d5157f76f8f7d3febbe6b4c56ff26739')
    version('1.0.1t', sha256='4a6ee491a2fdb22e519c76fdc2a628bb3cec12762cd456861d207996c8a07088')
    version('1.0.1r', sha256='784bd8d355ed01ce98b812f873f8b2313da61df7c7b5677fcf2e57b0863a3346')
    version('1.0.1h', sha256='9d1c8a9836aa63e2c6adb684186cbd4371c9e9dcc01d6e3bb447abf2d4d3d093')
    version('1.0.1e', sha256='f74f15e8c8ff11aa3d5bb5f276d202ec18d7246e95f961db76054199c69c1ae3')

    variant('systemcerts', default=True, description='Use system certificates')

    depends_on('zlib')
    depends_on('perl@5.14.0:', type=('build', 'test'))

    parallel = False

    @property
    def libs(self):
        return find_libraries(['libssl', 'libcrypto'], root=self.prefix.lib)

    def handle_fetch_error(self, error):
        tty.warn("Fetching OpenSSL failed. This may indicate that OpenSSL has "
                 "been updated, and the version in your instance of Spack is "
                 "insecure. Consider updating to the latest OpenSSL version.")

    def install(self, spec, prefix):
        # OpenSSL uses a variable APPS in its Makefile. If it happens to be set
        # in the environment, then this will override what is set in the
        # Makefile, leading to build errors.
        env.pop('APPS', None)

        if str(spec.target.family) in ('x86_64', 'ppc64'):
            # This needs to be done for all 64-bit architectures (except Linux,
            # where it happens automatically?)
            env['KERNEL_BITS'] = '64'

        options = ['zlib', 'shared']
        if spec.satisfies('@1.0'):
            options.append('no-krb5')
        # clang does not support the .arch directive in assembly files.
        if 'clang' in self.compiler.cc and \
                'aarch64' in spack.architecture.sys_type():
            options.append('no-asm')

        config = Executable('./config')
        config('--prefix=%s' % prefix,
               '--openssldir=%s' % join_path(prefix, 'etc', 'openssl'),
               '-I{0}'.format(self.spec['zlib'].prefix.include),
               '-L{0}'.format(self.spec['zlib'].prefix.lib),
               *options)

        # Remove non-standard compiler options if present. These options are
        # present e.g. on Darwin. They are non-standard, i.e. most compilers
        # (e.g. gcc) will not accept them.
        filter_file(r'-arch x86_64', '', 'Makefile')

        make()
        if self.run_tests:
            make('test')   # 'VERBOSE=1'
        make('install')

    @run_after('install')
    def link_system_certs(self):
        if '+systemcerts' not in self.spec:
            return

        system_dirs = [
            # CentOS, Fedora, RHEL
            '/etc/pki/tls',
            # Ubuntu
            '/usr/lib/ssl',
            # OpenSUSE
            '/etc/ssl'
        ]

        pkg_dir = join_path(self.prefix, 'etc', 'openssl')

        for directory in system_dirs:
            sys_cert = join_path(directory, 'cert.pem')
            pkg_cert = join_path(pkg_dir, 'cert.pem')
            # If a bundle exists, use it. This is the preferred way on Fedora,
            # where the certs directory does not work.
            if os.path.exists(sys_cert) and not os.path.exists(pkg_cert):
                os.symlink(sys_cert, pkg_cert)

            sys_certs = join_path(directory, 'certs')
            pkg_certs = join_path(pkg_dir, 'certs')
            # If the certs directory exists, symlink it into the package.
            # We symlink the whole directory instead of all files because
            # the directory contents might change without Spack noticing.
            if os.path.isdir(sys_certs) and not os.path.islink(pkg_certs):
                os.rmdir(pkg_certs)
                os.symlink(sys_certs, pkg_certs)
version('1.1.1g', sha256='ddb04774f1e32f0c49751e21b67216ac87852ceb056b75209af2443400636d46') version('1.1.1f', sha256='186c6bfe6ecfba7a5b48c47f8a1673d0f3b0e5ba2e25602dd23b629975da3f35') version('1.1.1e', sha256='694f61ac11cb51c9bf73f54e771ff6022b0327a43bbdfa1b2f19de1662a6dcbe') version('1.1.1d', sha256='1e3a91bc1f9dfce01af26026f856e064eab4c8ee0a8f457b5ae30b40b8b711f2') version('1.1.1c',", "the Transport Layer Security (TLS) and Secure Sockets Layer (SSL)", "depends_on('zlib') depends_on('perl@5.14.0:', type=('build', 'test')) parallel = False @property def libs(self):", "symlink the whole directory instead of all files because #", "# the directory contents might change without Spack noticing. if", "os class Openssl(Package): # Uses Fake Autotools, should subclass Package", "from spack import * import spack.architecture import os class Openssl(Package):", "the preferred way on Fedora, # where the certs directory", "# The 1.1.0 series is out of support and should", "Spack Project Developers. See the top-level COPYRIGHT file for details.", "# OpenSUSE '/etc/ssl' ] pkg_dir = join_path(self.prefix, 'etc', 'openssl') for", "root=self.prefix.lib) def handle_fetch_error(self, error): tty.warn(\"Fetching OpenSSL failed. This may indicate", "package. # We symlink the whole directory instead of all", "can bootstrap curl url = \"http://www.openssl.org/source/openssl-1.1.1d.tar.gz\" list_url = \"http://www.openssl.org/source/old/\" list_depth", "into the package. # We symlink the whole directory instead", "the certs directory exists, symlink it into the package. #", "are # present e.g. on Darwin. They are non-standard, i.e.", "out of support and should not be used. version('1.0.2u', sha256='ecd0c6ffb493dd06707d38b14bb4d8c2288bb7033735606569d8f90f89669d16')", "as tty from spack import * import spack.architecture import os", "sha256='f74f15e8c8ff11aa3d5bb5f276d202ec18d7246e95f961db76054199c69c1ae3') variant('systemcerts', default=True, description='Use system certificates') depends_on('zlib') depends_on('perl@5.14.0:', type=('build', 'test'))", "OpenSSL uses a variable APPS in its Makefile. If it", "\"http://www.openssl.org/source/openssl-1.1.1d.tar.gz\" list_url = \"http://www.openssl.org/source/old/\" list_depth = 1 # The latest", "certs directory does not work. if os.path.exists(sys_cert) and not os.path.exists(pkg_cert):", "will override what is set in the # Makefile, leading", "Darwin. They are non-standard, i.e. most compilers # (e.g. 
gcc)", "sha256='1e3a91bc1f9dfce01af26026f856e064eab4c8ee0a8f457b5ae30b40b8b711f2') version('1.1.1c', sha256='f6fb3079ad15076154eda9413fed42877d668e7069d9b87396d0804fdb3f4c90') version('1.1.1b', sha256='5c557b023230413dfb0756f3137a13e6d726838ccd1430888ad15bfb2b43ea4b') version('1.1.1a', sha256='fc20130f8b7cbd2fb918b2f14e2f429e109c31ddd0fb38fc5d71d9ffed3f9f41') version('1.1.1', sha256='2836875a0f89c03d0fdf483941512613a50cfb421d6fd94b9f41d7279d586a3d') #", "RHEL '/etc/pki/tls', # Ubuntu '/usr/lib/ssl', # OpenSUSE '/etc/ssl' ] pkg_dir", "sha256='b784b1b3907ce39abf4098702dade6365522a253ad1552e267a9a0e89594aa33') version('1.0.2f', sha256='932b4ee4def2b434f85435d9e3e19ca8ba99ce9a065a61524b429a9d5e9b2e9c') version('1.0.2e', sha256='e23ccafdb75cfcde782da0151731aa2185195ac745eea3846133f2e05c0e0bff') version('1.0.2d', sha256='671c36487785628a703374c652ad2cebea45fa920ae5681515df25d9f2c9a8c8') # The 1.0.1", "variant('systemcerts', default=True, description='Use system certificates') depends_on('zlib') depends_on('perl@5.14.0:', type=('build', 'test')) parallel", "= \"http://www.openssl.org\" # URL must remain http:// so Spack can", "sha256='8c6ff15ec6b319b50788f42c7abc2890c08ba5a1cdcd3810eb9092deada37b0f') version('1.0.2k', sha256='6b3977c61f2aedf0f96367dcfb5c6e578cf37e7b8d913b4ecb6643c3cb88d8c0') version('1.0.2j', sha256='e7aff292be21c259c6af26469c7a9b3ba26e9abaaffd325e3dccc9785256c431') version('1.0.2i', sha256='9287487d11c9545b6efb287cdb70535d4e9b284dd10d51441d9b9963d000de6f') version('1.0.2h', sha256='1d4007e53aad94a5b2002fe045ee7bb0b3d98f1a47f8b2bc851dcd1c74332919') version('1.0.2g',", "options if present. These options are # present e.g. on", "Livermore National Security, LLC and other # Spack Project Developers.", "version('1.1.1', sha256='2836875a0f89c03d0fdf483941512613a50cfb421d6fd94b9f41d7279d586a3d') # The 1.1.0 series is out of support", "join_path(pkg_dir, 'cert.pem') # If a bundle exists, use it. This", "should subclass Package \"\"\"OpenSSL is an open source project that", "# in the environment, then this will override what is", "version('1.0.2g', sha256='b784b1b3907ce39abf4098702dade6365522a253ad1552e267a9a0e89594aa33') version('1.0.2f', sha256='932b4ee4def2b434f85435d9e3e19ca8ba99ce9a065a61524b429a9d5e9b2e9c') version('1.0.2e', sha256='e23ccafdb75cfcde782da0151731aa2185195ac745eea3846133f2e05c0e0bff') version('1.0.2d', sha256='671c36487785628a703374c652ad2cebea45fa920ae5681515df25d9f2c9a8c8') # The", "= False @property def libs(self): return find_libraries(['libssl', 'libcrypto'], root=self.prefix.lib) def", "If the certs directory exists, symlink it into the package.", "to be set # in the environment, then this will", "# SPDX-License-Identifier: (Apache-2.0 OR MIT) import llnl.util.tty as tty from", "'test')) parallel = False @property def libs(self): return find_libraries(['libssl', 'libcrypto'],", "Support (LTS) version, supported until 11th September 2023. 
version('1.1.1g', sha256='ddb04774f1e32f0c49751e21b67216ac87852ceb056b75209af2443400636d46')", "CentOS, Fedora, RHEL '/etc/pki/tls', # Ubuntu '/usr/lib/ssl', # OpenSUSE '/etc/ssl'", "version('1.0.1t', sha256='4a6ee491a2fdb22e519c76fdc2a628bb3cec12762cd456861d207996c8a07088') version('1.0.1r', sha256='784bd8d355ed01ce98b812f873f8b2313da61df7c7b5677fcf2e57b0863a3346') version('1.0.1h', sha256='9d1c8a9836aa63e2c6adb684186cbd4371c9e9dcc01d6e3bb447abf2d4d3d093') version('1.0.1e', sha256='f74f15e8c8ff11aa3d5bb5f276d202ec18d7246e95f961db76054199c69c1ae3') variant('systemcerts', default=True,", "source project that provides a robust, commercial-grade, and full-featured toolkit", "all files because # the directory contents might change without", "if os.path.exists(sys_cert) and not os.path.exists(pkg_cert): os.symlink(sys_cert, pkg_cert) sys_certs = join_path(directory,", "sha256='2836875a0f89c03d0fdf483941512613a50cfb421d6fd94b9f41d7279d586a3d') # The 1.1.0 series is out of support and", "happens automatically?) env['KERNEL_BITS'] = '64' options = ['zlib', 'shared'] if", "automatically?) env['KERNEL_BITS'] = '64' options = ['zlib', 'shared'] if spec.satisfies('@1.0'):", "the environment, then this will override what is set in", "prefix, '--openssldir=%s' % join_path(prefix, 'etc', 'openssl'), '-I{0}'.format(self.spec['zlib'].prefix.include), '-L{0}'.format(self.spec['zlib'].prefix.lib), *options) #", "This is the preferred way on Fedora, # where the", "= '64' options = ['zlib', 'shared'] if spec.satisfies('@1.0'): options.append('no-krb5') #", "# Uses Fake Autotools, should subclass Package \"\"\"OpenSSL is an", "handle_fetch_error(self, error): tty.warn(\"Fetching OpenSSL failed. This may indicate that OpenSSL", "version('1.0.2e', sha256='e23ccafdb75cfcde782da0151731aa2185195ac745eea3846133f2e05c0e0bff') version('1.0.2d', sha256='671c36487785628a703374c652ad2cebea45fa920ae5681515df25d9f2c9a8c8') # The 1.0.1 version is out", "Developers. See the top-level COPYRIGHT file for details. # #", "return find_libraries(['libssl', 'libcrypto'], root=self.prefix.lib) def handle_fetch_error(self, error): tty.warn(\"Fetching OpenSSL failed.", "not support the .arch directive in assembly files. if 'clang'", "OpenSUSE '/etc/ssl' ] pkg_dir = join_path(self.prefix, 'etc', 'openssl') for directory", "of support and should not be used. version('1.0.1u', sha256='4312b4ca1215b6f2c97007503d80db80d5157f76f8f7d3febbe6b4c56ff26739') version('1.0.1t',", "to the latest OpenSSL version.\") def install(self, spec, prefix): #", "that OpenSSL has \" \"been updated, and the version in", "directory in system_dirs: sys_cert = join_path(directory, 'cert.pem') pkg_cert = join_path(pkg_dir,", "not be used. version('1.1.0l', sha256='74a2f756c64fd7386a29184dc0344f4831192d61dc2481a93a4c5dd727f41148') version('1.1.0k', sha256='efa4965f4f773574d6cbda1cf874dbbe455ab1c0d4f906115f867d30444470b1') version('1.1.0j', sha256='31bec6c203ce1a8e93d5994f4ed304c63ccf07676118b6634edded12ad1b3246') version('1.1.0i',", "'-L{0}'.format(self.spec['zlib'].prefix.lib), *options) # Remove non-standard compiler options if present. These", "OpenSSL failed. This may indicate that OpenSSL has \" \"been", "assembly files. if 'clang' in self.compiler.cc and \\ 'aarch64' in", "it. This is the preferred way on Fedora, # where", "is out of support and should not be used. version('1.0.1u',", "does not support the .arch directive in assembly files. if", "(LTS) version, supported until 11th September 2023. 
version('1.1.1g', sha256='ddb04774f1e32f0c49751e21b67216ac87852ceb056b75209af2443400636d46') version('1.1.1f',", "= \"http://www.openssl.org/source/openssl-1.1.1d.tar.gz\" list_url = \"http://www.openssl.org/source/old/\" list_depth = 1 # The", "also a general-purpose cryptography library.\"\"\" homepage = \"http://www.openssl.org\" # URL", "\"been updated, and the version in your instance of Spack", "it into the package. # We symlink the whole directory", "non-standard compiler options if present. These options are # present", "<filename>var/spack/repos/builtin/packages/openssl/package.py # Copyright 2013-2020 Lawrence Livermore National Security, LLC and", "is \" \"insecure. Consider updating to the latest OpenSSL version.\")", "used. version('1.0.1u', sha256='4312b4ca1215b6f2c97007503d80db80d5157f76f8f7d3febbe6b4c56ff26739') version('1.0.1t', sha256='4a6ee491a2fdb22e519c76fdc2a628bb3cec12762cd456861d207996c8a07088') version('1.0.1r', sha256='784bd8d355ed01ce98b812f873f8b2313da61df7c7b5677fcf2e57b0863a3346') version('1.0.1h', sha256='9d1c8a9836aa63e2c6adb684186cbd4371c9e9dcc01d6e3bb447abf2d4d3d093') version('1.0.1e',", "will not accept them. filter_file(r'-arch x86_64', '', 'Makefile') make() if", "= 1 # The latest stable version is the 1.1.1", "This is also our Long Term # Support (LTS) version,", "exists, symlink it into the package. # We symlink the", "'/etc/pki/tls', # Ubuntu '/usr/lib/ssl', # OpenSUSE '/etc/ssl' ] pkg_dir =", "version('1.1.1a', sha256='fc20130f8b7cbd2fb918b2f14e2f429e109c31ddd0fb38fc5d71d9ffed3f9f41') version('1.1.1', sha256='2836875a0f89c03d0fdf483941512613a50cfb421d6fd94b9f41d7279d586a3d') # The 1.1.0 series is out", "and Secure Sockets Layer (SSL) protocols. It is also a", "most compilers # (e.g. gcc) will not accept them. filter_file(r'-arch", "September 2023. version('1.1.1g', sha256='ddb04774f1e32f0c49751e21b67216ac87852ceb056b75209af2443400636d46') version('1.1.1f', sha256='186c6bfe6ecfba7a5b48c47f8a1673d0f3b0e5ba2e25602dd23b629975da3f35') version('1.1.1e', sha256='694f61ac11cb51c9bf73f54e771ff6022b0327a43bbdfa1b2f19de1662a6dcbe') version('1.1.1d', sha256='1e3a91bc1f9dfce01af26026f856e064eab4c8ee0a8f457b5ae30b40b8b711f2')", "def install(self, spec, prefix): # OpenSSL uses a variable APPS", "version('1.1.0j', sha256='31bec6c203ce1a8e93d5994f4ed304c63ccf07676118b6634edded12ad1b3246') version('1.1.0i', sha256='ebbfc844a8c8cc0ea5dc10b86c9ce97f401837f3fa08c17b2cdadc118253cf99') version('1.1.0g', sha256='de4d501267da39310905cb6dc8c6121f7a2cad45a7707f76df828fe1b85073af') version('1.1.0e', sha256='57be8618979d80c910728cfc99369bf97b2a1abd8f366ab6ebdee8975ad3874c') version('1.1.0d', sha256='7d5ebb9e89756545c156ff9c13cf2aa6214193b010a468a3bc789c3c28fe60df')", "may indicate that OpenSSL has \" \"been updated, and the", "set # in the environment, then this will override what", "# CentOS, Fedora, RHEL '/etc/pki/tls', # Ubuntu '/usr/lib/ssl', # OpenSUSE", "(TLS) and Secure Sockets Layer (SSL) protocols. It is also", "sha256='fc20130f8b7cbd2fb918b2f14e2f429e109c31ddd0fb38fc5d71d9ffed3f9f41') version('1.1.1', sha256='2836875a0f89c03d0fdf483941512613a50cfb421d6fd94b9f41d7279d586a3d') # The 1.1.0 series is out of", "uses a variable APPS in its Makefile. 
If it happens", "sha256='4a6ee491a2fdb22e519c76fdc2a628bb3cec12762cd456861d207996c8a07088') version('1.0.1r', sha256='784bd8d355ed01ce98b812f873f8b2313da61df7c7b5677fcf2e57b0863a3346') version('1.0.1h', sha256='9d1c8a9836aa63e2c6adb684186cbd4371c9e9dcc01d6e3bb447abf2d4d3d093') version('1.0.1e', sha256='f74f15e8c8ff11aa3d5bb5f276d202ec18d7246e95f961db76054199c69c1ae3') variant('systemcerts', default=True, description='Use" ]
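For context on how such a recipe is consumed: a downstream Spack package declares depends_on('openssl') and reads the prefix, or the libs property defined above, off the concretized spec. The sketch below is hypothetical — Mytool, its homepage, URL, and checksum are placeholders, not anything in the source.

# Hypothetical consumer of the openssl recipe above (illustration only).
from spack import *


class Mytool(AutotoolsPackage):
    """Placeholder package showing how the openssl spec is consumed."""

    homepage = "https://example.com/mytool"               # placeholder
    url = "https://example.com/mytool/mytool-1.0.tar.gz"  # placeholder

    version('1.0', sha256='0' * 64)  # placeholder checksum

    depends_on('openssl')

    def configure_args(self):
        openssl = self.spec['openssl']
        # openssl.libs resolves through the `libs` property defined in the
        # recipe above (find_libraries over libssl / libcrypto).
        return ['--with-ssl={0}'.format(openssl.prefix),
                'LIBS={0}'.format(openssl.libs.link_flags)]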
[ "create a new profiler based on caller's qualname. \"\"\" if", "len(cls._profilers) == 0)): return cls._disabled_profiler # determine the qualified name", "1 new_time = ptime.time() elapsed = (new_time - self._last_time) *", "License. See LICENSE.txt for more info. # Adapted from PyQtGraph", "(ptime.time() - self._firstTime) * 1000) type(self)._depth -= 1 if self._depth", "counter) that contains the time elapsed since the last call.", "of time intervals. By default, profilers are disabled. To enable", "since the last call. When the profiler is about to", "to stdout otherwise. If `delayed` is set to False, messages", "\"\"\" if self.disable: return if msg is None: msg =", "the profiler is about to be garbage-collected, the messages are", "def __init__(self, *args, **kwds): pass def __call__(self, *args): pass def", "\"\"\" _profilers = (config['profile'].split(\",\") if config['profile'] is not None else", "a new profiler based on caller's qualname. \"\"\" if (disabled", "to \"C.function\" (without the module name) will enable this profiler.", "about to be garbage-collected, the messages are passed to the", "msg=None, disabled='env', delayed=True): \"\"\"Optionally create a new profiler based on", "= 0 _msgs = [] # set this flag to", "'env' and func_qualname not in cls._profilers and 'all' not in", "is set to False, messages are immediately printed instead. Example:", "caller_frame = sys._getframe(1) try: caller_object_type = type(caller_frame.f_locals[\"self\"]) except KeyError: #", "(disabled == 'env' and func_qualname not in cls._profilers and 'all'", "obj._finished = False obj._firstTime = obj._last_time = ptime.time() obj._new_msg(\"> Entering", "self._msgs.append((msg, args)) else: self.flush() print(msg % args) def __del__(self): self.finish()", "profiler is about to be garbage-collected, the messages are passed", "new_time def mark(self, msg=None): self(msg) def _new_msg(self, msg, *args): msg", "delayed=True): \"\"\"Optionally create a new profiler based on caller's qualname.", "print(msg % args) def __del__(self): self.finish() def finish(self, msg=None): \"\"\"Add", "`VISPYPROFILE` to a comma-separated list of fully-qualified names of profiled", "type(caller_frame.f_locals[\"self\"]) except KeyError: # we are in a regular function", "caller function caller_frame = sys._getframe(1) try: caller_object_type = type(caller_frame.f_locals[\"self\"]) except", "stuff') # profiler is garbage-collected and flushed at function end", "Development Team. # Distributed under the (new) BSD License. See", "self.flush() def flush(self): if self._msgs: print(\"\\n\".join([m[0] % m[1] for m", "return self._finished = True if msg is not None: self(msg)", "import config class Profiler(object): \"\"\"Simple profiler allowing directed, hierarchical measurement", "1)[1] else: # we are in a method qualifier =", "sys._getframe(1) try: caller_object_type = type(caller_frame.f_locals[\"self\"]) except KeyError: # we are", "and 'all' not in cls._profilers): # don't do anything return", "obj._delayed = delayed obj._mark_count = 0 obj._finished = False obj._firstTime", "else: # we are in a method qualifier = caller_object_type.__name__", "profiled functions. Calling a profiler registers a message (defaulting to", "msg = \" \" * (self._depth - 1) + msg", "time elapsed since the last call. When the profiler is", "functions. Calling a profiler registers a message (defaulting to an", "comma-separated list of fully-qualified names of profiled functions. 
Calling a", "pass def finish(self): pass def mark(self, msg=None): pass _disabled_profiler =", "is about to be garbage-collected, the messages are passed to", "Profiler(object): \"\"\"Simple profiler allowing directed, hierarchical measurement of time intervals.", "import ptime from .. import config class Profiler(object): \"\"\"Simple profiler", "= DisabledProfiler() def __new__(cls, msg=None, disabled='env', delayed=True): \"\"\"Optionally create a", "profiling object cls._depth += 1 obj = super(Profiler, cls).__new__(cls) obj._name", "msg + \": %0.4f ms\", *(args + (elapsed,))) self._last_time =", "= False class DisabledProfiler(object): def __init__(self, *args, **kwds): pass def", "# set this flag to disable all or individual profilers", "mark(self, msg=None): pass _disabled_profiler = DisabledProfiler() def __new__(cls, msg=None, disabled='env',", "is None: msg = str(self._mark_count) self._mark_count += 1 new_time =", "profiler allowing directed, hierarchical measurement of time intervals. By default,", "self.disable: return if msg is None: msg = str(self._mark_count) self._mark_count", "msg is None: msg = str(self._mark_count) self._mark_count += 1 new_time", "disable all or individual profilers at runtime disable = False", "names of profiled functions. Calling a profiler registers a message", "time: %0.4f ms\", self._name, (ptime.time() - self._firstTime) * 1000) type(self)._depth", "self._msgs: print(\"\\n\".join([m[0] % m[1] for m in self._msgs])) type(self)._msgs =", "function end If this function is a method of class", "1 obj = super(Profiler, cls).__new__(cls) obj._name = msg or func_qualname", "garbage-collected and flushed at function end If this function is", "self._finished = True if msg is not None: self(msg) self._new_msg(\"<", "otherwise. If `delayed` is set to False, messages are immediately", "if config['profile'] is not None else []) _depth = 0", "= super(Profiler, cls).__new__(cls) obj._name = msg or func_qualname obj._delayed =", "from the module. \"\"\" _profilers = (config['profile'].split(\",\") if config['profile'] is", "+ msg + \": %0.4f ms\", *(args + (elapsed,))) self._last_time", "if msg is None: msg = str(self._mark_count) self._mark_count += 1", "# don't do anything return cls._disabled_profiler # create an actual", "at function end If this function is a method of", "the caller function caller_frame = sys._getframe(1) try: caller_object_type = type(caller_frame.f_locals[\"self\"])", "and func_qualname not in cls._profilers and 'all' not in cls._profilers):", "be garbage-collected, the messages are passed to the outer profiler", "messages are passed to the outer profiler if one is", "allowing directed, hierarchical measurement of time intervals. By default, profilers", "For regular functions, use the qualified name of the function,", "__call__(self, *args): pass def finish(self): pass def mark(self, msg=None): pass", "don't do anything return cls._disabled_profiler # create an actual profiling", "regular functions, use the qualified name of the function, stripping", "a method qualifier = caller_object_type.__name__ func_qualname = qualifier + \".\"", "... profiler('did stuff') ... do other stuff ... 
profiler('did other", "functions, use the qualified name of the function, stripping only", "Calling a profiler registers a message (defaulting to an increasing", "= caller_frame.f_globals[\"__name__\"].split(\".\", 1)[1] else: # we are in a method", "0)): return cls._disabled_profiler # determine the qualified name of the", "cls).__new__(cls) obj._name = msg or func_qualname obj._delayed = delayed obj._mark_count", "and flushed at function end If this function is a", "if (disabled == 'env' and func_qualname not in cls._profilers and", "or printed to stdout otherwise. If `delayed` is set to", "= ptime.time() elapsed = (new_time - self._last_time) * 1000 self._new_msg(\"", "parent profiler. \"\"\" if self._finished or self.disable: return self._finished =", "self._name, (ptime.time() - self._firstTime) * 1000) type(self)._depth -= 1 if", "== 0)): return cls._disabled_profiler # determine the qualified name of", "KeyError: # we are in a regular function qualifier =", "profiler = Profiler() ... do stuff ... profiler('did stuff') ...", "ptime.time() obj._new_msg(\"> Entering \" + obj._name) return obj def __call__(self,", "for more info. # Adapted from PyQtGraph import sys from", "from .. import config class Profiler(object): \"\"\"Simple profiler allowing directed,", "-*- # Copyright (c) 2014, Vispy Development Team. # Distributed", "obj._new_msg(\"> Entering \" + obj._name) return obj def __call__(self, msg=None,", "= msg or func_qualname obj._delayed = delayed obj._mark_count = 0", "delayed obj._mark_count = 0 obj._finished = False obj._firstTime = obj._last_time", "\" + obj._name) return obj def __call__(self, msg=None, *args): \"\"\"Register", "args) def __del__(self): self.finish() def finish(self, msg=None): \"\"\"Add a final", "+= 1 obj = super(Profiler, cls).__new__(cls) obj._name = msg or", "def finish(self): pass def mark(self, msg=None): pass _disabled_profiler = DisabledProfiler()", "To enable profiling, set the environment variable `VISPYPROFILE` to a", "self._last_time = new_time def mark(self, msg=None): self(msg) def _new_msg(self, msg,", "message (defaulting to an increasing counter) that contains the time", "new message with timing information. \"\"\" if self.disable: return if", "+ \": %0.4f ms\", *(args + (elapsed,))) self._last_time = new_time", "self.disable: return self._finished = True if msg is not None:", "# Copyright (c) 2014, Vispy Development Team. # Distributed under", "we are in a method qualifier = caller_object_type.__name__ func_qualname =", "+ caller_frame.f_code.co_name if (disabled == 'env' and func_qualname not in", "# we are in a regular function qualifier = caller_frame.f_globals[\"__name__\"].split(\".\",", "not in cls._profilers and 'all' not in cls._profilers): # don't", "def _new_msg(self, msg, *args): msg = \" \" * (self._depth", "\"\"\"Register or print a new message with timing information. \"\"\"", "msg=None): self(msg) def _new_msg(self, msg, *args): msg = \" \"", "instead. Example: def function(...): profiler = Profiler() ... 
do stuff", "msg=None): pass _disabled_profiler = DisabledProfiler() def __new__(cls, msg=None, disabled='env', delayed=True):", "object cls._depth += 1 obj = super(Profiler, cls).__new__(cls) obj._name =", "if msg is not None: self(msg) self._new_msg(\"< Exiting %s, total", "= (config['profile'].split(\",\") if config['profile'] is not None else []) _depth", "'env' and len(cls._profilers) == 0)): return cls._disabled_profiler # determine the", "qualified name of the caller function caller_frame = sys._getframe(1) try:", "module. \"\"\" _profilers = (config['profile'].split(\",\") if config['profile'] is not None", "'all' not in cls._profilers): # don't do anything return cls._disabled_profiler", "args)) else: self.flush() print(msg % args) def __del__(self): self.finish() def", "+ \".\" + caller_frame.f_code.co_name if (disabled == 'env' and func_qualname", "Exiting %s, total time: %0.4f ms\", self._name, (ptime.time() - self._firstTime)", "are in a method qualifier = caller_object_type.__name__ func_qualname = qualifier", "print(\"\\n\".join([m[0] % m[1] for m in self._msgs])) type(self)._msgs = []", "registers a message (defaulting to an increasing counter) that contains", "cls._profilers and 'all' not in cls._profilers): # don't do anything", "under the (new) BSD License. See LICENSE.txt for more info.", "return cls._disabled_profiler # create an actual profiling object cls._depth +=", "_new_msg(self, msg, *args): msg = \" \" * (self._depth -", "... do stuff ... profiler('did stuff') ... do other stuff", "if self.disable: return if msg is None: msg = str(self._mark_count)", "Profiler() ... do stuff ... profiler('did stuff') ... do other", "= new_time def mark(self, msg=None): self(msg) def _new_msg(self, msg, *args):", "garbage-collected, the messages are passed to the outer profiler if", "*args, **kwds): pass def __call__(self, *args): pass def finish(self): pass", "obj._last_time = ptime.time() obj._new_msg(\"> Entering \" + obj._name) return obj", "of the function, stripping only the initial \"vispy..\" prefix from", "enable profiling, set the environment variable `VISPYPROFILE` to a comma-separated", "obj def __call__(self, msg=None, *args): \"\"\"Register or print a new", "of fully-qualified names of profiled functions. Calling a profiler registers", "config class Profiler(object): \"\"\"Simple profiler allowing directed, hierarchical measurement of", "elapsed since the last call. When the profiler is about", "profiler is garbage-collected and flushed at function end If this", "this function is a method of class C, setting `VISPYPROFILE`", "False, messages are immediately printed instead. Example: def function(...): profiler", "qualifier + \".\" + caller_frame.f_code.co_name if (disabled == 'env' and", "None else []) _depth = 0 _msgs = [] #", "new profiler based on caller's qualname. \"\"\" if (disabled is", "profiler('did other stuff') # profiler is garbage-collected and flushed at", "pass def mark(self, msg=None): pass _disabled_profiler = DisabledProfiler() def __new__(cls,", "# profiler is garbage-collected and flushed at function end If", "new_time = ptime.time() elapsed = (new_time - self._last_time) * 1000", "* 1000 self._new_msg(\" \" + msg + \": %0.4f ms\",", "variable `VISPYPROFILE` to a comma-separated list of fully-qualified names of", "prefix from the module. 
\"\"\" _profilers = (config['profile'].split(\",\") if config['profile']", "try: caller_object_type = type(caller_frame.f_locals[\"self\"]) except KeyError: # we are in", "\"\"\"Optionally create a new profiler based on caller's qualname. \"\"\"", "actual profiling object cls._depth += 1 obj = super(Profiler, cls).__new__(cls)", "this profiler. For regular functions, use the qualified name of", "*args): pass def finish(self): pass def mark(self, msg=None): pass _disabled_profiler", "# determine the qualified name of the caller function caller_frame", "import sys from . import ptime from .. import config", "+= 1 new_time = ptime.time() elapsed = (new_time - self._last_time)", "a method of class C, setting `VISPYPROFILE` to \"C.function\" (without", "`delayed` is set to False, messages are immediately printed instead.", "in a method qualifier = caller_object_type.__name__ func_qualname = qualifier +", "... do other stuff ... profiler('did other stuff') # profiler", "2014, Vispy Development Team. # Distributed under the (new) BSD", "caller_object_type.__name__ func_qualname = qualifier + \".\" + caller_frame.f_code.co_name if (disabled", "in a regular function qualifier = caller_frame.f_globals[\"__name__\"].split(\".\", 1)[1] else: #", "obj._name = msg or func_qualname obj._delayed = delayed obj._mark_count =", "__call__(self, msg=None, *args): \"\"\"Register or print a new message with", "* (self._depth - 1) + msg if self._delayed: self._msgs.append((msg, args))", "DisabledProfiler(object): def __init__(self, *args, **kwds): pass def __call__(self, *args): pass", "are immediately printed instead. Example: def function(...): profiler = Profiler()", "< 1: self.flush() def flush(self): if self._msgs: print(\"\\n\".join([m[0] % m[1]", "one is running, or printed to stdout otherwise. If `delayed`", "-*- coding: utf-8 -*- # Copyright (c) 2014, Vispy Development", "- 1) + msg if self._delayed: self._msgs.append((msg, args)) else: self.flush()", "\" + msg + \": %0.4f ms\", *(args + (elapsed,)))", ".. import config class Profiler(object): \"\"\"Simple profiler allowing directed, hierarchical", "the module name) will enable this profiler. For regular functions,", "is garbage-collected and flushed at function end If this function", "- self._last_time) * 1000 self._new_msg(\" \" + msg + \":", "if no parent profiler. \"\"\" if self._finished or self.disable: return", "default, profilers are disabled. To enable profiling, set the environment", "+ msg if self._delayed: self._msgs.append((msg, args)) else: self.flush() print(msg %", "the function, stripping only the initial \"vispy..\" prefix from the", "= True if msg is not None: self(msg) self._new_msg(\"< Exiting", "stuff ... profiler('did other stuff') # profiler is garbage-collected and", "\"C.function\" (without the module name) will enable this profiler. For", "# create an actual profiling object cls._depth += 1 obj", "function, stripping only the initial \"vispy..\" prefix from the module.", "self.finish() def finish(self, msg=None): \"\"\"Add a final message; flush the", "caller's qualname. \"\"\" if (disabled is True or (disabled ==", "self._depth < 1: self.flush() def flush(self): if self._msgs: print(\"\\n\".join([m[0] %", "_disabled_profiler = DisabledProfiler() def __new__(cls, msg=None, disabled='env', delayed=True): \"\"\"Optionally create", "sys from . import ptime from .. 
import config class", "config['profile'] is not None else []) _depth = 0 _msgs", "a profiler registers a message (defaulting to an increasing counter)", "the message list if no parent profiler. \"\"\" if self._finished", "function qualifier = caller_frame.f_globals[\"__name__\"].split(\".\", 1)[1] else: # we are in", "total time: %0.4f ms\", self._name, (ptime.time() - self._firstTime) * 1000)", "not in cls._profilers): # don't do anything return cls._disabled_profiler #", "%0.4f ms\", self._name, (ptime.time() - self._firstTime) * 1000) type(self)._depth -=", "at runtime disable = False class DisabledProfiler(object): def __init__(self, *args,", "0 _msgs = [] # set this flag to disable", "method of class C, setting `VISPYPROFILE` to \"C.function\" (without the", "caller_object_type = type(caller_frame.f_locals[\"self\"]) except KeyError: # we are in a", "the (new) BSD License. See LICENSE.txt for more info. #", "msg or func_qualname obj._delayed = delayed obj._mark_count = 0 obj._finished", "and len(cls._profilers) == 0)): return cls._disabled_profiler # determine the qualified", "def __del__(self): self.finish() def finish(self, msg=None): \"\"\"Add a final message;", "class DisabledProfiler(object): def __init__(self, *args, **kwds): pass def __call__(self, *args):", "def __new__(cls, msg=None, disabled='env', delayed=True): \"\"\"Optionally create a new profiler", "None: msg = str(self._mark_count) self._mark_count += 1 new_time = ptime.time()", "coding: utf-8 -*- # Copyright (c) 2014, Vispy Development Team.", "== 'env' and len(cls._profilers) == 0)): return cls._disabled_profiler # determine", "if self._finished or self.disable: return self._finished = True if msg", "qualname. \"\"\" if (disabled is True or (disabled == 'env'", "time intervals. By default, profilers are disabled. To enable profiling,", "stuff ... profiler('did stuff') ... do other stuff ... profiler('did", "__new__(cls, msg=None, disabled='env', delayed=True): \"\"\"Optionally create a new profiler based", "ptime.time() elapsed = (new_time - self._last_time) * 1000 self._new_msg(\" \"", "*(args + (elapsed,))) self._last_time = new_time def mark(self, msg=None): self(msg)", "cls._profilers): # don't do anything return cls._disabled_profiler # create an", "if self._msgs: print(\"\\n\".join([m[0] % m[1] for m in self._msgs])) type(self)._msgs", "other stuff ... profiler('did other stuff') # profiler is garbage-collected", "__init__(self, *args, **kwds): pass def __call__(self, *args): pass def finish(self):", "`VISPYPROFILE` to \"C.function\" (without the module name) will enable this", "will enable this profiler. For regular functions, use the qualified", "do anything return cls._disabled_profiler # create an actual profiling object", "msg = str(self._mark_count) self._mark_count += 1 new_time = ptime.time() elapsed", "name) will enable this profiler. For regular functions, use the", "+ obj._name) return obj def __call__(self, msg=None, *args): \"\"\"Register or", "that contains the time elapsed since the last call. When", "last call. When the profiler is about to be garbage-collected,", "\"\"\" if (disabled is True or (disabled == 'env' and", "message list if no parent profiler. 
\"\"\" if self._finished or", "(config['profile'].split(\",\") if config['profile'] is not None else []) _depth =", "(self._depth - 1) + msg if self._delayed: self._msgs.append((msg, args)) else:", "not None else []) _depth = 0 _msgs = []", "of class C, setting `VISPYPROFILE` to \"C.function\" (without the module", "mark(self, msg=None): self(msg) def _new_msg(self, msg, *args): msg = \"", "func_qualname not in cls._profilers and 'all' not in cls._profilers): #", "to be garbage-collected, the messages are passed to the outer", "this flag to disable all or individual profilers at runtime", "- self._firstTime) * 1000) type(self)._depth -= 1 if self._depth <", "a new message with timing information. \"\"\" if self.disable: return", "Distributed under the (new) BSD License. See LICENSE.txt for more", "is running, or printed to stdout otherwise. If `delayed` is", "self._mark_count += 1 new_time = ptime.time() elapsed = (new_time -", "from PyQtGraph import sys from . import ptime from ..", "flag to disable all or individual profilers at runtime disable", "the module. \"\"\" _profilers = (config['profile'].split(\",\") if config['profile'] is not", "call. When the profiler is about to be garbage-collected, the", "flush(self): if self._msgs: print(\"\\n\".join([m[0] % m[1] for m in self._msgs]))", "By default, profilers are disabled. To enable profiling, set the", "profiler based on caller's qualname. \"\"\" if (disabled is True", "If `delayed` is set to False, messages are immediately printed", "LICENSE.txt for more info. # Adapted from PyQtGraph import sys", "* 1000) type(self)._depth -= 1 if self._depth < 1: self.flush()", "except KeyError: # we are in a regular function qualifier", "based on caller's qualname. \"\"\" if (disabled is True or", "pass def __call__(self, *args): pass def finish(self): pass def mark(self,", "When the profiler is about to be garbage-collected, the messages", "return if msg is None: msg = str(self._mark_count) self._mark_count +=", "individual profilers at runtime disable = False class DisabledProfiler(object): def", "profiler if one is running, or printed to stdout otherwise.", "= \" \" * (self._depth - 1) + msg if", "%0.4f ms\", *(args + (elapsed,))) self._last_time = new_time def mark(self,", "printed instead. Example: def function(...): profiler = Profiler() ... do", "directed, hierarchical measurement of time intervals. By default, profilers are", "profiler. For regular functions, use the qualified name of the", "def mark(self, msg=None): self(msg) def _new_msg(self, msg, *args): msg =", "or print a new message with timing information. \"\"\" if", "self.flush() print(msg % args) def __del__(self): self.finish() def finish(self, msg=None):", "use the qualified name of the function, stripping only the", "the environment variable `VISPYPROFILE` to a comma-separated list of fully-qualified", "(new) BSD License. See LICENSE.txt for more info. # Adapted", "info. # Adapted from PyQtGraph import sys from . import", "the qualified name of the function, stripping only the initial", "class Profiler(object): \"\"\"Simple profiler allowing directed, hierarchical measurement of time", "more info. 
# Adapted from PyQtGraph import sys from .", "cls._disabled_profiler # determine the qualified name of the caller function", "type(self)._depth -= 1 if self._depth < 1: self.flush() def flush(self):", "return cls._disabled_profiler # determine the qualified name of the caller", "_profilers = (config['profile'].split(\",\") if config['profile'] is not None else [])", "profilers are disabled. To enable profiling, set the environment variable", "= str(self._mark_count) self._mark_count += 1 new_time = ptime.time() elapsed =", "\" \" * (self._depth - 1) + msg if self._delayed:", "to False, messages are immediately printed instead. Example: def function(...):", "self._firstTime) * 1000) type(self)._depth -= 1 if self._depth < 1:", "self._delayed: self._msgs.append((msg, args)) else: self.flush() print(msg % args) def __del__(self):", "are disabled. To enable profiling, set the environment variable `VISPYPROFILE`", "increasing counter) that contains the time elapsed since the last", "= Profiler() ... do stuff ... profiler('did stuff') ... do", "name of the function, stripping only the initial \"vispy..\" prefix", "is not None: self(msg) self._new_msg(\"< Exiting %s, total time: %0.4f", "a comma-separated list of fully-qualified names of profiled functions. Calling", "= (new_time - self._last_time) * 1000 self._new_msg(\" \" + msg", "False obj._firstTime = obj._last_time = ptime.time() obj._new_msg(\"> Entering \" +", "\"\"\"Simple profiler allowing directed, hierarchical measurement of time intervals. By", "_depth = 0 _msgs = [] # set this flag", "obj._firstTime = obj._last_time = ptime.time() obj._new_msg(\"> Entering \" + obj._name)", "See LICENSE.txt for more info. # Adapted from PyQtGraph import", "If this function is a method of class C, setting", "or individual profilers at runtime disable = False class DisabledProfiler(object):", "= qualifier + \".\" + caller_frame.f_code.co_name if (disabled == 'env'", "printed to stdout otherwise. If `delayed` is set to False,", "enable this profiler. For regular functions, use the qualified name", "the messages are passed to the outer profiler if one", "obj._mark_count = 0 obj._finished = False obj._firstTime = obj._last_time =", "= delayed obj._mark_count = 0 obj._finished = False obj._firstTime =", "DisabledProfiler() def __new__(cls, msg=None, disabled='env', delayed=True): \"\"\"Optionally create a new", "the qualified name of the caller function caller_frame = sys._getframe(1)", "the initial \"vispy..\" prefix from the module. \"\"\" _profilers =", "a regular function qualifier = caller_frame.f_globals[\"__name__\"].split(\".\", 1)[1] else: # we", "a message (defaulting to an increasing counter) that contains the", "do other stuff ... profiler('did other stuff') # profiler is", "1000 self._new_msg(\" \" + msg + \": %0.4f ms\", *(args", "fully-qualified names of profiled functions. Calling a profiler registers a", "an increasing counter) that contains the time elapsed since the", "1000) type(self)._depth -= 1 if self._depth < 1: self.flush() def", "list of fully-qualified names of profiled functions. Calling a profiler", "qualified name of the function, stripping only the initial \"vispy..\"", "function(...): profiler = Profiler() ... do stuff ... profiler('did stuff')", "passed to the outer profiler if one is running, or", "= False obj._firstTime = obj._last_time = ptime.time() obj._new_msg(\"> Entering \"", "no parent profiler. 
\"\"\" if self._finished or self.disable: return self._finished", "super(Profiler, cls).__new__(cls) obj._name = msg or func_qualname obj._delayed = delayed", "messages are immediately printed instead. Example: def function(...): profiler =", "profiler('did stuff') ... do other stuff ... profiler('did other stuff')", "flush the message list if no parent profiler. \"\"\" if", "only the initial \"vispy..\" prefix from the module. \"\"\" _profilers", "obj = super(Profiler, cls).__new__(cls) obj._name = msg or func_qualname obj._delayed", "measurement of time intervals. By default, profilers are disabled. To", "outer profiler if one is running, or printed to stdout", "not None: self(msg) self._new_msg(\"< Exiting %s, total time: %0.4f ms\",", "= sys._getframe(1) try: caller_object_type = type(caller_frame.f_locals[\"self\"]) except KeyError: # we", "qualifier = caller_object_type.__name__ func_qualname = qualifier + \".\" + caller_frame.f_code.co_name", "to a comma-separated list of fully-qualified names of profiled functions.", "if self._delayed: self._msgs.append((msg, args)) else: self.flush() print(msg % args) def", "True if msg is not None: self(msg) self._new_msg(\"< Exiting %s,", "the time elapsed since the last call. When the profiler", "%s, total time: %0.4f ms\", self._name, (ptime.time() - self._firstTime) *", "= [] # set this flag to disable all or", "the outer profiler if one is running, or printed to", "== 'env' and func_qualname not in cls._profilers and 'all' not", "cls._depth += 1 obj = super(Profiler, cls).__new__(cls) obj._name = msg", "or (disabled == 'env' and len(cls._profilers) == 0)): return cls._disabled_profiler", "qualifier = caller_frame.f_globals[\"__name__\"].split(\".\", 1)[1] else: # we are in a", "Adapted from PyQtGraph import sys from . import ptime from", "caller_frame.f_globals[\"__name__\"].split(\".\", 1)[1] else: # we are in a method qualifier", "or self.disable: return self._finished = True if msg is not", "# Distributed under the (new) BSD License. See LICENSE.txt for", "msg, *args): msg = \" \" * (self._depth - 1)", "of the caller function caller_frame = sys._getframe(1) try: caller_object_type =", "self._new_msg(\"< Exiting %s, total time: %0.4f ms\", self._name, (ptime.time() -", "\"\"\" if self._finished or self.disable: return self._finished = True if", "else []) _depth = 0 _msgs = [] # set", "0 obj._finished = False obj._firstTime = obj._last_time = ptime.time() obj._new_msg(\">", "(disabled is True or (disabled == 'env' and len(cls._profilers) ==", "ms\", self._name, (ptime.time() - self._firstTime) * 1000) type(self)._depth -= 1", "1 if self._depth < 1: self.flush() def flush(self): if self._msgs:", "or func_qualname obj._delayed = delayed obj._mark_count = 0 obj._finished =", "PyQtGraph import sys from . import ptime from .. import", "is True or (disabled == 'env' and len(cls._profilers) == 0)):", "information. 
\"\"\" if self.disable: return if msg is None: msg", "function is a method of class C, setting `VISPYPROFILE` to", "\" * (self._depth - 1) + msg if self._delayed: self._msgs.append((msg,", "= ptime.time() obj._new_msg(\"> Entering \" + obj._name) return obj def", "Entering \" + obj._name) return obj def __call__(self, msg=None, *args):", "cls._disabled_profiler # create an actual profiling object cls._depth += 1", "if self._depth < 1: self.flush() def flush(self): if self._msgs: print(\"\\n\".join([m[0]", "to disable all or individual profilers at runtime disable =", "caller_frame.f_code.co_name if (disabled == 'env' and func_qualname not in cls._profilers", "finish(self, msg=None): \"\"\"Add a final message; flush the message list", "[] # set this flag to disable all or individual", "are passed to the outer profiler if one is running,", "the last call. When the profiler is about to be", "of profiled functions. Calling a profiler registers a message (defaulting", "if (disabled is True or (disabled == 'env' and len(cls._profilers)", "are in a regular function qualifier = caller_frame.f_globals[\"__name__\"].split(\".\", 1)[1] else:", "def finish(self, msg=None): \"\"\"Add a final message; flush the message", "in cls._profilers and 'all' not in cls._profilers): # don't do", "**kwds): pass def __call__(self, *args): pass def finish(self): pass def", "Example: def function(...): profiler = Profiler() ... do stuff ...", "create an actual profiling object cls._depth += 1 obj =", "is a method of class C, setting `VISPYPROFILE` to \"C.function\"", "we are in a regular function qualifier = caller_frame.f_globals[\"__name__\"].split(\".\", 1)[1]", "def __call__(self, *args): pass def finish(self): pass def mark(self, msg=None):", "utf-8 -*- # Copyright (c) 2014, Vispy Development Team. #", "1: self.flush() def flush(self): if self._msgs: print(\"\\n\".join([m[0] % m[1] for", "immediately printed instead. Example: def function(...): profiler = Profiler() ...", "final message; flush the message list if no parent profiler.", "flushed at function end If this function is a method", "*args): msg = \" \" * (self._depth - 1) +", "name of the caller function caller_frame = sys._getframe(1) try: caller_object_type", "profilers at runtime disable = False class DisabledProfiler(object): def __init__(self,", "disabled. To enable profiling, set the environment variable `VISPYPROFILE` to", "profiler. \"\"\" if self._finished or self.disable: return self._finished = True", "module name) will enable this profiler. For regular functions, use", "obj._name) return obj def __call__(self, msg=None, *args): \"\"\"Register or print", "-= 1 if self._depth < 1: self.flush() def flush(self): if", "in cls._profilers): # don't do anything return cls._disabled_profiler # create", "+ (elapsed,))) self._last_time = new_time def mark(self, msg=None): self(msg) def", "(disabled == 'env' and len(cls._profilers) == 0)): return cls._disabled_profiler #", "Team. # Distributed under the (new) BSD License. See LICENSE.txt", "msg=None): \"\"\"Add a final message; flush the message list if", "\"\"\"Add a final message; flush the message list if no", "anything return cls._disabled_profiler # create an actual profiling object cls._depth", "stripping only the initial \"vispy..\" prefix from the module. \"\"\"", "\"vispy..\" prefix from the module. 
\"\"\" _profilers = (config['profile'].split(\",\") if", "set the environment variable `VISPYPROFILE` to a comma-separated list of", "function caller_frame = sys._getframe(1) try: caller_object_type = type(caller_frame.f_locals[\"self\"]) except KeyError:", "*args): \"\"\"Register or print a new message with timing information.", "with timing information. \"\"\" if self.disable: return if msg is", "self._last_time) * 1000 self._new_msg(\" \" + msg + \": %0.4f", "func_qualname obj._delayed = delayed obj._mark_count = 0 obj._finished = False", "= type(caller_frame.f_locals[\"self\"]) except KeyError: # we are in a regular", "else: self.flush() print(msg % args) def __del__(self): self.finish() def finish(self,", "True or (disabled == 'env' and len(cls._profilers) == 0)): return", "pass _disabled_profiler = DisabledProfiler() def __new__(cls, msg=None, disabled='env', delayed=True): \"\"\"Optionally", "(new_time - self._last_time) * 1000 self._new_msg(\" \" + msg +", "timing information. \"\"\" if self.disable: return if msg is None:", "to an increasing counter) that contains the time elapsed since", "stuff') ... do other stuff ... profiler('did other stuff') #", "... profiler('did other stuff') # profiler is garbage-collected and flushed", "is not None else []) _depth = 0 _msgs =", "message with timing information. \"\"\" if self.disable: return if msg", "on caller's qualname. \"\"\" if (disabled is True or (disabled", "to the outer profiler if one is running, or printed", "an actual profiling object cls._depth += 1 obj = super(Profiler,", "# -*- coding: utf-8 -*- # Copyright (c) 2014, Vispy", "from . import ptime from .. import config class Profiler(object):", "_msgs = [] # set this flag to disable all", "__del__(self): self.finish() def finish(self, msg=None): \"\"\"Add a final message; flush", "set to False, messages are immediately printed instead. Example: def", "msg is not None: self(msg) self._new_msg(\"< Exiting %s, total time:", "stdout otherwise. If `delayed` is set to False, messages are", "(c) 2014, Vispy Development Team. # Distributed under the (new)", "1) + msg if self._delayed: self._msgs.append((msg, args)) else: self.flush() print(msg", "Copyright (c) 2014, Vispy Development Team. # Distributed under the", "method qualifier = caller_object_type.__name__ func_qualname = qualifier + \".\" +", "message; flush the message list if no parent profiler. \"\"\"", "% args) def __del__(self): self.finish() def finish(self, msg=None): \"\"\"Add a", "profiling, set the environment variable `VISPYPROFILE` to a comma-separated list", "= caller_object_type.__name__ func_qualname = qualifier + \".\" + caller_frame.f_code.co_name if", "setting `VISPYPROFILE` to \"C.function\" (without the module name) will enable", "list if no parent profiler. \"\"\" if self._finished or self.disable:", "determine the qualified name of the caller function caller_frame =", "func_qualname = qualifier + \".\" + caller_frame.f_code.co_name if (disabled ==", "self._finished or self.disable: return self._finished = True if msg is", "if one is running, or printed to stdout otherwise. If", "initial \"vispy..\" prefix from the module. \"\"\" _profilers = (config['profile'].split(\",\")", "BSD License. See LICENSE.txt for more info. # Adapted from", "all or individual profilers at runtime disable = False class", "do stuff ... profiler('did stuff') ... 
do other stuff ...", "disable = False class DisabledProfiler(object): def __init__(self, *args, **kwds): pass", "self._new_msg(\" \" + msg + \": %0.4f ms\", *(args +", "hierarchical measurement of time intervals. By default, profilers are disabled.", "\": %0.4f ms\", *(args + (elapsed,))) self._last_time = new_time def", "environment variable `VISPYPROFILE` to a comma-separated list of fully-qualified names", "contains the time elapsed since the last call. When the", "other stuff') # profiler is garbage-collected and flushed at function", "def mark(self, msg=None): pass _disabled_profiler = DisabledProfiler() def __new__(cls, msg=None,", "profiler registers a message (defaulting to an increasing counter) that", "runtime disable = False class DisabledProfiler(object): def __init__(self, *args, **kwds):", "# Adapted from PyQtGraph import sys from . import ptime", "(without the module name) will enable this profiler. For regular", "set this flag to disable all or individual profilers at", "Vispy Development Team. # Distributed under the (new) BSD License.", "def __call__(self, msg=None, *args): \"\"\"Register or print a new message", "print a new message with timing information. \"\"\" if self.disable:", "msg=None, *args): \"\"\"Register or print a new message with timing", "regular function qualifier = caller_frame.f_globals[\"__name__\"].split(\".\", 1)[1] else: # we are", "self(msg) def _new_msg(self, msg, *args): msg = \" \" *", "running, or printed to stdout otherwise. If `delayed` is set", "intervals. By default, profilers are disabled. To enable profiling, set", "(defaulting to an increasing counter) that contains the time elapsed", "def flush(self): if self._msgs: print(\"\\n\".join([m[0] % m[1] for m in", "ms\", *(args + (elapsed,))) self._last_time = new_time def mark(self, msg=None):", "None: self(msg) self._new_msg(\"< Exiting %s, total time: %0.4f ms\", self._name,", "C, setting `VISPYPROFILE` to \"C.function\" (without the module name) will", "msg if self._delayed: self._msgs.append((msg, args)) else: self.flush() print(msg % args)", "= obj._last_time = ptime.time() obj._new_msg(\"> Entering \" + obj._name) return", "finish(self): pass def mark(self, msg=None): pass _disabled_profiler = DisabledProfiler() def", "elapsed = (new_time - self._last_time) * 1000 self._new_msg(\" \" +", "a final message; flush the message list if no parent", "\".\" + caller_frame.f_code.co_name if (disabled == 'env' and func_qualname not", "return obj def __call__(self, msg=None, *args): \"\"\"Register or print a", "end If this function is a method of class C,", "[]) _depth = 0 _msgs = [] # set this", "disabled='env', delayed=True): \"\"\"Optionally create a new profiler based on caller's", ". import ptime from .. import config class Profiler(object): \"\"\"Simple", "str(self._mark_count) self._mark_count += 1 new_time = ptime.time() elapsed = (new_time", "def function(...): profiler = Profiler() ... do stuff ... profiler('did", "# we are in a method qualifier = caller_object_type.__name__ func_qualname", "self(msg) self._new_msg(\"< Exiting %s, total time: %0.4f ms\", self._name, (ptime.time()", "= 0 obj._finished = False obj._firstTime = obj._last_time = ptime.time()", "(elapsed,))) self._last_time = new_time def mark(self, msg=None): self(msg) def _new_msg(self,", "False class DisabledProfiler(object): def __init__(self, *args, **kwds): pass def __call__(self,", "ptime from .. 
import config class Profiler(object): \"\"\"Simple profiler allowing", "class C, setting `VISPYPROFILE` to \"C.function\" (without the module name)" ]
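# A minimal usage sketch, not part of the module above. It assumes the
# Profiler class is importable (upstream vispy ships it as
# vispy.util.profiler.Profiler) and that profiling was enabled before startup,
# e.g. VISPYPROFILE="Loader.load".
class Loader(object):
    def load(self, path):
        profiler = Profiler()     # disabled stub unless "Loader.load" is listed
        with open(path, 'rb') as f:
            data = f.read()
        profiler('read file')     # records elapsed ms; flushed when the profiler is deleted
        return data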
[ "test_extract_user_id_configure_by_user_class(self): user = User() user.USERNAME_FIELD = 'email' user.email = 'test_email'", "field\"\"\" settings.SAML_IDP_DJANGO_USERNAME_FIELD = 'first_name' user = User() user.first_name = 'test_first_name'", "get_user_model() class TestBaseProcessor: def test_extract_user_id_configure_by_user_class(self): user = User() user.USERNAME_FIELD =", "determine the user id field\"\"\" settings.SAML_IDP_DJANGO_USERNAME_FIELD = 'first_name' user =", "from django.contrib.auth import get_user_model from djangosaml2idp.processors import BaseProcessor User =", "= 'email' user.email = 'test_email' assert BaseProcessor('entity-id').get_user_id(user) == 'test_email' def", "import BaseProcessor User = get_user_model() class TestBaseProcessor: def test_extract_user_id_configure_by_user_class(self): user", "`settings.SAML_IDP_DJANGO_USERNAME_FIELD` to determine the user id field\"\"\" settings.SAML_IDP_DJANGO_USERNAME_FIELD = 'first_name'", "= 'first_name' user = User() user.first_name = 'test_first_name' assert BaseProcessor('entity-id').get_user_id(user)", "User() user.USERNAME_FIELD = 'email' user.email = 'test_email' assert BaseProcessor('entity-id').get_user_id(user) ==", "class TestBaseProcessor: def test_extract_user_id_configure_by_user_class(self): user = User() user.USERNAME_FIELD = 'email'", "the user id field\"\"\" settings.SAML_IDP_DJANGO_USERNAME_FIELD = 'first_name' user = User()", "user.USERNAME_FIELD = 'email' user.email = 'test_email' assert BaseProcessor('entity-id').get_user_id(user) == 'test_email'", "user id field\"\"\" settings.SAML_IDP_DJANGO_USERNAME_FIELD = 'first_name' user = User() user.first_name", "== 'test_email' def test_extract_user_id_configure_by_settings(self, settings): \"\"\"Should use `settings.SAML_IDP_DJANGO_USERNAME_FIELD` to determine", "to determine the user id field\"\"\" settings.SAML_IDP_DJANGO_USERNAME_FIELD = 'first_name' user", "test_extract_user_id_configure_by_settings(self, settings): \"\"\"Should use `settings.SAML_IDP_DJANGO_USERNAME_FIELD` to determine the user id", "\"\"\"Should use `settings.SAML_IDP_DJANGO_USERNAME_FIELD` to determine the user id field\"\"\" settings.SAML_IDP_DJANGO_USERNAME_FIELD", "user = User() user.first_name = 'test_first_name' assert BaseProcessor('entity-id').get_user_id(user) == 'test_first_name'", "'test_email' def test_extract_user_id_configure_by_settings(self, settings): \"\"\"Should use `settings.SAML_IDP_DJANGO_USERNAME_FIELD` to determine the", "from djangosaml2idp.processors import BaseProcessor User = get_user_model() class TestBaseProcessor: def", "djangosaml2idp.processors import BaseProcessor User = get_user_model() class TestBaseProcessor: def test_extract_user_id_configure_by_user_class(self):", "id field\"\"\" settings.SAML_IDP_DJANGO_USERNAME_FIELD = 'first_name' user = User() user.first_name =", "= 'test_email' assert BaseProcessor('entity-id').get_user_id(user) == 'test_email' def test_extract_user_id_configure_by_settings(self, settings): \"\"\"Should", "BaseProcessor('entity-id').get_user_id(user) == 'test_email' def test_extract_user_id_configure_by_settings(self, settings): \"\"\"Should use `settings.SAML_IDP_DJANGO_USERNAME_FIELD` to", "django.contrib.auth import get_user_model from djangosaml2idp.processors import BaseProcessor User = get_user_model()", "settings): \"\"\"Should use `settings.SAML_IDP_DJANGO_USERNAME_FIELD` to determine the user id field\"\"\"", "= User() user.USERNAME_FIELD = 'email' user.email = 'test_email' assert 
BaseProcessor('entity-id').get_user_id(user)", "def test_extract_user_id_configure_by_user_class(self): user = User() user.USERNAME_FIELD = 'email' user.email =", "user.email = 'test_email' assert BaseProcessor('entity-id').get_user_id(user) == 'test_email' def test_extract_user_id_configure_by_settings(self, settings):", "user = User() user.USERNAME_FIELD = 'email' user.email = 'test_email' assert", "def test_extract_user_id_configure_by_settings(self, settings): \"\"\"Should use `settings.SAML_IDP_DJANGO_USERNAME_FIELD` to determine the user", "assert BaseProcessor('entity-id').get_user_id(user) == 'test_email' def test_extract_user_id_configure_by_settings(self, settings): \"\"\"Should use `settings.SAML_IDP_DJANGO_USERNAME_FIELD`", "= get_user_model() class TestBaseProcessor: def test_extract_user_id_configure_by_user_class(self): user = User() user.USERNAME_FIELD", "'test_email' assert BaseProcessor('entity-id').get_user_id(user) == 'test_email' def test_extract_user_id_configure_by_settings(self, settings): \"\"\"Should use", "settings.SAML_IDP_DJANGO_USERNAME_FIELD = 'first_name' user = User() user.first_name = 'test_first_name' assert", "'email' user.email = 'test_email' assert BaseProcessor('entity-id').get_user_id(user) == 'test_email' def test_extract_user_id_configure_by_settings(self,", "BaseProcessor User = get_user_model() class TestBaseProcessor: def test_extract_user_id_configure_by_user_class(self): user =", "use `settings.SAML_IDP_DJANGO_USERNAME_FIELD` to determine the user id field\"\"\" settings.SAML_IDP_DJANGO_USERNAME_FIELD =", "get_user_model from djangosaml2idp.processors import BaseProcessor User = get_user_model() class TestBaseProcessor:", "import get_user_model from djangosaml2idp.processors import BaseProcessor User = get_user_model() class", "TestBaseProcessor: def test_extract_user_id_configure_by_user_class(self): user = User() user.USERNAME_FIELD = 'email' user.email", "'first_name' user = User() user.first_name = 'test_first_name' assert BaseProcessor('entity-id').get_user_id(user) ==", "User = get_user_model() class TestBaseProcessor: def test_extract_user_id_configure_by_user_class(self): user = User()" ]
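# Hypothetical sketch, not djangosaml2idp's real code: the lookup the two tests
# above exercise would prefer settings.SAML_IDP_DJANGO_USERNAME_FIELD when set
# and fall back to the user model's USERNAME_FIELD.
from django.conf import settings


def _user_id_lookup(user):
    field = getattr(settings, 'SAML_IDP_DJANGO_USERNAME_FIELD', None) or user.USERNAME_FIELD
    return str(getattr(user, field))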
[ "class Node: def __init__(self, data): self.data = data self.prev =", "False while temp_head is not None: if temp_head.data == ele:", "contains(self, ele): temp_head = self.head while temp_head is not None:", "self.next = None class SingleLinkedList: def __init__(self): self.head = None", "temp_head.next break prev_node = temp_head temp_head = temp_head.next return is_node_deleted", "is not None: if temp_head.data == ele: is_node_deleted = True", "ele): if self.head is None: return; if self.head.data == ele:", "ele: return True temp_head = temp_head.next return False def remove(self,", "list.print_list(); print(\"List contains element 4\", list.contains(4)) print(\"List contains element 6\",", "while temp_head.next is not None: temp_head = temp_head.next; temp_head.next =", "temp_head = temp_head.next if __name__ == '__main__': list = SingleLinkedList();", "self.head while temp_head is not None: print(temp_head.data) temp_head = temp_head.next", "__init__(self): self.head = None def add(self, ele): new_node = Node(ele)", "= temp_head.next; temp_head.next = new_node; def contains(self, ele): temp_head =", "= self.head.next prev_node = temp_head is_node_deleted = False while temp_head", "prev_node = temp_head temp_head = temp_head.next return is_node_deleted def print_list(self):", "4\", list.contains(4)) print(\"List contains element 6\", list.contains(6)) print(\"Removing element 13\",", "prev_node = temp_head is_node_deleted = False while temp_head is not", "return False def remove(self, ele): if self.head is None: return;", "temp_head is_node_deleted = False while temp_head is not None: if", "list.add(19) list.print_list(); print(\"List contains element 4\", list.contains(4)) print(\"List contains element", "is not None: temp_head = temp_head.next; temp_head.next = new_node; def", "True temp_head = self.head.next prev_node = temp_head is_node_deleted = False", "None class SingleLinkedList: def __init__(self): self.head = None def add(self,", "return; if self.head.data == ele: self.head = self.head.next return True", "contains element 4\", list.contains(4)) print(\"List contains element 6\", list.contains(6)) print(\"Removing", "= None def add(self, ele): new_node = Node(ele) if self.head", "temp_head is not None: if temp_head.data == ele: return True", "True temp_head = temp_head.next return False def remove(self, ele): if", "= new_node; def contains(self, ele): temp_head = self.head while temp_head", "def contains(self, ele): temp_head = self.head while temp_head is not", "ele: self.head = self.head.next return True temp_head = self.head.next prev_node", "temp_head.data == ele: is_node_deleted = True prev_node.next = temp_head.next break", "= SingleLinkedList(); list.add(5) list.add(4) list.add(12) list.add(13) list.add(19) list.print_list(); print(\"List contains", "new_node = Node(ele) if self.head is None: self.head = new_node", "temp_head.next return is_node_deleted def print_list(self): temp_head = self.head while temp_head", "list = SingleLinkedList(); list.add(5) list.add(4) list.add(12) list.add(13) list.add(19) list.print_list(); print(\"List", "self.head.next prev_node = temp_head is_node_deleted = False while temp_head is", "temp_head.data == ele: return True temp_head = temp_head.next return False", "while temp_head is not None: if temp_head.data == ele: is_node_deleted", "= True prev_node.next = temp_head.next break prev_node = temp_head temp_head", "self.data = data self.prev = None self.next = None class", "= self.head while temp_head is not None: if temp_head.data ==", 
"None: temp_head = temp_head.next; temp_head.next = new_node; def contains(self, ele):", "= None class SingleLinkedList: def __init__(self): self.head = None def", "= temp_head.next return is_node_deleted def print_list(self): temp_head = self.head while", "def __init__(self, data): self.data = data self.prev = None self.next", "not None: if temp_head.data == ele: return True temp_head =", "print(\"List contains element 4\", list.contains(4)) print(\"List contains element 6\", list.contains(6))", "print(temp_head.data) temp_head = temp_head.next if __name__ == '__main__': list =", "temp_head is not None: print(temp_head.data) temp_head = temp_head.next if __name__", "self.head while temp_head.next is not None: temp_head = temp_head.next; temp_head.next", "None def add(self, ele): new_node = Node(ele) if self.head is", "self.head = None def add(self, ele): new_node = Node(ele) if", "= temp_head is_node_deleted = False while temp_head is not None:", "if __name__ == '__main__': list = SingleLinkedList(); list.add(5) list.add(4) list.add(12)", "while temp_head is not None: print(temp_head.data) temp_head = temp_head.next if", "== ele: return True temp_head = temp_head.next return False def", "while temp_head is not None: if temp_head.data == ele: return", "list.contains(6)) print(\"Removing element 13\", list.remove(13)) list.print_list(); print(\"List contains element 13\",", "None: print(temp_head.data) temp_head = temp_head.next if __name__ == '__main__': list", "temp_head = self.head while temp_head.next is not None: temp_head =", "if temp_head.data == ele: return True temp_head = temp_head.next return", "= Node(ele) if self.head is None: self.head = new_node return", "None: if temp_head.data == ele: return True temp_head = temp_head.next", "return is_node_deleted def print_list(self): temp_head = self.head while temp_head is", "return True temp_head = temp_head.next return False def remove(self, ele):", "False def remove(self, ele): if self.head is None: return; if", "temp_head.next return False def remove(self, ele): if self.head is None:", "list.add(13) list.add(19) list.print_list(); print(\"List contains element 4\", list.contains(4)) print(\"List contains", "__name__ == '__main__': list = SingleLinkedList(); list.add(5) list.add(4) list.add(12) list.add(13)", "if self.head is None: return; if self.head.data == ele: self.head", "temp_head temp_head = temp_head.next return is_node_deleted def print_list(self): temp_head =", "is_node_deleted = False while temp_head is not None: if temp_head.data", "break prev_node = temp_head temp_head = temp_head.next return is_node_deleted def", "__init__(self, data): self.data = data self.prev = None self.next =", "temp_head = self.head while temp_head is not None: print(temp_head.data) temp_head", "None self.next = None class SingleLinkedList: def __init__(self): self.head =", "return temp_head = self.head while temp_head.next is not None: temp_head", "ele): new_node = Node(ele) if self.head is None: self.head =", "temp_head.next; temp_head.next = new_node; def contains(self, ele): temp_head = self.head", "prev_node.next = temp_head.next break prev_node = temp_head temp_head = temp_head.next", "= temp_head.next if __name__ == '__main__': list = SingleLinkedList(); list.add(5)", "SingleLinkedList(); list.add(5) list.add(4) list.add(12) list.add(13) list.add(19) list.print_list(); print(\"List contains element", "temp_head.next if __name__ == '__main__': list = SingleLinkedList(); list.add(5) list.add(4)", "= False while temp_head is not None: if 
temp_head.data ==", "= None self.next = None class SingleLinkedList: def __init__(self): self.head", "def remove(self, ele): if self.head is None: return; if self.head.data", "add(self, ele): new_node = Node(ele) if self.head is None: self.head", "if self.head is None: self.head = new_node return temp_head =", "not None: print(temp_head.data) temp_head = temp_head.next if __name__ == '__main__':", "list.add(12) list.add(13) list.add(19) list.print_list(); print(\"List contains element 4\", list.contains(4)) print(\"List", "def print_list(self): temp_head = self.head while temp_head is not None:", "print(\"List contains element 6\", list.contains(6)) print(\"Removing element 13\", list.remove(13)) list.print_list();", "new_node return temp_head = self.head while temp_head.next is not None:", "= self.head.next return True temp_head = self.head.next prev_node = temp_head", "list.contains(4)) print(\"List contains element 6\", list.contains(6)) print(\"Removing element 13\", list.remove(13))", "temp_head.next is not None: temp_head = temp_head.next; temp_head.next = new_node;", "self.prev = None self.next = None class SingleLinkedList: def __init__(self):", "self.head is None: return; if self.head.data == ele: self.head =", "is None: return; if self.head.data == ele: self.head = self.head.next", "self.head.data == ele: self.head = self.head.next return True temp_head =", "element 6\", list.contains(6)) print(\"Removing element 13\", list.remove(13)) list.print_list(); print(\"List contains", "ele): temp_head = self.head while temp_head is not None: if", "self.head.next return True temp_head = self.head.next prev_node = temp_head is_node_deleted", "element 4\", list.contains(4)) print(\"List contains element 6\", list.contains(6)) print(\"Removing element", "not None: temp_head = temp_head.next; temp_head.next = new_node; def contains(self,", "temp_head = temp_head.next; temp_head.next = new_node; def contains(self, ele): temp_head", "= self.head while temp_head.next is not None: temp_head = temp_head.next;", "print(\"Removing element 13\", list.remove(13)) list.print_list(); print(\"List contains element 13\", list.contains(13))", "== ele: is_node_deleted = True prev_node.next = temp_head.next break prev_node", "is_node_deleted def print_list(self): temp_head = self.head while temp_head is not", "= temp_head temp_head = temp_head.next return is_node_deleted def print_list(self): temp_head", "None: self.head = new_node return temp_head = self.head while temp_head.next", "temp_head.next = new_node; def contains(self, ele): temp_head = self.head while", "temp_head = self.head while temp_head is not None: if temp_head.data", "return True temp_head = self.head.next prev_node = temp_head is_node_deleted =", "is not None: if temp_head.data == ele: return True temp_head", "is not None: print(temp_head.data) temp_head = temp_head.next if __name__ ==", "print_list(self): temp_head = self.head while temp_head is not None: print(temp_head.data)", "'__main__': list = SingleLinkedList(); list.add(5) list.add(4) list.add(12) list.add(13) list.add(19) list.print_list();", "self.head while temp_head is not None: if temp_head.data == ele:", "self.head = new_node return temp_head = self.head while temp_head.next is", "Node: def __init__(self, data): self.data = data self.prev = None", "True prev_node.next = temp_head.next break prev_node = temp_head temp_head =", "SingleLinkedList: def __init__(self): self.head = None def add(self, ele): new_node", "= temp_head.next break prev_node = temp_head temp_head = 
temp_head.next return", "temp_head = temp_head.next return False def remove(self, ele): if self.head", "def add(self, ele): new_node = Node(ele) if self.head is None:", "= new_node return temp_head = self.head while temp_head.next is not", "temp_head = temp_head.next return is_node_deleted def print_list(self): temp_head = self.head", "if self.head.data == ele: self.head = self.head.next return True temp_head", "def __init__(self): self.head = None def add(self, ele): new_node =", "temp_head is not None: if temp_head.data == ele: is_node_deleted =", "None: return; if self.head.data == ele: self.head = self.head.next return", "temp_head = self.head.next prev_node = temp_head is_node_deleted = False while", "not None: if temp_head.data == ele: is_node_deleted = True prev_node.next", "== ele: self.head = self.head.next return True temp_head = self.head.next", "None: if temp_head.data == ele: is_node_deleted = True prev_node.next =", "if temp_head.data == ele: is_node_deleted = True prev_node.next = temp_head.next", "= data self.prev = None self.next = None class SingleLinkedList:", "is_node_deleted = True prev_node.next = temp_head.next break prev_node = temp_head", "= self.head while temp_head is not None: print(temp_head.data) temp_head =", "6\", list.contains(6)) print(\"Removing element 13\", list.remove(13)) list.print_list(); print(\"List contains element", "list.add(4) list.add(12) list.add(13) list.add(19) list.print_list(); print(\"List contains element 4\", list.contains(4))", "contains element 6\", list.contains(6)) print(\"Removing element 13\", list.remove(13)) list.print_list(); print(\"List", "data self.prev = None self.next = None class SingleLinkedList: def", "= temp_head.next return False def remove(self, ele): if self.head is", "ele: is_node_deleted = True prev_node.next = temp_head.next break prev_node =", "self.head = self.head.next return True temp_head = self.head.next prev_node =", "self.head is None: self.head = new_node return temp_head = self.head", "is None: self.head = new_node return temp_head = self.head while", "remove(self, ele): if self.head is None: return; if self.head.data ==", "list.add(5) list.add(4) list.add(12) list.add(13) list.add(19) list.print_list(); print(\"List contains element 4\",", "data): self.data = data self.prev = None self.next = None", "class SingleLinkedList: def __init__(self): self.head = None def add(self, ele):", "== '__main__': list = SingleLinkedList(); list.add(5) list.add(4) list.add(12) list.add(13) list.add(19)", "new_node; def contains(self, ele): temp_head = self.head while temp_head is", "Node(ele) if self.head is None: self.head = new_node return temp_head" ]
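# A quick check, assuming the SingleLinkedList defined above is in scope: it
# exercises removal of the node immediately after the head, which relies on
# prev_node being seeded from self.head in remove().
check = SingleLinkedList()
for value in (5, 4, 12):
    check.add(value)
print(check.remove(4))   # True
check.print_list()       # prints 5, then 12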
[ "be fetched.\"\"\" index: Index url: str page: int output: str", "this index for a given url.\"\"\" index: Index url: str", "datetime import datetime from typing import Any, List, Optional, Union", "strings warc_request_meta: Optional[str] response_header: Optional[str] class Result(BaseModel): url_key: str =", "-> Union[datetime, Any]: if isinstance(value, str): datetime_value = datetime.strptime(value, \"%Y%m%d%H%M%S\")", "cdx_api: HttpUrl = Field(alias=\"cdx-api\") @dataclass(frozen=True) class ResultBody: mime_detected: Optional[str] data:", "def parse_timestamp(cls, value: Any) -> Union[datetime, Any]: if isinstance(value, str):", "Optional[ResultMeta] @validator(\"timestamp\", pre=True) def parse_timestamp(cls, value: Any) -> Union[datetime, Any]:", "= \"json\" class SearchPagesResponse(BaseModel): \"\"\"Response with the total number of", "ResultBody: mime_detected: Optional[str] data: Optional[str] text: Optional[List[str]] @dataclass(frozen=True) class ResultMeta:", "List, Optional, Union from pydantic import BaseModel, Field, HttpUrl, validator", "pydantic.dataclasses import dataclass class Index(BaseModel): id: str name: str time_gate:", "on one index for a given url.\"\"\" index: Index url:", "str languages: Optional[str] encoding: Optional[str] index_id: Optional[str] body: Optional[ResultBody] meta:", "int digest: str length: int offset: int filename: str languages:", "mime_detected: str = Field(alias=\"mime-detected\") status: int digest: str length: int", "in this index for a given url.\"\"\" index: Index url:", "text: Optional[List[str]] @dataclass(frozen=True) class ResultMeta: # todo: these are still", "Result(BaseModel): url_key: str = Field(alias=\"urlkey\") timestamp: datetime url: str mime:", "url: str show_num_pages: str = Field(alias=\"showNumPages\", default=\"true\", const=True) output: str", "\"\"\"One page that contains records to be fetched.\"\"\" index: Index", "class SearchIndexRequest(BaseModel): \"\"\"One page that contains records to be fetched.\"\"\"", "name: str time_gate: HttpUrl = Field(alias=\"timegate\") cdx_api: HttpUrl = Field(alias=\"cdx-api\")", "contains records to be fetched.\"\"\" index: Index url: str page:", "mime_detected: Optional[str] data: Optional[str] text: Optional[List[str]] @dataclass(frozen=True) class ResultMeta: #", "index_id: Optional[str] body: Optional[ResultBody] meta: Optional[ResultMeta] @validator(\"timestamp\", pre=True) def parse_timestamp(cls,", "url.\"\"\" index: Index url: str show_num_pages: str = Field(alias=\"showNumPages\", default=\"true\",", "for a given url.\"\"\" index: Index url: str show_num_pages: str", "class ResultMeta: # todo: these are still raw strings warc_request_meta:", "\"json\" class SearchPagesResponse(BaseModel): \"\"\"Response with the total number of pages", "str show_num_pages: str = Field(alias=\"showNumPages\", default=\"true\", const=True) output: str =", "existing pages on one index for a given url.\"\"\" index:", "str name: str time_gate: HttpUrl = Field(alias=\"timegate\") cdx_api: HttpUrl =", "Union from pydantic import BaseModel, Field, HttpUrl, validator from pydantic.dataclasses", "url: str mime: str mime_detected: str = Field(alias=\"mime-detected\") status: int", "parse_timestamp(cls, value: Any) -> Union[datetime, Any]: if isinstance(value, str): datetime_value", "HttpUrl, validator from pydantic.dataclasses import dataclass class Index(BaseModel): id: str", "Optional[str] data: Optional[str] text: Optional[List[str]] @dataclass(frozen=True) class ResultMeta: # todo:", 
"datetime_value return value class SearchPagesRequest(BaseModel): \"\"\"Request existing pages on one", "dataclass class Index(BaseModel): id: str name: str time_gate: HttpUrl =", "Any) -> Union[datetime, Any]: if isinstance(value, str): datetime_value = datetime.strptime(value,", "\"%Y%m%d%H%M%S\") return datetime_value return value class SearchPagesRequest(BaseModel): \"\"\"Request existing pages", "raw strings warc_request_meta: Optional[str] response_header: Optional[str] class Result(BaseModel): url_key: str", "class Result(BaseModel): url_key: str = Field(alias=\"urlkey\") timestamp: datetime url: str", "a given url.\"\"\" index: Index url: str show_num_pages: str =", "response_header: Optional[str] class Result(BaseModel): url_key: str = Field(alias=\"urlkey\") timestamp: datetime", "Any]: if isinstance(value, str): datetime_value = datetime.strptime(value, \"%Y%m%d%H%M%S\") return datetime_value", "SearchIndexRequest(BaseModel): \"\"\"One page that contains records to be fetched.\"\"\" index:", "mime: str mime_detected: str = Field(alias=\"mime-detected\") status: int digest: str", "if isinstance(value, str): datetime_value = datetime.strptime(value, \"%Y%m%d%H%M%S\") return datetime_value return", "default=\"true\", const=True) output: str = \"json\" class SearchPagesResponse(BaseModel): \"\"\"Response with", "Index url: str pages: int class SearchIndexRequest(BaseModel): \"\"\"One page that", "ResultMeta: # todo: these are still raw strings warc_request_meta: Optional[str]", "\"\"\"Response with the total number of pages in this index", "these are still raw strings warc_request_meta: Optional[str] response_header: Optional[str] class", "str = Field(alias=\"urlkey\") timestamp: datetime url: str mime: str mime_detected:", "show_num_pages: str = Field(alias=\"showNumPages\", default=\"true\", const=True) output: str = \"json\"", "body: Optional[ResultBody] meta: Optional[ResultMeta] @validator(\"timestamp\", pre=True) def parse_timestamp(cls, value: Any)", "Optional[str] response_header: Optional[str] class Result(BaseModel): url_key: str = Field(alias=\"urlkey\") timestamp:", "isinstance(value, str): datetime_value = datetime.strptime(value, \"%Y%m%d%H%M%S\") return datetime_value return value", "str = Field(alias=\"showNumPages\", default=\"true\", const=True) output: str = \"json\" class", "import datetime from typing import Any, List, Optional, Union from", "length: int offset: int filename: str languages: Optional[str] encoding: Optional[str]", "import BaseModel, Field, HttpUrl, validator from pydantic.dataclasses import dataclass class", "str length: int offset: int filename: str languages: Optional[str] encoding:", "warc_request_meta: Optional[str] response_header: Optional[str] class Result(BaseModel): url_key: str = Field(alias=\"urlkey\")", "index for a given url.\"\"\" index: Index url: str pages:", "timestamp: datetime url: str mime: str mime_detected: str = Field(alias=\"mime-detected\")", "pages: int class SearchIndexRequest(BaseModel): \"\"\"One page that contains records to", "records to be fetched.\"\"\" index: Index url: str page: int", "Optional[str] text: Optional[List[str]] @dataclass(frozen=True) class ResultMeta: # todo: these are", "str): datetime_value = datetime.strptime(value, \"%Y%m%d%H%M%S\") return datetime_value return value class", "url: str pages: int class SearchIndexRequest(BaseModel): \"\"\"One page that contains", "SearchPagesRequest(BaseModel): \"\"\"Request existing pages on one index for a given", "url.\"\"\" index: Index url: str pages: int 
class SearchIndexRequest(BaseModel): \"\"\"One", "from pydantic import BaseModel, Field, HttpUrl, validator from pydantic.dataclasses import", "to be fetched.\"\"\" index: Index url: str page: int output:", "Field(alias=\"urlkey\") timestamp: datetime url: str mime: str mime_detected: str =", "datetime_value = datetime.strptime(value, \"%Y%m%d%H%M%S\") return datetime_value return value class SearchPagesRequest(BaseModel):", "Field(alias=\"timegate\") cdx_api: HttpUrl = Field(alias=\"cdx-api\") @dataclass(frozen=True) class ResultBody: mime_detected: Optional[str]", "class ResultBody: mime_detected: Optional[str] data: Optional[str] text: Optional[List[str]] @dataclass(frozen=True) class", "of pages in this index for a given url.\"\"\" index:", "= Field(alias=\"urlkey\") timestamp: datetime url: str mime: str mime_detected: str", "str mime: str mime_detected: str = Field(alias=\"mime-detected\") status: int digest:", "Optional[ResultBody] meta: Optional[ResultMeta] @validator(\"timestamp\", pre=True) def parse_timestamp(cls, value: Any) ->", "given url.\"\"\" index: Index url: str show_num_pages: str = Field(alias=\"showNumPages\",", "str = \"json\" class SearchPagesResponse(BaseModel): \"\"\"Response with the total number", "encoding: Optional[str] index_id: Optional[str] body: Optional[ResultBody] meta: Optional[ResultMeta] @validator(\"timestamp\", pre=True)", "pre=True) def parse_timestamp(cls, value: Any) -> Union[datetime, Any]: if isinstance(value,", "= Field(alias=\"showNumPages\", default=\"true\", const=True) output: str = \"json\" class SearchPagesResponse(BaseModel):", "fetched.\"\"\" index: Index url: str page: int output: str =", "HttpUrl = Field(alias=\"cdx-api\") @dataclass(frozen=True) class ResultBody: mime_detected: Optional[str] data: Optional[str]", "= datetime.strptime(value, \"%Y%m%d%H%M%S\") return datetime_value return value class SearchPagesRequest(BaseModel): \"\"\"Request", "output: str = \"json\" class SearchPagesResponse(BaseModel): \"\"\"Response with the total", "with the total number of pages in this index for", "index: Index url: str page: int output: str = \"json\"", "@dataclass(frozen=True) class ResultMeta: # todo: these are still raw strings", "Index url: str show_num_pages: str = Field(alias=\"showNumPages\", default=\"true\", const=True) output:", "value: Any) -> Union[datetime, Any]: if isinstance(value, str): datetime_value =", "total number of pages in this index for a given", "from datetime import datetime from typing import Any, List, Optional,", "Optional[List[str]] @dataclass(frozen=True) class ResultMeta: # todo: these are still raw", "Optional[str] class Result(BaseModel): url_key: str = Field(alias=\"urlkey\") timestamp: datetime url:", "int filename: str languages: Optional[str] encoding: Optional[str] index_id: Optional[str] body:", "SearchPagesResponse(BaseModel): \"\"\"Response with the total number of pages in this", "a given url.\"\"\" index: Index url: str pages: int class", "int class SearchIndexRequest(BaseModel): \"\"\"One page that contains records to be", "= Field(alias=\"mime-detected\") status: int digest: str length: int offset: int", "Union[datetime, Any]: if isinstance(value, str): datetime_value = datetime.strptime(value, \"%Y%m%d%H%M%S\") return", "status: int digest: str length: int offset: int filename: str", "from pydantic.dataclasses import dataclass class Index(BaseModel): id: str name: str", "Field(alias=\"cdx-api\") @dataclass(frozen=True) class ResultBody: mime_detected: Optional[str] data: Optional[str] text: 
Optional[List[str]]", "meta: Optional[ResultMeta] @validator(\"timestamp\", pre=True) def parse_timestamp(cls, value: Any) -> Union[datetime,", "Optional[str] encoding: Optional[str] index_id: Optional[str] body: Optional[ResultBody] meta: Optional[ResultMeta] @validator(\"timestamp\",", "HttpUrl = Field(alias=\"timegate\") cdx_api: HttpUrl = Field(alias=\"cdx-api\") @dataclass(frozen=True) class ResultBody:", "= Field(alias=\"cdx-api\") @dataclass(frozen=True) class ResultBody: mime_detected: Optional[str] data: Optional[str] text:", "class SearchPagesRequest(BaseModel): \"\"\"Request existing pages on one index for a", "@validator(\"timestamp\", pre=True) def parse_timestamp(cls, value: Any) -> Union[datetime, Any]: if", "str = Field(alias=\"mime-detected\") status: int digest: str length: int offset:", "datetime from typing import Any, List, Optional, Union from pydantic", "class SearchPagesResponse(BaseModel): \"\"\"Response with the total number of pages in", "typing import Any, List, Optional, Union from pydantic import BaseModel,", "pages on one index for a given url.\"\"\" index: Index", "todo: these are still raw strings warc_request_meta: Optional[str] response_header: Optional[str]", "return value class SearchPagesRequest(BaseModel): \"\"\"Request existing pages on one index", "one index for a given url.\"\"\" index: Index url: str", "str mime_detected: str = Field(alias=\"mime-detected\") status: int digest: str length:", "import Any, List, Optional, Union from pydantic import BaseModel, Field,", "Field(alias=\"mime-detected\") status: int digest: str length: int offset: int filename:", "index for a given url.\"\"\" index: Index url: str show_num_pages:", "Field(alias=\"showNumPages\", default=\"true\", const=True) output: str = \"json\" class SearchPagesResponse(BaseModel): \"\"\"Response", "from typing import Any, List, Optional, Union from pydantic import", "page that contains records to be fetched.\"\"\" index: Index url:", "given url.\"\"\" index: Index url: str pages: int class SearchIndexRequest(BaseModel):", "str time_gate: HttpUrl = Field(alias=\"timegate\") cdx_api: HttpUrl = Field(alias=\"cdx-api\") @dataclass(frozen=True)", "Field, HttpUrl, validator from pydantic.dataclasses import dataclass class Index(BaseModel): id:", "still raw strings warc_request_meta: Optional[str] response_header: Optional[str] class Result(BaseModel): url_key:", "return datetime_value return value class SearchPagesRequest(BaseModel): \"\"\"Request existing pages on", "BaseModel, Field, HttpUrl, validator from pydantic.dataclasses import dataclass class Index(BaseModel):", "languages: Optional[str] encoding: Optional[str] index_id: Optional[str] body: Optional[ResultBody] meta: Optional[ResultMeta]", "Any, List, Optional, Union from pydantic import BaseModel, Field, HttpUrl,", "offset: int filename: str languages: Optional[str] encoding: Optional[str] index_id: Optional[str]", "Optional, Union from pydantic import BaseModel, Field, HttpUrl, validator from", "url_key: str = Field(alias=\"urlkey\") timestamp: datetime url: str mime: str", "that contains records to be fetched.\"\"\" index: Index url: str", "for a given url.\"\"\" index: Index url: str pages: int", "import dataclass class Index(BaseModel): id: str name: str time_gate: HttpUrl", "@dataclass(frozen=True) class ResultBody: mime_detected: Optional[str] data: Optional[str] text: Optional[List[str]] @dataclass(frozen=True)", "str pages: int class SearchIndexRequest(BaseModel): \"\"\"One page that contains records", "const=True) 
output: str = \"json\" class SearchPagesResponse(BaseModel): \"\"\"Response with the", "validator from pydantic.dataclasses import dataclass class Index(BaseModel): id: str name:", "int offset: int filename: str languages: Optional[str] encoding: Optional[str] index_id:", "datetime url: str mime: str mime_detected: str = Field(alias=\"mime-detected\") status:", "digest: str length: int offset: int filename: str languages: Optional[str]", "number of pages in this index for a given url.\"\"\"", "Index(BaseModel): id: str name: str time_gate: HttpUrl = Field(alias=\"timegate\") cdx_api:", "time_gate: HttpUrl = Field(alias=\"timegate\") cdx_api: HttpUrl = Field(alias=\"cdx-api\") @dataclass(frozen=True) class", "Optional[str] index_id: Optional[str] body: Optional[ResultBody] meta: Optional[ResultMeta] @validator(\"timestamp\", pre=True) def", "datetime.strptime(value, \"%Y%m%d%H%M%S\") return datetime_value return value class SearchPagesRequest(BaseModel): \"\"\"Request existing", "# todo: these are still raw strings warc_request_meta: Optional[str] response_header:", "the total number of pages in this index for a", "index: Index url: str show_num_pages: str = Field(alias=\"showNumPages\", default=\"true\", const=True)", "pydantic import BaseModel, Field, HttpUrl, validator from pydantic.dataclasses import dataclass", "value class SearchPagesRequest(BaseModel): \"\"\"Request existing pages on one index for", "id: str name: str time_gate: HttpUrl = Field(alias=\"timegate\") cdx_api: HttpUrl", "= Field(alias=\"timegate\") cdx_api: HttpUrl = Field(alias=\"cdx-api\") @dataclass(frozen=True) class ResultBody: mime_detected:", "Optional[str] body: Optional[ResultBody] meta: Optional[ResultMeta] @validator(\"timestamp\", pre=True) def parse_timestamp(cls, value:", "filename: str languages: Optional[str] encoding: Optional[str] index_id: Optional[str] body: Optional[ResultBody]", "are still raw strings warc_request_meta: Optional[str] response_header: Optional[str] class Result(BaseModel):", "data: Optional[str] text: Optional[List[str]] @dataclass(frozen=True) class ResultMeta: # todo: these", "pages in this index for a given url.\"\"\" index: Index", "index: Index url: str pages: int class SearchIndexRequest(BaseModel): \"\"\"One page", "class Index(BaseModel): id: str name: str time_gate: HttpUrl = Field(alias=\"timegate\")", "\"\"\"Request existing pages on one index for a given url.\"\"\"" ]
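# A small sketch of feeding the Result model above (assumes pydantic v1, which
# matches the `validator` import): field aliases let a raw CDX JSON record be
# parsed as-is, and the pre-validator turns the "%Y%m%d%H%M%S" timestamp
# string into a datetime. The record values are made up for illustration.
raw_record = {
    "urlkey": "org,example)/",
    "timestamp": "20200101123045",
    "url": "https://example.org/",
    "mime": "text/html",
    "mime-detected": "text/html",
    "status": 200,
    "digest": "EXAMPLEDIGEST",
    "length": 1234,
    "offset": 5678,
    "filename": "example.warc.gz",
}
result = Result.parse_obj(raw_record)
print(result.timestamp)      # datetime.datetime(2020, 1, 1, 12, 30, 45)
print(result.mime_detected)  # 'text/html'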
[ ") if exc_type and isinstance(exc_value, EnvironmentError): _errno = exc_value.errno fserror", "# pragma: no cover fserror = errors.ResourceLocked reraise( fserror, fserror(", "to FS Errors. \"\"\" FILE_ERRORS = { 64: errors.RemoteConnectionError, #", "self._directory else self.FILE_ERRORS ) if exc_type and isinstance(exc_value, EnvironmentError): _errno", "in to FS Errors. \"\"\" FILE_ERRORS = { 64: errors.RemoteConnectionError,", "fserror( self._path, exc=exc_value ), traceback ) # Stops linter complaining", "_errno == errno.EACCES and sys.platform == \"win32\": if getattr(exc_value, 'args',", "unwrap_errors(path_replace): \"\"\"Get a context to map OS errors to their", "from contextlib import contextmanager import sys import platform from .", "to be unwrapped. Or it may be a dictionary that", "\"\"\" from __future__ import print_function from __future__ import unicode_literals import", "@contextmanager def unwrap_errors(path_replace): \"\"\"Get a context to map OS errors", "unwrapped paths. \"\"\" try: yield except errors.ResourceError as e: if", "import contextmanager import sys import platform from . import errors", "errors.DirectoryExpected FILE_ERRORS[13] = errors.FileExpected def __init__(self, opname, path, directory=False): self._opname", "errors.DirectoryExpected DIR_ERRORS[267] = errors.DirectoryExpected FILE_ERRORS[13] = errors.FileExpected def __init__(self, opname,", "exc=exc_value ), traceback ) # Stops linter complaining about invalid", "may be a dictionary that maps wrapped paths on to", "errors to their `fs.errors` counterpart. The context will re-write the", "errors.DirectoryExists DIR_ERRORS[errno.EINVAL] = errors.DirectoryExpected if _WINDOWS_PLATFORM: # pragma: no cover", "errno.ENOTDIR: errors.ResourceNotFound, errno.EISDIR: errors.FileExpected, errno.EINVAL: errors.FileExpected, errno.ENOSPC: errors.InsufficientStorage, errno.EPERM: errors.PermissionDenied,", "self._opname = opname self._path = path self._directory = directory def", "import platform from . import errors from six import reraise", "import print_function from __future__ import unicode_literals import errno from contextlib", "DIR_ERRORS[267] = errors.DirectoryExpected FILE_ERRORS[13] = errors.FileExpected def __init__(self, opname, path,", "= platform.system() == 'Windows' class _ConvertOSErrors(object): \"\"\"Context manager to convert", "paths on to unwrapped paths. \"\"\" try: yield except errors.ResourceError", "= os_errors.get(_errno, errors.OperationFailed) if _errno == errno.EACCES and sys.platform ==", "name convert_os_errors = _ConvertOSErrors @contextmanager def unwrap_errors(path_replace): \"\"\"Get a context", "context to map OS errors to their `fs.errors` counterpart. The", "dict): e.path = path_replace.get(e.path, e.path) else: e.path = path_replace reraise(type(e),", "== \"win32\": if getattr(exc_value, 'args', None) == 32: # pragma:", "= opname self._path = path self._directory = directory def __enter__(self):", "if self._directory else self.FILE_ERRORS ) if exc_type and isinstance(exc_value, EnvironmentError):", "exc_type and isinstance(exc_value, EnvironmentError): _errno = exc_value.errno fserror = os_errors.get(_errno,", "OS errors to their `fs.errors` counterpart. The context will re-write", "parent, if only one path is to be unwrapped. Or", "and sys.platform == \"win32\": if getattr(exc_value, 'args', None) == 32:", "only one path is to be unwrapped. 
Or it may", "errno.EISDIR: errors.FileExpected, errno.EINVAL: errors.FileExpected, errno.ENOSPC: errors.InsufficientStorage, errno.EPERM: errors.PermissionDenied, errno.ENETDOWN: errors.RemoteConnectionError,", "errors.FileExpected def __init__(self, opname, path, directory=False): self._opname = opname self._path", "errors.FileExpected, errno.EINVAL: errors.FileExpected, errno.ENOSPC: errors.InsufficientStorage, errno.EPERM: errors.PermissionDenied, errno.ENETDOWN: errors.RemoteConnectionError, errno.ECONNRESET:", "= path self._directory = directory def __enter__(self): return self def", "e.path = path_replace.get(e.path, e.path) else: e.path = path_replace reraise(type(e), e)", "errors.RemoteConnectionError, # ENONET errno.EACCES: errors.PermissionDenied, errno.ENOENT: errors.ResourceNotFound, errno.EFAULT: errors.ResourceNotFound, errno.ESRCH:", "__enter__(self): return self def __exit__(self, exc_type, exc_value, traceback): os_errors =", "re-write the paths in resource exceptions to be in the", "errno.EACCES: errors.PermissionDenied, errno.ENOENT: errors.ResourceNotFound, errno.EFAULT: errors.ResourceNotFound, errno.ESRCH: errors.ResourceNotFound, errno.ENOTEMPTY: errors.DirectoryNotEmpty,", "def __init__(self, opname, path, directory=False): self._opname = opname self._path =", "Stops linter complaining about invalid class name convert_os_errors = _ConvertOSErrors", "= exc_value.errno fserror = os_errors.get(_errno, errors.OperationFailed) if _errno == errno.EACCES", "import errors from six import reraise _WINDOWS_PLATFORM = platform.system() ==", "from six import reraise _WINDOWS_PLATFORM = platform.system() == 'Windows' class", "= ( self.DIR_ERRORS if self._directory else self.FILE_ERRORS ) if exc_type", "def __exit__(self, exc_type, exc_value, traceback): os_errors = ( self.DIR_ERRORS if", "will re-write the paths in resource exceptions to be in", "e: if hasattr(e, 'path'): if isinstance(path_replace, dict): e.path = path_replace.get(e.path,", "# ENONET errno.EACCES: errors.PermissionDenied, errno.ENOENT: errors.ResourceNotFound, errno.EFAULT: errors.ResourceNotFound, errno.ESRCH: errors.ResourceNotFound,", "os_errors = ( self.DIR_ERRORS if self._directory else self.FILE_ERRORS ) if", "ENONET errno.EACCES: errors.PermissionDenied, errno.ENOENT: errors.ResourceNotFound, errno.EFAULT: errors.ResourceNotFound, errno.ESRCH: errors.ResourceNotFound, errno.ENOTEMPTY:", "'path'): if isinstance(path_replace, dict): e.path = path_replace.get(e.path, e.path) else: e.path", "self.DIR_ERRORS if self._directory else self.FILE_ERRORS ) if exc_type and isinstance(exc_value,", "errors from six import reraise _WINDOWS_PLATFORM = platform.system() == 'Windows'", "_errno = exc_value.errno fserror = os_errors.get(_errno, errors.OperationFailed) if _errno ==", "isinstance(path_replace, dict): e.path = path_replace.get(e.path, e.path) else: e.path = path_replace", "'Windows' class _ConvertOSErrors(object): \"\"\"Context manager to convert OSErrors in to", "errors.PermissionDenied, errno.ENOENT: errors.ResourceNotFound, errno.EFAULT: errors.ResourceNotFound, errno.ESRCH: errors.ResourceNotFound, errno.ENOTEMPTY: errors.DirectoryNotEmpty, errno.EEXIST:", "errors.ResourceNotFound, errno.ESRCH: errors.ResourceNotFound, errno.ENOTEMPTY: errors.DirectoryNotEmpty, errno.EEXIST: errors.FileExists, 183: errors.DirectoryExists, #errno.ENOTDIR:", "errno.EPERM: errors.PermissionDenied, errno.ENETDOWN: errors.RemoteConnectionError, errno.ECONNRESET: errors.RemoteConnectionError, errno.ENAMETOOLONG: 
errors.PathError, errno.EOPNOTSUPP: errors.Unsupported,", "exc_value, traceback): os_errors = ( self.DIR_ERRORS if self._directory else self.FILE_ERRORS", "in resource exceptions to be in the same context as", "be the path from the parent, if only one path", "self.FILE_ERRORS ) if exc_type and isinstance(exc_value, EnvironmentError): _errno = exc_value.errno", "is to be unwrapped. Or it may be a dictionary", "{ 64: errors.RemoteConnectionError, # ENONET errno.EACCES: errors.PermissionDenied, errno.ENOENT: errors.ResourceNotFound, errno.EFAULT:", "if exc_type and isinstance(exc_value, EnvironmentError): _errno = exc_value.errno fserror =", "errors.FileExpected, errno.ENOSPC: errors.InsufficientStorage, errno.EPERM: errors.PermissionDenied, errno.ENETDOWN: errors.RemoteConnectionError, errno.ECONNRESET: errors.RemoteConnectionError, errno.ENAMETOOLONG:", "FILE_ERRORS.copy() DIR_ERRORS[errno.ENOTDIR] = errors.DirectoryExpected DIR_ERRORS[errno.EEXIST] = errors.DirectoryExists DIR_ERRORS[errno.EINVAL] = errors.DirectoryExpected", "DIR_ERRORS[errno.ENOTDIR] = errors.DirectoryExpected DIR_ERRORS[errno.EEXIST] = errors.DirectoryExists DIR_ERRORS[errno.EINVAL] = errors.DirectoryExpected if", "self def __exit__(self, exc_type, exc_value, traceback): os_errors = ( self.DIR_ERRORS", "Or it may be a dictionary that maps wrapped paths", "DIR_ERRORS = FILE_ERRORS.copy() DIR_ERRORS[errno.ENOTDIR] = errors.DirectoryExpected DIR_ERRORS[errno.EEXIST] = errors.DirectoryExists DIR_ERRORS[errno.EINVAL]", "errno.EFAULT: errors.ResourceNotFound, errno.ESRCH: errors.ResourceNotFound, errno.ENOTEMPTY: errors.DirectoryNotEmpty, errno.EEXIST: errors.FileExists, 183: errors.DirectoryExists,", "isinstance(exc_value, EnvironmentError): _errno = exc_value.errno fserror = os_errors.get(_errno, errors.OperationFailed) if", "os_errors.get(_errno, errors.OperationFailed) if _errno == errno.EACCES and sys.platform == \"win32\":", "if getattr(exc_value, 'args', None) == 32: # pragma: no cover", "path, directory=False): self._opname = opname self._path = path self._directory =", "linter complaining about invalid class name convert_os_errors = _ConvertOSErrors @contextmanager", "def unwrap_errors(path_replace): \"\"\"Get a context to map OS errors to", "errors. \"\"\" from __future__ import print_function from __future__ import unicode_literals", "import unicode_literals import errno from contextlib import contextmanager import sys", "counterpart. The context will re-write the paths in resource exceptions", "errors.DirectoryExpected DIR_ERRORS[errno.EEXIST] = errors.DirectoryExists DIR_ERRORS[errno.EINVAL] = errors.DirectoryExpected if _WINDOWS_PLATFORM: #", "it may be a dictionary that maps wrapped paths on", "__exit__(self, exc_type, exc_value, traceback): os_errors = ( self.DIR_ERRORS if self._directory", "errno.EEXIST: errors.FileExists, 183: errors.DirectoryExists, #errno.ENOTDIR: errors.DirectoryExpected, errno.ENOTDIR: errors.ResourceNotFound, errno.EISDIR: errors.FileExpected,", "errno.ECONNRESET: errors.RemoteConnectionError, errno.ENAMETOOLONG: errors.PathError, errno.EOPNOTSUPP: errors.Unsupported, errno.ENOSYS: errors.Unsupported, } DIR_ERRORS", "opname self._path = path self._directory = directory def __enter__(self): return", "a dictionary that maps wrapped paths on to unwrapped paths.", "try: yield except errors.ResourceError as e: if hasattr(e, 'path'): if", "_WINDOWS_PLATFORM: # pragma: no cover DIR_ERRORS[13] = errors.DirectoryExpected DIR_ERRORS[267] =", "if only one path is to be unwrapped. 
Or it", "except errors.ResourceError as e: if hasattr(e, 'path'): if isinstance(path_replace, dict):", "paths. \"\"\" try: yield except errors.ResourceError as e: if hasattr(e,", "== 32: # pragma: no cover fserror = errors.ResourceLocked reraise(", "the path from the parent, if only one path is", "FILE_ERRORS = { 64: errors.RemoteConnectionError, # ENONET errno.EACCES: errors.PermissionDenied, errno.ENOENT:", "errno.ENOSPC: errors.InsufficientStorage, errno.EPERM: errors.PermissionDenied, errno.ENETDOWN: errors.RemoteConnectionError, errno.ECONNRESET: errors.RemoteConnectionError, errno.ENAMETOOLONG: errors.PathError,", "} DIR_ERRORS = FILE_ERRORS.copy() DIR_ERRORS[errno.ENOTDIR] = errors.DirectoryExpected DIR_ERRORS[errno.EEXIST] = errors.DirectoryExists", "def __enter__(self): return self def __exit__(self, exc_type, exc_value, traceback): os_errors", "exc_type, exc_value, traceback): os_errors = ( self.DIR_ERRORS if self._directory else", "exc_value.errno fserror = os_errors.get(_errno, errors.OperationFailed) if _errno == errno.EACCES and", "return self def __exit__(self, exc_type, exc_value, traceback): os_errors = (", "__init__(self, opname, path, directory=False): self._opname = opname self._path = path", "only parameter may be the path from the parent, if", "directory=False): self._opname = opname self._path = path self._directory = directory", "fserror = errors.ResourceLocked reraise( fserror, fserror( self._path, exc=exc_value ), traceback", "errors.DirectoryExpected if _WINDOWS_PLATFORM: # pragma: no cover DIR_ERRORS[13] = errors.DirectoryExpected", "and isinstance(exc_value, EnvironmentError): _errno = exc_value.errno fserror = os_errors.get(_errno, errors.OperationFailed)", "to unwrapped paths. \"\"\" try: yield except errors.ResourceError as e:", "OSErrors in to FS Errors. \"\"\" FILE_ERRORS = { 64:", "convert OSErrors in to FS Errors. \"\"\" FILE_ERRORS = {", "errors.ResourceNotFound, errno.EFAULT: errors.ResourceNotFound, errno.ESRCH: errors.ResourceNotFound, errno.ENOTEMPTY: errors.DirectoryNotEmpty, errno.EEXIST: errors.FileExists, 183:", "import reraise _WINDOWS_PLATFORM = platform.system() == 'Windows' class _ConvertOSErrors(object): \"\"\"Context", "errors.RemoteConnectionError, errno.ENAMETOOLONG: errors.PathError, errno.EOPNOTSUPP: errors.Unsupported, errno.ENOSYS: errors.Unsupported, } DIR_ERRORS =", "the parent, if only one path is to be unwrapped.", "\"\"\"Context manager to convert OSErrors in to FS Errors. 
\"\"\"", "errno.ENETDOWN: errors.RemoteConnectionError, errno.ECONNRESET: errors.RemoteConnectionError, errno.ENAMETOOLONG: errors.PathError, errno.EOPNOTSUPP: errors.Unsupported, errno.ENOSYS: errors.Unsupported,", "getattr(exc_value, 'args', None) == 32: # pragma: no cover fserror", "yield except errors.ResourceError as e: if hasattr(e, 'path'): if isinstance(path_replace,", "print_function from __future__ import unicode_literals import errno from contextlib import", "errno.EOPNOTSUPP: errors.Unsupported, errno.ENOSYS: errors.Unsupported, } DIR_ERRORS = FILE_ERRORS.copy() DIR_ERRORS[errno.ENOTDIR] =", "EnvironmentError): _errno = exc_value.errno fserror = os_errors.get(_errno, errors.OperationFailed) if _errno", "fserror, fserror( self._path, exc=exc_value ), traceback ) # Stops linter", "The context will re-write the paths in resource exceptions to", "errors.ResourceError as e: if hasattr(e, 'path'): if isinstance(path_replace, dict): e.path", "hasattr(e, 'path'): if isinstance(path_replace, dict): e.path = path_replace.get(e.path, e.path) else:", "platform from . import errors from six import reraise _WINDOWS_PLATFORM", "errors.DirectoryExists, #errno.ENOTDIR: errors.DirectoryExpected, errno.ENOTDIR: errors.ResourceNotFound, errno.EISDIR: errors.FileExpected, errno.EINVAL: errors.FileExpected, errno.ENOSPC:", "# pragma: no cover DIR_ERRORS[13] = errors.DirectoryExpected DIR_ERRORS[267] = errors.DirectoryExpected", "errno.EINVAL: errors.FileExpected, errno.ENOSPC: errors.InsufficientStorage, errno.EPERM: errors.PermissionDenied, errno.ENETDOWN: errors.RemoteConnectionError, errno.ECONNRESET: errors.RemoteConnectionError,", "traceback ) # Stops linter complaining about invalid class name", "no cover fserror = errors.ResourceLocked reraise( fserror, fserror( self._path, exc=exc_value", "pragma: no cover DIR_ERRORS[13] = errors.DirectoryExpected DIR_ERRORS[267] = errors.DirectoryExpected FILE_ERRORS[13]", "OS errors. \"\"\" from __future__ import print_function from __future__ import", "\"\"\" FILE_ERRORS = { 64: errors.RemoteConnectionError, # ENONET errno.EACCES: errors.PermissionDenied,", "reraise( fserror, fserror( self._path, exc=exc_value ), traceback ) # Stops", "invalid class name convert_os_errors = _ConvertOSErrors @contextmanager def unwrap_errors(path_replace): \"\"\"Get", "as e: if hasattr(e, 'path'): if isinstance(path_replace, dict): e.path =", "if isinstance(path_replace, dict): e.path = path_replace.get(e.path, e.path) else: e.path =", "their `fs.errors` counterpart. The context will re-write the paths in", "the same context as the wrapped filesystem. The only parameter", "= errors.DirectoryExpected FILE_ERRORS[13] = errors.FileExpected def __init__(self, opname, path, directory=False):", "#errno.ENOTDIR: errors.DirectoryExpected, errno.ENOTDIR: errors.ResourceNotFound, errno.EISDIR: errors.FileExpected, errno.EINVAL: errors.FileExpected, errno.ENOSPC: errors.InsufficientStorage,", "errors.ResourceNotFound, errno.EISDIR: errors.FileExpected, errno.EINVAL: errors.FileExpected, errno.ENOSPC: errors.InsufficientStorage, errno.EPERM: errors.PermissionDenied, errno.ENETDOWN:", "The only parameter may be the path from the parent,", "context will re-write the paths in resource exceptions to be", "cover fserror = errors.ResourceLocked reraise( fserror, fserror( self._path, exc=exc_value ),", "on to unwrapped paths. 
\"\"\" try: yield except errors.ResourceError as", "traceback): os_errors = ( self.DIR_ERRORS if self._directory else self.FILE_ERRORS )", "( self.DIR_ERRORS if self._directory else self.FILE_ERRORS ) if exc_type and", ") # Stops linter complaining about invalid class name convert_os_errors", "platform.system() == 'Windows' class _ConvertOSErrors(object): \"\"\"Context manager to convert OSErrors", "self._path, exc=exc_value ), traceback ) # Stops linter complaining about", "for managing OS errors. \"\"\" from __future__ import print_function from", "resource exceptions to be in the same context as the", "errors.PermissionDenied, errno.ENETDOWN: errors.RemoteConnectionError, errno.ECONNRESET: errors.RemoteConnectionError, errno.ENAMETOOLONG: errors.PathError, errno.EOPNOTSUPP: errors.Unsupported, errno.ENOSYS:", "be in the same context as the wrapped filesystem. The", "be unwrapped. Or it may be a dictionary that maps", "= errors.ResourceLocked reraise( fserror, fserror( self._path, exc=exc_value ), traceback )", "no cover DIR_ERRORS[13] = errors.DirectoryExpected DIR_ERRORS[267] = errors.DirectoryExpected FILE_ERRORS[13] =", "dictionary that maps wrapped paths on to unwrapped paths. \"\"\"", "contextmanager import sys import platform from . import errors from", "if _errno == errno.EACCES and sys.platform == \"win32\": if getattr(exc_value,", "DIR_ERRORS[13] = errors.DirectoryExpected DIR_ERRORS[267] = errors.DirectoryExpected FILE_ERRORS[13] = errors.FileExpected def", "errors.Unsupported, errno.ENOSYS: errors.Unsupported, } DIR_ERRORS = FILE_ERRORS.copy() DIR_ERRORS[errno.ENOTDIR] = errors.DirectoryExpected", "errors.Unsupported, } DIR_ERRORS = FILE_ERRORS.copy() DIR_ERRORS[errno.ENOTDIR] = errors.DirectoryExpected DIR_ERRORS[errno.EEXIST] =", "= errors.DirectoryExpected DIR_ERRORS[errno.EEXIST] = errors.DirectoryExists DIR_ERRORS[errno.EINVAL] = errors.DirectoryExpected if _WINDOWS_PLATFORM:", "be a dictionary that maps wrapped paths on to unwrapped", ". import errors from six import reraise _WINDOWS_PLATFORM = platform.system()", "errors.FileExists, 183: errors.DirectoryExists, #errno.ENOTDIR: errors.DirectoryExpected, errno.ENOTDIR: errors.ResourceNotFound, errno.EISDIR: errors.FileExpected, errno.EINVAL:", "convert_os_errors = _ConvertOSErrors @contextmanager def unwrap_errors(path_replace): \"\"\"Get a context to", "a context to map OS errors to their `fs.errors` counterpart.", "path from the parent, if only one path is to", "errno.ENOTEMPTY: errors.DirectoryNotEmpty, errno.EEXIST: errors.FileExists, 183: errors.DirectoryExists, #errno.ENOTDIR: errors.DirectoryExpected, errno.ENOTDIR: errors.ResourceNotFound,", "None) == 32: # pragma: no cover fserror = errors.ResourceLocked", "from __future__ import print_function from __future__ import unicode_literals import errno", "DIR_ERRORS[errno.EEXIST] = errors.DirectoryExists DIR_ERRORS[errno.EINVAL] = errors.DirectoryExpected if _WINDOWS_PLATFORM: # pragma:", "to their `fs.errors` counterpart. The context will re-write the paths", "= _ConvertOSErrors @contextmanager def unwrap_errors(path_replace): \"\"\"Get a context to map", "six import reraise _WINDOWS_PLATFORM = platform.system() == 'Windows' class _ConvertOSErrors(object):", "one path is to be unwrapped. 
Or it may be", "_ConvertOSErrors @contextmanager def unwrap_errors(path_replace): \"\"\"Get a context to map OS", "if _WINDOWS_PLATFORM: # pragma: no cover DIR_ERRORS[13] = errors.DirectoryExpected DIR_ERRORS[267]", "errors.OperationFailed) if _errno == errno.EACCES and sys.platform == \"win32\": if", "_ConvertOSErrors(object): \"\"\"Context manager to convert OSErrors in to FS Errors.", "same context as the wrapped filesystem. The only parameter may", "the wrapped filesystem. The only parameter may be the path", "FILE_ERRORS[13] = errors.FileExpected def __init__(self, opname, path, directory=False): self._opname =", "errors.ResourceNotFound, errno.ENOTEMPTY: errors.DirectoryNotEmpty, errno.EEXIST: errors.FileExists, 183: errors.DirectoryExists, #errno.ENOTDIR: errors.DirectoryExpected, errno.ENOTDIR:", "class _ConvertOSErrors(object): \"\"\"Context manager to convert OSErrors in to FS", "map OS errors to their `fs.errors` counterpart. The context will", "== errno.EACCES and sys.platform == \"win32\": if getattr(exc_value, 'args', None)", "'args', None) == 32: # pragma: no cover fserror =", "<gh_stars>0 \"\"\"Tools for managing OS errors. \"\"\" from __future__ import", "errors.DirectoryNotEmpty, errno.EEXIST: errors.FileExists, 183: errors.DirectoryExists, #errno.ENOTDIR: errors.DirectoryExpected, errno.ENOTDIR: errors.ResourceNotFound, errno.EISDIR:", "managing OS errors. \"\"\" from __future__ import print_function from __future__", "Errors. \"\"\" FILE_ERRORS = { 64: errors.RemoteConnectionError, # ENONET errno.EACCES:", "_WINDOWS_PLATFORM = platform.system() == 'Windows' class _ConvertOSErrors(object): \"\"\"Context manager to", "64: errors.RemoteConnectionError, # ENONET errno.EACCES: errors.PermissionDenied, errno.ENOENT: errors.ResourceNotFound, errno.EFAULT: errors.ResourceNotFound,", "to convert OSErrors in to FS Errors. \"\"\" FILE_ERRORS =", "else self.FILE_ERRORS ) if exc_type and isinstance(exc_value, EnvironmentError): _errno =", "DIR_ERRORS[errno.EINVAL] = errors.DirectoryExpected if _WINDOWS_PLATFORM: # pragma: no cover DIR_ERRORS[13]", "errors.ResourceLocked reraise( fserror, fserror( self._path, exc=exc_value ), traceback ) #", "complaining about invalid class name convert_os_errors = _ConvertOSErrors @contextmanager def", "class name convert_os_errors = _ConvertOSErrors @contextmanager def unwrap_errors(path_replace): \"\"\"Get a", "`fs.errors` counterpart. The context will re-write the paths in resource", "paths in resource exceptions to be in the same context", "== 'Windows' class _ConvertOSErrors(object): \"\"\"Context manager to convert OSErrors in", "exceptions to be in the same context as the wrapped", "filesystem. The only parameter may be the path from the", "self._directory = directory def __enter__(self): return self def __exit__(self, exc_type,", "to be in the same context as the wrapped filesystem.", "manager to convert OSErrors in to FS Errors. \"\"\" FILE_ERRORS", "errno.ENOSYS: errors.Unsupported, } DIR_ERRORS = FILE_ERRORS.copy() DIR_ERRORS[errno.ENOTDIR] = errors.DirectoryExpected DIR_ERRORS[errno.EEXIST]", "= directory def __enter__(self): return self def __exit__(self, exc_type, exc_value,", "path is to be unwrapped. Or it may be a", "from __future__ import unicode_literals import errno from contextlib import contextmanager", "maps wrapped paths on to unwrapped paths. \"\"\" try: yield", "that maps wrapped paths on to unwrapped paths. 
\"\"\" try:", "errno.ENAMETOOLONG: errors.PathError, errno.EOPNOTSUPP: errors.Unsupported, errno.ENOSYS: errors.Unsupported, } DIR_ERRORS = FILE_ERRORS.copy()", "errors.PathError, errno.EOPNOTSUPP: errors.Unsupported, errno.ENOSYS: errors.Unsupported, } DIR_ERRORS = FILE_ERRORS.copy() DIR_ERRORS[errno.ENOTDIR]", "errno.ENOENT: errors.ResourceNotFound, errno.EFAULT: errors.ResourceNotFound, errno.ESRCH: errors.ResourceNotFound, errno.ENOTEMPTY: errors.DirectoryNotEmpty, errno.EEXIST: errors.FileExists,", "cover DIR_ERRORS[13] = errors.DirectoryExpected DIR_ERRORS[267] = errors.DirectoryExpected FILE_ERRORS[13] = errors.FileExpected", "sys.platform == \"win32\": if getattr(exc_value, 'args', None) == 32: #", "may be the path from the parent, if only one", "\"\"\"Get a context to map OS errors to their `fs.errors`", "parameter may be the path from the parent, if only", "= errors.DirectoryExists DIR_ERRORS[errno.EINVAL] = errors.DirectoryExpected if _WINDOWS_PLATFORM: # pragma: no", "the paths in resource exceptions to be in the same", "in the same context as the wrapped filesystem. The only", "self._path = path self._directory = directory def __enter__(self): return self", "errors.DirectoryExpected, errno.ENOTDIR: errors.ResourceNotFound, errno.EISDIR: errors.FileExpected, errno.EINVAL: errors.FileExpected, errno.ENOSPC: errors.InsufficientStorage, errno.EPERM:", "__future__ import unicode_literals import errno from contextlib import contextmanager import", "from . import errors from six import reraise _WINDOWS_PLATFORM =", "to map OS errors to their `fs.errors` counterpart. The context", "\"\"\"Tools for managing OS errors. \"\"\" from __future__ import print_function", "= { 64: errors.RemoteConnectionError, # ENONET errno.EACCES: errors.PermissionDenied, errno.ENOENT: errors.ResourceNotFound,", "fserror = os_errors.get(_errno, errors.OperationFailed) if _errno == errno.EACCES and sys.platform", "errno.ESRCH: errors.ResourceNotFound, errno.ENOTEMPTY: errors.DirectoryNotEmpty, errno.EEXIST: errors.FileExists, 183: errors.DirectoryExists, #errno.ENOTDIR: errors.DirectoryExpected,", "contextlib import contextmanager import sys import platform from . import", "FS Errors. \"\"\" FILE_ERRORS = { 64: errors.RemoteConnectionError, # ENONET", "errors.InsufficientStorage, errno.EPERM: errors.PermissionDenied, errno.ENETDOWN: errors.RemoteConnectionError, errno.ECONNRESET: errors.RemoteConnectionError, errno.ENAMETOOLONG: errors.PathError, errno.EOPNOTSUPP:", "), traceback ) # Stops linter complaining about invalid class", "errno from contextlib import contextmanager import sys import platform from", "wrapped paths on to unwrapped paths. \"\"\" try: yield except", "\"\"\" try: yield except errors.ResourceError as e: if hasattr(e, 'path'):", "as the wrapped filesystem. The only parameter may be the", "import sys import platform from . import errors from six", "183: errors.DirectoryExists, #errno.ENOTDIR: errors.DirectoryExpected, errno.ENOTDIR: errors.ResourceNotFound, errno.EISDIR: errors.FileExpected, errno.EINVAL: errors.FileExpected,", "\"win32\": if getattr(exc_value, 'args', None) == 32: # pragma: no", "wrapped filesystem. 
The only parameter may be the path from", "32: # pragma: no cover fserror = errors.ResourceLocked reraise( fserror,", "= errors.DirectoryExpected DIR_ERRORS[267] = errors.DirectoryExpected FILE_ERRORS[13] = errors.FileExpected def __init__(self,", "errors.RemoteConnectionError, errno.ECONNRESET: errors.RemoteConnectionError, errno.ENAMETOOLONG: errors.PathError, errno.EOPNOTSUPP: errors.Unsupported, errno.ENOSYS: errors.Unsupported, }", "= errors.DirectoryExpected if _WINDOWS_PLATFORM: # pragma: no cover DIR_ERRORS[13] =", "if hasattr(e, 'path'): if isinstance(path_replace, dict): e.path = path_replace.get(e.path, e.path)", "about invalid class name convert_os_errors = _ConvertOSErrors @contextmanager def unwrap_errors(path_replace):", "path self._directory = directory def __enter__(self): return self def __exit__(self,", "unwrapped. Or it may be a dictionary that maps wrapped", "# Stops linter complaining about invalid class name convert_os_errors =", "reraise _WINDOWS_PLATFORM = platform.system() == 'Windows' class _ConvertOSErrors(object): \"\"\"Context manager", "from the parent, if only one path is to be", "__future__ import print_function from __future__ import unicode_literals import errno from", "sys import platform from . import errors from six import", "directory def __enter__(self): return self def __exit__(self, exc_type, exc_value, traceback):", "import errno from contextlib import contextmanager import sys import platform", "context as the wrapped filesystem. The only parameter may be", "opname, path, directory=False): self._opname = opname self._path = path self._directory", "= FILE_ERRORS.copy() DIR_ERRORS[errno.ENOTDIR] = errors.DirectoryExpected DIR_ERRORS[errno.EEXIST] = errors.DirectoryExists DIR_ERRORS[errno.EINVAL] =", "pragma: no cover fserror = errors.ResourceLocked reraise( fserror, fserror( self._path,", "errno.EACCES and sys.platform == \"win32\": if getattr(exc_value, 'args', None) ==", "= errors.FileExpected def __init__(self, opname, path, directory=False): self._opname = opname", "unicode_literals import errno from contextlib import contextmanager import sys import" ]
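A minimal usage sketch, not part of the module above, of how convert_os_errors and unwrap_errors are typically combined: the first turns an OSError raised by OS-level calls into the matching fs.errors exception, the second rewrites the path carried by such an exception back into the wrapping filesystem's namespace. The import path fs.error_tools, the backing path and the path mapping are illustrative assumptions.

# Hedged usage sketch; assumes the module above is importable as fs.error_tools
# and that fs.errors is available. Paths below are made-up examples.
import io

from fs import errors
from fs.error_tools import convert_os_errors, unwrap_errors


def read_backing_file(syspath):
    # Any OSError raised inside this block is re-raised as the matching
    # fs.errors exception, carrying `syspath` as the failing path.
    with convert_os_errors('read', syspath):
        with io.open(syspath, 'rb') as f:
            return f.read()


try:
    # Rewrite the OS-level path in any ResourceError back to the path the
    # wrapping filesystem exposes ('/readme.txt' here).
    with unwrap_errors({'/tmp/backing/readme.txt': '/readme.txt'}):
        read_backing_file('/tmp/backing/readme.txt')
except errors.ResourceNotFound as error:
    print(error.path)  # '/readme.txt' rather than the backing OS path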
[ "'%s' is connected to the Controller\" % nodeName) else: print", "permission. # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS", "ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,", "result = vrouter.get_cfg() status = result.get_status() if(status.eq(STATUS.OK)): print (\"'%s' configuration:\"", "must reproduce the above copyright notice, # this list of", "above copyright notice, # this list of conditions and the", "vr_demo_3(): f = \"cfg4.yml\" d = {} if(load_dict_from_file(f, d) is", "terminated, reason: %s\" % status.brief()) exit(0) print (\"\\n\") print (\">>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>\")", "ctrlIpAddr = d['ctrlIpAddr'] ctrlPortNum = d['ctrlPortNum'] ctrlUname = d['ctrlUname'] ctrlPswd", "%s, '%s': %s\" % (ctrlIpAddr, nodeName, nodeIpAddr)) print (\"\\n\") time.sleep(rundelay)", "rights reserved. # Redistribution and use in source and binary", "print (\"<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<\") print (\"\\n\") ctrl = Controller(ctrlIpAddr, ctrlPortNum, ctrlUname, ctrlPswd)", "notice, # this list of conditions and the following disclaimer.", "'Controller': %s, '%s': %s\" % (ctrlIpAddr, nodeName, nodeIpAddr)) print (\"\\n\")", "reason: %s\" % status.brief().lower()) exit(0) print \"\\n\" print (\">>> Remove", "reason: %s\" % status.brief()) exit(0) print (\"\\n\") print (\">>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>\") print", "the following conditions are met: # 1. Redistributions of source", "print (\"<<< '%s' is connected to the Controller\" % nodeName)", "THIS SOFTWARE, EVEN IF ADVISED OF # THE POSSIBILITY OF", "= ctrl.delete_netconf_node(vrouter) status = result.get_status() if(status.eq(STATUS.OK)): print (\"'%s' NETCONF node", "# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF", "f) exit() try: ctrlIpAddr = d['ctrlIpAddr'] ctrlPortNum = d['ctrlPortNum'] ctrlUname", "= json.loads(cfg) print json.dumps(data, indent=4) else: print (\"\\n\") print (\"!!!Demo", "THEORY OF LIABILITY, WHETHER IN # CONTRACT, STRICT LIABILITY, OR", "NEGLIGENCE OR OTHERWISE) # ARISING IN ANY WAY OUT OF", "prior written permission. 
# THIS SOFTWARE IS PROVIDED BY THE", "nodeName) time.sleep(rundelay) result = vrouter.get_cfg() status = result.get_status() if(status.eq(STATUS.OK)): print", "= d['nodeName'] nodeIpAddr = d['nodeIpAddr'] nodePortNum = d['nodePortNum'] nodeUname =", "% status.detailed()) exit(0) if node_configured is False: result = ctrl.add_netconf_node(vrouter)", "EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE # LIABLE", "d['nodePortNum'] nodeUname = d['nodeUname'] nodePswd = d['nodePswd'] rundelay = d['rundelay']", "Development @version: 1.1.0 \"\"\" import time import json from pysdn.controller.controller", "source and binary forms, with or without # modification, are", "(\"'%s' NETCONF node was successfully removed \" \"from the Controller\"", "device attributes\") exit(0) print (\"<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<\") print (\"<<< Demo Start\") print", "exit(0) print (\"\\n\") print (\">>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>\") print (\">>> Demo End\") print", "status.brief()) exit(0) print (\"\\n\") print (\">>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>\") print (\">>> Demo End\")", "nodeName) time.sleep(rundelay) result = ctrl.delete_netconf_node(vrouter) status = result.get_status() if(status.eq(STATUS.OK)): print", "INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER", "print (\"<<< '%s' added to the Controller\" % nodeName) else:", "if(status.eq(STATUS.OK)): print (\"'%s' configuration:\" % nodeName) cfg = result.get_data() data", "is False): print(\"Config file '%s' read error: \" % f)", "'%s' NETCONF node from the Controller\" % nodeName) time.sleep(rundelay) result", "COPYRIGHT HOLDER OR CONTRIBUTORS BE # LIABLE FOR ANY DIRECT,", "import load_dict_from_file def vr_demo_3(): f = \"cfg4.yml\" d = {}", "else: print (\"\\n\") print \"Failed to get configuration status for", "CONTRIBUTORS BE # LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,", "ctrl.add_netconf_node(vrouter) status = result.get_status() if(status.eq(STATUS.OK)): print (\"<<< '%s' added to", "%s\" % status.brief()) exit(0) print (\"\\n\") print (\">>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>\") print (\">>>", "IF ADVISED OF # THE POSSIBILITY OF SUCH DAMAGE. \"\"\"", "DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF # SUBSTITUTE", "import json from pysdn.controller.controller import Controller from pysdn.netconfdev.vrouter.vrouter5600 import VRouter5600", "status = result.get_status() if(status.eq(STATUS.OK)): print (\"'%s' configuration:\" % nodeName) cfg", "(\"<<< '%s' is configured on the Controller\" % nodeName) elif(status.eq(STATUS.DATA_NOT_FOUND)):", "(\"\\n\") time.sleep(rundelay) result = ctrl.check_node_conn_status(nodeName) status = result.get_status() if(status.eq(STATUS.NODE_CONNECTED)): print", "AND CONTRIBUTORS \"AS IS\" # AND ANY EXPRESS OR IMPLIED", "terminated, reason: %s\" % status.brief().lower()) exit(0) print \"\\n\" print (\">>>", "disclaimer. # 2. Redistributions in binary form must reproduce the", "provided that the following conditions are met: # 1. 
Redistributions", "and binary forms, with or without # modification, are permitted", "print(\"\\n\") print (\"<<< Show configuration of the '%s'\" % nodeName)", "names of its # contributors may be used to endorse", "nodeName print (\"!!!Demo terminated, reason: %s\" % status.detailed()) exit(0) if", "Start\") print (\"<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<\") print (\"\\n\") ctrl = Controller(ctrlIpAddr, ctrlPortNum, ctrlUname,", "get configuration status for the '%s'\" % nodeName print (\"!!!Demo", "json.dumps(data, indent=4) else: print (\"\\n\") print (\"!!!Demo terminated, reason: %s\"", "(\"!!!Demo terminated, reason: %s\" % status.brief()) exit(0) print (\"\\n\") print", "= ctrl.check_node_conn_status(nodeName) status = result.get_status() if(status.eq(STATUS.NODE_CONNECTED)): print (\"<<< '%s' is", "of source code must retain the above copyright notice, #", "= True print (\"<<< '%s' is configured on the Controller\"", "EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE", "terminated, reason: %s\" % status.detailed()) exit(0) print (\"\\n\") time.sleep(rundelay) result", "% nodeName) else: print (\"\\n\") print (\"!!!Demo terminated, reason: %s\"", "ctrl.delete_netconf_node(vrouter) status = result.get_status() if(status.eq(STATUS.OK)): print (\"'%s' NETCONF node was", "'%s' added to the Controller\" % nodeName) else: print (\"\\n\")", "# this list of conditions and the following disclaimer. #", "% status.detailed()) exit(0) print (\"\\n\") time.sleep(rundelay) result = ctrl.check_node_conn_status(nodeName) status", "or promote products derived from this # software without specific", "specific prior written permission. # THIS SOFTWARE IS PROVIDED BY", "result.get_status() if(status.eq(STATUS.OK)): print (\"<<< '%s' added to the Controller\" %", "status.detailed()) exit(0) print (\"\\n\") time.sleep(rundelay) result = ctrl.check_node_conn_status(nodeName) status =", "= d['ctrlUname'] ctrlPswd = d['ctrlPswd'] nodeName = d['nodeName'] nodeIpAddr =", "notice, # this list of conditions and the following disclaimer", "get Controller device attributes\") exit(0) print (\"<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<\") print (\"<<< Demo", "GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR", "WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF", "(\"\\n\") print (\"!!!Demo terminated, reason: %s\" % status.brief().lower()) exit(0) print(\"\\n\")", "d['nodePswd'] rundelay = d['rundelay'] except: print (\"Failed to get Controller", "BE # LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY,", "else: print (\"\\n\") print (\"!!!Demo terminated, reason: %s\" % status.detailed())", "if(status.eq(STATUS.OK)): print (\"'%s' NETCONF node was successfully removed \" \"from", "(\"<<< '%s' is connected to the Controller\" % nodeName) else:", "software without specific prior written permission. 
# THIS SOFTWARE IS", "USE OF THIS SOFTWARE, EVEN IF ADVISED OF # THE", "ctrl.check_node_config_status(nodeName) status = result.get_status() if(status.eq(STATUS.NODE_CONFIGURED)): node_configured = True print (\"<<<", "(\">>> Demo End\") print (\">>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>\") if __name__ == \"__main__\": vr_demo_3()", "= \"cfg4.yml\" d = {} if(load_dict_from_file(f, d) is False): print(\"Config", "= d['rundelay'] except: print (\"Failed to get Controller device attributes\")", "error: \" % f) exit() try: ctrlIpAddr = d['ctrlIpAddr'] ctrlPortNum", "THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS", "result = ctrl.check_node_conn_status(nodeName) status = result.get_status() if(status.eq(STATUS.NODE_CONNECTED)): print (\"<<< '%s'", "disclaimer in the documentation # and/or other materials provided with", "% nodeName) time.sleep(rundelay) result = ctrl.delete_netconf_node(vrouter) status = result.get_status() if(status.eq(STATUS.OK)):", "\"AS IS\" # AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING,", "exit(0) print \"\\n\" print (\">>> Remove '%s' NETCONF node from", "%s\" % status.detailed()) exit(0) if node_configured is False: result =", "BUT NOT LIMITED TO, PROCUREMENT OF # SUBSTITUTE GOODS OR", "\"from the Controller\" % nodeName) else: print (\"\\n\") print (\"!!!Demo", "TO, THE # IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR", "promote products derived from this # software without specific prior", "(\"<<< Show configuration of the '%s'\" % nodeName) time.sleep(rundelay) result", "distribution. # 3. Neither the name of the copyright holder", "the Controller\" % nodeName) elif(status.eq(STATUS.DATA_NOT_FOUND)): node_configured = False else: print", "CONTRIBUTORS \"AS IS\" # AND ANY EXPRESS OR IMPLIED WARRANTIES,", "(\"<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<\") print (\"\\n\") ctrl = Controller(ctrlIpAddr, ctrlPortNum, ctrlUname, ctrlPswd) vrouter", "result.get_data() data = json.loads(cfg) print json.dumps(data, indent=4) else: print (\"\\n\")", "print(\"Config file '%s' read error: \" % f) exit() try:", "THE POSSIBILITY OF SUCH DAMAGE. \"\"\" @authors: <NAME> @status: Development", "FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR # CONSEQUENTIAL", "name of the copyright holder nor the names of its", "False result = ctrl.check_node_config_status(nodeName) status = result.get_status() if(status.eq(STATUS.NODE_CONFIGURED)): node_configured =", "(\"\\n\") print \"Failed to get configuration status for the '%s'\"", "configuration of the '%s'\" % nodeName) time.sleep(rundelay) result = vrouter.get_cfg()", "# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR", "CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN #", "elif(status.eq(STATUS.DATA_NOT_FOUND)): node_configured = False else: print (\"\\n\") print \"Failed to", "status.brief().lower()) exit(0) print \"\\n\" print (\">>> Remove '%s' NETCONF node", "ctrlUname = d['ctrlUname'] ctrlPswd = d['ctrlPswd'] nodeName = d['nodeName'] nodeIpAddr", "= vrouter.get_cfg() status = result.get_status() if(status.eq(STATUS.OK)): print (\"'%s' configuration:\" %", "nodePswd) print (\"<<< 'Controller': %s, '%s': %s\" % (ctrlIpAddr, nodeName,", "AND ON ANY THEORY OF LIABILITY, WHETHER IN # CONTRACT,", "terminated, reason: %s\" % status.detailed()) exit(0) if node_configured is False:", "OF SUCH DAMAGE. 
\"\"\" @authors: <NAME> @status: Development @version: 1.1.0", "Redistributions of source code must retain the above copyright notice,", "of conditions and the following disclaimer. # 2. Redistributions in", "permitted provided that the following conditions are met: # 1.", "ctrl = Controller(ctrlIpAddr, ctrlPortNum, ctrlUname, ctrlPswd) vrouter = VRouter5600(ctrl, nodeName,", "f = \"cfg4.yml\" d = {} if(load_dict_from_file(f, d) is False):", "print (\"\\n\") print \"Failed to get configuration status for the", "if(load_dict_from_file(f, d) is False): print(\"Config file '%s' read error: \"", "SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS #", "documentation # and/or other materials provided with the distribution. #", "True print (\"<<< '%s' is configured on the Controller\" %", "VRouter5600 from pysdn.common.status import STATUS from pysdn.common.utils import load_dict_from_file def", "IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE", "or without # modification, are permitted provided that the following", "file '%s' read error: \" % f) exit() try: ctrlIpAddr", "HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN", "Redistribution and use in source and binary forms, with or", "source code must retain the above copyright notice, # this", "# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT", "that the following conditions are met: # 1. Redistributions of", "Copyright (c) 2015, BROCADE COMMUNICATIONS SYSTEMS, INC # All rights", "d['ctrlPortNum'] ctrlUname = d['ctrlUname'] ctrlPswd = d['ctrlPswd'] nodeName = d['nodeName']", "(\"\\n\") print (\"!!!Demo terminated, reason: %s\" % status.detailed()) exit(0) print", "import VRouter5600 from pysdn.common.status import STATUS from pysdn.common.utils import load_dict_from_file", "Controller device attributes\") exit(0) print (\"<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<\") print (\"<<< Demo Start\")", "USE, DATA, OR PROFITS; OR BUSINESS # INTERRUPTION) HOWEVER CAUSED", "(\"<<< 'Controller': %s, '%s': %s\" % (ctrlIpAddr, nodeName, nodeIpAddr)) print", "@status: Development @version: 1.1.0 \"\"\" import time import json from", "print (\"\\n\") print (\"!!!Demo terminated, reason: %s\" % status.detailed()) exit(0)", "print (\"<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<\") print (\"<<< Demo Start\") print (\"<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<\") print (\"\\n\")", "\" % f) exit() try: ctrlIpAddr = d['ctrlIpAddr'] ctrlPortNum =", "IN # CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR", "nor the names of its # contributors may be used", "binary form must reproduce the above copyright notice, # this", "OTHERWISE) # ARISING IN ANY WAY OUT OF THE USE", "exit(0) print (\"<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<\") print (\"<<< Demo Start\") print (\"<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<\") print", "(\"!!!Demo terminated, reason: %s\" % status.brief().lower()) exit(0) print(\"\\n\") print (\"<<<", "time import json from pysdn.controller.controller import Controller from pysdn.netconfdev.vrouter.vrouter5600 import", "# Copyright (c) 2015, BROCADE COMMUNICATIONS SYSTEMS, INC # All", "written permission. 
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT", "PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\" #", "reason: %s\" % status.detailed()) exit(0) if node_configured is False: result", "json.loads(cfg) print json.dumps(data, indent=4) else: print (\"\\n\") print (\"!!!Demo terminated,", "INCIDENTAL, SPECIAL, EXEMPLARY, OR # CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT", "CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) #", "VRouter5600(ctrl, nodeName, nodeIpAddr, nodePortNum, nodeUname, nodePswd) print (\"<<< 'Controller': %s,", "Remove '%s' NETCONF node from the Controller\" % nodeName) time.sleep(rundelay)", "OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS", "node_configured = True print (\"<<< '%s' is configured on the", "node from the Controller\" % nodeName) time.sleep(rundelay) result = ctrl.delete_netconf_node(vrouter)", "print (\"\\n\") ctrl = Controller(ctrlIpAddr, ctrlPortNum, ctrlUname, ctrlPswd) vrouter =", "Controller\" % nodeName) time.sleep(rundelay) result = ctrl.delete_netconf_node(vrouter) status = result.get_status()", "PROFITS; OR BUSINESS # INTERRUPTION) HOWEVER CAUSED AND ON ANY", "TORT (INCLUDING NEGLIGENCE OR OTHERWISE) # ARISING IN ANY WAY", "nodeUname = d['nodeUname'] nodePswd = d['nodePswd'] rundelay = d['rundelay'] except:", "All rights reserved. # Redistribution and use in source and", "status for the '%s'\" % nodeName print (\"!!!Demo terminated, reason:", "3. Neither the name of the copyright holder nor the", "reason: %s\" % status.brief().lower()) exit(0) print(\"\\n\") print (\"<<< Show configuration", "print (\"<<< 'Controller': %s, '%s': %s\" % (ctrlIpAddr, nodeName, nodeIpAddr))", "read error: \" % f) exit() try: ctrlIpAddr = d['ctrlIpAddr']", "in source and binary forms, with or without # modification,", "is False: result = ctrl.add_netconf_node(vrouter) status = result.get_status() if(status.eq(STATUS.OK)): print", "# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,", "is configured on the Controller\" % nodeName) elif(status.eq(STATUS.DATA_NOT_FOUND)): node_configured =", "%s\" % status.brief().lower()) exit(0) print \"\\n\" print (\">>> Remove '%s'", "A PARTICULAR PURPOSE # ARE DISCLAIMED. IN NO EVENT SHALL", "SUCH DAMAGE. \"\"\" @authors: <NAME> @status: Development @version: 1.1.0 \"\"\"", "AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED", "(\"<<< '%s' added to the Controller\" % nodeName) else: print", "THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF #", "successfully removed \" \"from the Controller\" % nodeName) else: print", "in the documentation # and/or other materials provided with the", "reserved. # Redistribution and use in source and binary forms,", "of conditions and the following disclaimer in the documentation #", "'%s' read error: \" % f) exit() try: ctrlIpAddr =", "to get Controller device attributes\") exit(0) print (\"<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<\") print (\"<<<", "from pysdn.common.utils import load_dict_from_file def vr_demo_3(): f = \"cfg4.yml\" d", "attributes\") exit(0) print (\"<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<\") print (\"<<< Demo Start\") print (\"<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<\")", "are met: # 1. 
Redistributions of source code must retain", "configuration:\" % nodeName) cfg = result.get_data() data = json.loads(cfg) print", "result.get_status() if(status.eq(STATUS.NODE_CONNECTED)): print (\"<<< '%s' is connected to the Controller\"", "ctrlPswd) vrouter = VRouter5600(ctrl, nodeName, nodeIpAddr, nodePortNum, nodeUname, nodePswd) print", "from this # software without specific prior written permission. #", "this list of conditions and the following disclaimer. # 2.", "form must reproduce the above copyright notice, # this list", "= False result = ctrl.check_node_config_status(nodeName) status = result.get_status() if(status.eq(STATUS.NODE_CONFIGURED)): node_configured", "connected to the Controller\" % nodeName) else: print (\"\\n\") print", "# 1. Redistributions of source code must retain the above", "without specific prior written permission. # THIS SOFTWARE IS PROVIDED", "use in source and binary forms, with or without #", "import time import json from pysdn.controller.controller import Controller from pysdn.netconfdev.vrouter.vrouter5600", "False else: print (\"\\n\") print \"Failed to get configuration status", "POSSIBILITY OF SUCH DAMAGE. \"\"\" @authors: <NAME> @status: Development @version:", "print json.dumps(data, indent=4) else: print (\"\\n\") print (\"!!!Demo terminated, reason:", "node_configured is False: result = ctrl.add_netconf_node(vrouter) status = result.get_status() if(status.eq(STATUS.OK)):", "STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) # ARISING", "\"\\n\" print (\">>> Remove '%s' NETCONF node from the Controller\"", "% f) exit() try: ctrlIpAddr = d['ctrlIpAddr'] ctrlPortNum = d['ctrlPortNum']", "EXEMPLARY, OR # CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,", "OR BUSINESS # INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY", "ctrlPortNum, ctrlUname, ctrlPswd) vrouter = VRouter5600(ctrl, nodeName, nodeIpAddr, nodePortNum, nodeUname,", "print (\"\\n\") print (\"!!!Demo terminated, reason: %s\" % status.brief()) exit(0)", "= d['nodePswd'] rundelay = d['rundelay'] except: print (\"Failed to get", "pysdn.common.utils import load_dict_from_file def vr_demo_3(): f = \"cfg4.yml\" d =", "code must retain the above copyright notice, # this list", "status.detailed()) exit(0) if node_configured is False: result = ctrl.add_netconf_node(vrouter) status", "# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER", "result = ctrl.add_netconf_node(vrouter) status = result.get_status() if(status.eq(STATUS.OK)): print (\"<<< '%s'", "nodeName) cfg = result.get_data() data = json.loads(cfg) print json.dumps(data, indent=4)", "# contributors may be used to endorse or promote products", "ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR # CONSEQUENTIAL DAMAGES", "INC # All rights reserved. 
# Redistribution and use in", "% nodeName print (\"!!!Demo terminated, reason: %s\" % status.detailed()) exit(0)", "NETCONF node was successfully removed \" \"from the Controller\" %", "print (\"<<< Demo Start\") print (\"<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<\") print (\"\\n\") ctrl =", "status = result.get_status() if(status.eq(STATUS.NODE_CONNECTED)): print (\"<<< '%s' is connected to", "else: print (\"\\n\") print (\"!!!Demo terminated, reason: %s\" % status.brief())", "= d['nodePortNum'] nodeUname = d['nodeUname'] nodePswd = d['nodePswd'] rundelay =", "OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED", "OR CONTRIBUTORS BE # LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,", "print (\"\\n\") time.sleep(rundelay) result = ctrl.check_node_conn_status(nodeName) status = result.get_status() if(status.eq(STATUS.NODE_CONNECTED)):", "d['rundelay'] except: print (\"Failed to get Controller device attributes\") exit(0)", "LOSS OF USE, DATA, OR PROFITS; OR BUSINESS # INTERRUPTION)", "(\"\\n\") print (\">>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>\") print (\">>> Demo End\") print (\">>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>\") if", "d['ctrlUname'] ctrlPswd = d['ctrlPswd'] nodeName = d['nodeName'] nodeIpAddr = d['nodeIpAddr']", "with or without # modification, are permitted provided that the", "# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR", "following conditions are met: # 1. Redistributions of source code", "Controller\" % nodeName) else: print (\"\\n\") print (\"!!!Demo terminated, reason:", "THE COPYRIGHT HOLDER OR CONTRIBUTORS BE # LIABLE FOR ANY", "(c) 2015, BROCADE COMMUNICATIONS SYSTEMS, INC # All rights reserved.", "HOLDER OR CONTRIBUTORS BE # LIABLE FOR ANY DIRECT, INDIRECT,", "(\"\\n\") ctrl = Controller(ctrlIpAddr, ctrlPortNum, ctrlUname, ctrlPswd) vrouter = VRouter5600(ctrl,", "print (\"\\n\") time.sleep(rundelay) node_configured = False result = ctrl.check_node_config_status(nodeName) status", "result = ctrl.delete_netconf_node(vrouter) status = result.get_status() if(status.eq(STATUS.OK)): print (\"'%s' NETCONF", "try: ctrlIpAddr = d['ctrlIpAddr'] ctrlPortNum = d['ctrlPortNum'] ctrlUname = d['ctrlUname']", "WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE # IMPLIED WARRANTIES", "NOT LIMITED TO, PROCUREMENT OF # SUBSTITUTE GOODS OR SERVICES;", "(\"!!!Demo terminated, reason: %s\" % status.brief().lower()) exit(0) print \"\\n\" print", "% status.brief()) exit(0) print (\"\\n\") print (\">>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>\") print (\">>> Demo", "#!/usr/bin/python # Copyright (c) 2015, BROCADE COMMUNICATIONS SYSTEMS, INC #", "# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR", "# this list of conditions and the following disclaimer in", "terminated, reason: %s\" % status.brief().lower()) exit(0) print(\"\\n\") print (\"<<< Show", "\" \"from the Controller\" % nodeName) else: print (\"\\n\") print", "the distribution. # 3. Neither the name of the copyright", "(\">>> Remove '%s' NETCONF node from the Controller\" % nodeName)", "the Controller\" % nodeName) else: print (\"\\n\") print (\"!!!Demo terminated,", "DATA, OR PROFITS; OR BUSINESS # INTERRUPTION) HOWEVER CAUSED AND", "be used to endorse or promote products derived from this", "print (\"<<< '%s' is configured on the Controller\" % nodeName)", "2015, BROCADE COMMUNICATIONS SYSTEMS, INC # All rights reserved. 
#", "@authors: <NAME> @status: Development @version: 1.1.0 \"\"\" import time import", "BUSINESS # INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF", "= d['ctrlIpAddr'] ctrlPortNum = d['ctrlPortNum'] ctrlUname = d['ctrlUname'] ctrlPswd =", "print (\"!!!Demo terminated, reason: %s\" % status.brief().lower()) exit(0) print \"\\n\"", "exit(0) if node_configured is False: result = ctrl.add_netconf_node(vrouter) status =", "the following disclaimer in the documentation # and/or other materials", "\"\"\" import time import json from pysdn.controller.controller import Controller from", "HOLDERS AND CONTRIBUTORS \"AS IS\" # AND ANY EXPRESS OR", "and use in source and binary forms, with or without", "import Controller from pysdn.netconfdev.vrouter.vrouter5600 import VRouter5600 from pysdn.common.status import STATUS", "# THE POSSIBILITY OF SUCH DAMAGE. \"\"\" @authors: <NAME> @status:", "<NAME> @status: Development @version: 1.1.0 \"\"\" import time import json", "print (\"\\n\") print (\"!!!Demo terminated, reason: %s\" % status.brief().lower()) exit(0)", "= result.get_status() if(status.eq(STATUS.OK)): print (\"'%s' configuration:\" % nodeName) cfg =", "IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE", "OR OTHERWISE) # ARISING IN ANY WAY OUT OF THE", "the copyright holder nor the names of its # contributors", "copyright holder nor the names of its # contributors may", "the '%s'\" % nodeName print (\"!!!Demo terminated, reason: %s\" %", "print (\"!!!Demo terminated, reason: %s\" % status.brief().lower()) exit(0) print(\"\\n\") print", "time.sleep(rundelay) node_configured = False result = ctrl.check_node_config_status(nodeName) status = result.get_status()", "configured on the Controller\" % nodeName) elif(status.eq(STATUS.DATA_NOT_FOUND)): node_configured = False", "conditions and the following disclaimer in the documentation # and/or", "# 3. Neither the name of the copyright holder nor", "OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF", "PURPOSE # ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT", "reproduce the above copyright notice, # this list of conditions", "configuration status for the '%s'\" % nodeName print (\"!!!Demo terminated,", "exit() try: ctrlIpAddr = d['ctrlIpAddr'] ctrlPortNum = d['ctrlPortNum'] ctrlUname =", "print \"Failed to get configuration status for the '%s'\" %", "Controller(ctrlIpAddr, ctrlPortNum, ctrlUname, ctrlPswd) vrouter = VRouter5600(ctrl, nodeName, nodeIpAddr, nodePortNum,", "print (\"!!!Demo terminated, reason: %s\" % status.detailed()) exit(0) if node_configured", "in binary form must reproduce the above copyright notice, #", "OF # THE POSSIBILITY OF SUCH DAMAGE. \"\"\" @authors: <NAME>", "# All rights reserved. 
# Redistribution and use in source", "forms, with or without # modification, are permitted provided that", "the Controller\" % nodeName) time.sleep(rundelay) result = ctrl.delete_netconf_node(vrouter) status =", "binary forms, with or without # modification, are permitted provided", "False: result = ctrl.add_netconf_node(vrouter) status = result.get_status() if(status.eq(STATUS.OK)): print (\"<<<", "= {} if(load_dict_from_file(f, d) is False): print(\"Config file '%s' read", "SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE # LIABLE FOR", "nodeIpAddr = d['nodeIpAddr'] nodePortNum = d['nodePortNum'] nodeUname = d['nodeUname'] nodePswd", "nodeName = d['nodeName'] nodeIpAddr = d['nodeIpAddr'] nodePortNum = d['nodePortNum'] nodeUname", "contributors may be used to endorse or promote products derived", "exit(0) print (\"\\n\") time.sleep(rundelay) result = ctrl.check_node_conn_status(nodeName) status = result.get_status()", "print (\">>> Remove '%s' NETCONF node from the Controller\" %", "the documentation # and/or other materials provided with the distribution.", "COMMUNICATIONS SYSTEMS, INC # All rights reserved. # Redistribution and", "print (\"!!!Demo terminated, reason: %s\" % status.brief()) exit(0) print (\"\\n\")", "products derived from this # software without specific prior written", "% status.brief().lower()) exit(0) print \"\\n\" print (\">>> Remove '%s' NETCONF", "rundelay = d['rundelay'] except: print (\"Failed to get Controller device", "are permitted provided that the following conditions are met: #", "# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)", "nodeName) else: print (\"\\n\") print (\"!!!Demo terminated, reason: %s\" %", "data = json.loads(cfg) print json.dumps(data, indent=4) else: print (\"\\n\") print", "the name of the copyright holder nor the names of", "status.brief().lower()) exit(0) print(\"\\n\") print (\"<<< Show configuration of the '%s'\"", "WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE #", "Controller\" % nodeName) elif(status.eq(STATUS.DATA_NOT_FOUND)): node_configured = False else: print (\"\\n\")", "ARISING IN ANY WAY OUT OF THE USE OF THIS", "# software without specific prior written permission. # THIS SOFTWARE", "ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN", "nodePswd = d['nodePswd'] rundelay = d['rundelay'] except: print (\"Failed to", "LIMITED TO, PROCUREMENT OF # SUBSTITUTE GOODS OR SERVICES; LOSS", "result.get_status() if(status.eq(STATUS.NODE_CONFIGURED)): node_configured = True print (\"<<< '%s' is configured", "conditions are met: # 1. Redistributions of source code must", "Redistributions in binary form must reproduce the above copyright notice,", "ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR", "(\"Failed to get Controller device attributes\") exit(0) print (\"<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<\") print", "# Redistribution and use in source and binary forms, with", "from pysdn.netconfdev.vrouter.vrouter5600 import VRouter5600 from pysdn.common.status import STATUS from pysdn.common.utils", "(\"!!!Demo terminated, reason: %s\" % status.detailed()) exit(0) print (\"\\n\") time.sleep(rundelay)", "the above copyright notice, # this list of conditions and", "= Controller(ctrlIpAddr, ctrlPortNum, ctrlUname, ctrlPswd) vrouter = VRouter5600(ctrl, nodeName, nodeIpAddr,", "derived from this # software without specific prior written permission.", "nodeName, nodeIpAddr)) print (\"\\n\") time.sleep(rundelay) node_configured = False result =", "OF THIS SOFTWARE, EVEN IF ADVISED OF # THE POSSIBILITY", "nodePortNum, nodeUname, nodePswd) print (\"<<< 'Controller': %s, '%s': %s\" %", "% nodeName) time.sleep(rundelay) result = vrouter.get_cfg() status = result.get_status() if(status.eq(STATUS.OK)):", "ctrlPortNum = d['ctrlPortNum'] ctrlUname = d['ctrlUname'] ctrlPswd = d['ctrlPswd'] nodeName", "d = {} if(load_dict_from_file(f, d) is False): print(\"Config file '%s'", "LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) # ARISING IN", "nodeName, nodeIpAddr, nodePortNum, nodeUname, nodePswd) print (\"<<< 'Controller': %s, '%s':", "pysdn.common.status import STATUS from pysdn.common.utils import load_dict_from_file def vr_demo_3(): f", "list of conditions and the following disclaimer in the documentation", "status = result.get_status() if(status.eq(STATUS.OK)): print (\"'%s' NETCONF node was successfully", "= d['nodeUname'] nodePswd = d['nodePswd'] rundelay = d['rundelay'] except: print", "= result.get_status() if(status.eq(STATUS.OK)): print (\"<<< '%s' added to the Controller\"", "print (\"\\n\") print (\">>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>\") print (\">>> Demo End\") print (\">>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>\")", "# modification, are permitted provided that the following conditions are", "DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR # CONSEQUENTIAL DAMAGES (INCLUDING,", "DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS", "d) is False): print(\"Config file '%s' read error: \" %", "OF LIABILITY, WHETHER IN # CONTRACT, STRICT LIABILITY, OR TORT", "= False else: print (\"\\n\") print \"Failed to get configuration", "OF # SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA,", "following disclaimer in the documentation # and/or other materials provided", "reason: %s\" % status.detailed()) exit(0) print (\"\\n\") time.sleep(rundelay) result =", "'%s'\" % nodeName print (\"!!!Demo terminated, reason: %s\" % status.detailed())", "(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF # SUBSTITUTE GOODS", "LIABILITY, WHETHER IN # CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING", "\"\"\" @authors: <NAME> @status: Development @version: 1.1.0 \"\"\" import time", "(\"\\n\") time.sleep(rundelay) node_configured = False result = ctrl.check_node_config_status(nodeName) status =", "may be used to endorse or promote products derived from", "ctrlPswd = d['ctrlPswd'] nodeName = d['nodeName'] nodeIpAddr = d['nodeIpAddr'] nodePortNum", "to endorse or promote products derived from this # software", "= ctrl.add_netconf_node(vrouter) status = result.get_status() if(status.eq(STATUS.OK)): print (\"<<< '%s' added", "IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE # IMPLIED", "<gh_stars>1-10 #!/usr/bin/python # Copyright (c) 2015, BROCADE COMMUNICATIONS SYSTEMS, INC", "to the Controller\" % nodeName) else: print (\"\\n\") print (\"!!!Demo", "= d['nodeIpAddr'] nodePortNum = d['nodePortNum'] nodeUname = d['nodeUname'] nodePswd =", "copyright notice, # this list of conditions and the following", "result = ctrl.check_node_config_status(nodeName) status = result.get_status() if(status.eq(STATUS.NODE_CONFIGURED)): node_configured = True", "'%s' is configured on the Controller\" % nodeName) elif(status.eq(STATUS.DATA_NOT_FOUND)): node_configured", "time.sleep(rundelay) result = ctrl.check_node_conn_status(nodeName) status = result.get_status() if(status.eq(STATUS.NODE_CONNECTED)): print (\"<<<", "SYSTEMS, INC # All rights reserved. # Redistribution and use", "\"cfg4.yml\" d = {} if(load_dict_from_file(f, d) is False): print(\"Config file", "{} if(load_dict_from_file(f, d) is False): print(\"Config file '%s' read error:", "'%s': %s\" % (ctrlIpAddr, nodeName, nodeIpAddr)) print (\"\\n\") time.sleep(rundelay) node_configured", "if(status.eq(STATUS.NODE_CONFIGURED)): node_configured = True print (\"<<< '%s' is configured on", "print (\"'%s' NETCONF node was successfully removed \" \"from the", "'%s'\" % nodeName) time.sleep(rundelay) result = vrouter.get_cfg() status = result.get_status()", "OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE # ARE", "the names of its # contributors may be used to", "INCLUDING, BUT NOT LIMITED TO, THE # IMPLIED WARRANTIES OF", "load_dict_from_file def vr_demo_3(): f = \"cfg4.yml\" d = {} if(load_dict_from_file(f,", "conditions and the following disclaimer. # 2. Redistributions in binary", "else: print (\"\\n\") print (\"!!!Demo terminated, reason: %s\" % status.brief().lower())", "d['ctrlPswd'] nodeName = d['nodeName'] nodeIpAddr = d['nodeIpAddr'] nodePortNum = d['nodePortNum']", "and the following disclaimer. # 2. 
Redistributions in binary form", "and the following disclaimer in the documentation # and/or other", "# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND", "print (\"Failed to get Controller device attributes\") exit(0) print (\"<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<\")", "(\"!!!Demo terminated, reason: %s\" % status.detailed()) exit(0) if node_configured is", "the following disclaimer. # 2. Redistributions in binary form must", "BROCADE COMMUNICATIONS SYSTEMS, INC # All rights reserved. # Redistribution", "following disclaimer. # 2. Redistributions in binary form must reproduce", "status = result.get_status() if(status.eq(STATUS.NODE_CONFIGURED)): node_configured = True print (\"<<< '%s'", "(\"\\n\") print (\"!!!Demo terminated, reason: %s\" % status.brief()) exit(0) print", "% nodeName) elif(status.eq(STATUS.DATA_NOT_FOUND)): node_configured = False else: print (\"\\n\") print", "OF USE, DATA, OR PROFITS; OR BUSINESS # INTERRUPTION) HOWEVER", "ctrlUname, ctrlPswd) vrouter = VRouter5600(ctrl, nodeName, nodeIpAddr, nodePortNum, nodeUname, nodePswd)", "status = result.get_status() if(status.eq(STATUS.OK)): print (\"<<< '%s' added to the", "LIMITED TO, THE # IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS", "Show configuration of the '%s'\" % nodeName) time.sleep(rundelay) result =", "IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,", "LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR #", "retain the above copyright notice, # this list of conditions", "json from pysdn.controller.controller import Controller from pysdn.netconfdev.vrouter.vrouter5600 import VRouter5600 from", "this # software without specific prior written permission. # THIS", "and/or other materials provided with the distribution. # 3. Neither", "nodeIpAddr)) print (\"\\n\") time.sleep(rundelay) node_configured = False result = ctrl.check_node_config_status(nodeName)", "without # modification, are permitted provided that the following conditions", "node_configured = False else: print (\"\\n\") print \"Failed to get", "to get configuration status for the '%s'\" % nodeName print", "Demo Start\") print (\"<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<\") print (\"\\n\") ctrl = Controller(ctrlIpAddr, ctrlPortNum,", "(\"<<< Demo Start\") print (\"<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<\") print (\"\\n\") ctrl = Controller(ctrlIpAddr,", "indent=4) else: print (\"\\n\") print (\"!!!Demo terminated, reason: %s\" %", "WHETHER IN # CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE", "this list of conditions and the following disclaimer in the", "%s\" % status.brief().lower()) exit(0) print(\"\\n\") print (\"<<< Show configuration of", "modification, are permitted provided that the following conditions are met:", "PROCUREMENT OF # SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,", "SOFTWARE, EVEN IF ADVISED OF # THE POSSIBILITY OF SUCH", "from pysdn.controller.controller import Controller from pysdn.netconfdev.vrouter.vrouter5600 import VRouter5600 from pysdn.common.status", "d['ctrlIpAddr'] ctrlPortNum = d['ctrlPortNum'] ctrlUname = d['ctrlUname'] ctrlPswd = d['ctrlPswd']", "of the copyright holder nor the names of its #", "for the '%s'\" % nodeName print (\"!!!Demo terminated, reason: %s\"", "# ARISING IN ANY WAY OUT OF THE USE OF", "@version: 1.1.0 \"\"\" import time import json from pysdn.controller.controller import", "added to the Controller\" % nodeName) else: print (\"\\n\") print", "pysdn.controller.controller import Controller from 
pysdn.netconfdev.vrouter.vrouter5600 import VRouter5600 from pysdn.common.status import", "= result.get_status() if(status.eq(STATUS.NODE_CONNECTED)): print (\"<<< '%s' is connected to the", "is connected to the Controller\" % nodeName) else: print (\"\\n\")", "AND FITNESS FOR A PARTICULAR PURPOSE # ARE DISCLAIMED. IN", "= result.get_data() data = json.loads(cfg) print json.dumps(data, indent=4) else: print", "other materials provided with the distribution. # 3. Neither the", "STATUS from pysdn.common.utils import load_dict_from_file def vr_demo_3(): f = \"cfg4.yml\"", "if(status.eq(STATUS.NODE_CONNECTED)): print (\"<<< '%s' is connected to the Controller\" %", "= d['ctrlPortNum'] ctrlUname = d['ctrlUname'] ctrlPswd = d['ctrlPswd'] nodeName =", "1.1.0 \"\"\" import time import json from pysdn.controller.controller import Controller", "NETCONF node from the Controller\" % nodeName) time.sleep(rundelay) result =", "result.get_status() if(status.eq(STATUS.OK)): print (\"'%s' NETCONF node was successfully removed \"", "ANY THEORY OF LIABILITY, WHETHER IN # CONTRACT, STRICT LIABILITY,", "ON ANY THEORY OF LIABILITY, WHETHER IN # CONTRACT, STRICT", "EVEN IF ADVISED OF # THE POSSIBILITY OF SUCH DAMAGE.", "result.get_status() if(status.eq(STATUS.OK)): print (\"'%s' configuration:\" % nodeName) cfg = result.get_data()", "print \"\\n\" print (\">>> Remove '%s' NETCONF node from the", "removed \" \"from the Controller\" % nodeName) else: print (\"\\n\")", "%s\" % (ctrlIpAddr, nodeName, nodeIpAddr)) print (\"\\n\") time.sleep(rundelay) node_configured =", "(\"<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<\") print (\"<<< Demo Start\") print (\"<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<\") print (\"\\n\") ctrl", "vrouter.get_cfg() status = result.get_status() if(status.eq(STATUS.OK)): print (\"'%s' configuration:\" % nodeName)", "was successfully removed \" \"from the Controller\" % nodeName) else:", "time.sleep(rundelay) result = ctrl.delete_netconf_node(vrouter) status = result.get_status() if(status.eq(STATUS.OK)): print (\"'%s'", "DAMAGE. \"\"\" @authors: <NAME> @status: Development @version: 1.1.0 \"\"\" import", "def vr_demo_3(): f = \"cfg4.yml\" d = {} if(load_dict_from_file(f, d)", "(\">>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>\") print (\">>> Demo End\") print (\">>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>\") if __name__ ==", "OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE #", "%s\" % status.detailed()) exit(0) print (\"\\n\") time.sleep(rundelay) result = ctrl.check_node_conn_status(nodeName)", "cfg = result.get_data() data = json.loads(cfg) print json.dumps(data, indent=4) else:", "SPECIAL, EXEMPLARY, OR # CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED", "from the Controller\" % nodeName) time.sleep(rundelay) result = ctrl.delete_netconf_node(vrouter) status", "IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\"", "list of conditions and the following disclaimer. # 2. Redistributions", "nodeName) elif(status.eq(STATUS.DATA_NOT_FOUND)): node_configured = False else: print (\"\\n\") print \"Failed", "print (\"<<< Show configuration of the '%s'\" % nodeName) time.sleep(rundelay)", "# and/or other materials provided with the distribution. 
# 3.", "= ctrl.check_node_config_status(nodeName) status = result.get_status() if(status.eq(STATUS.NODE_CONFIGURED)): node_configured = True print", "d['nodeUname'] nodePswd = d['nodePswd'] rundelay = d['rundelay'] except: print (\"Failed", "ADVISED OF # THE POSSIBILITY OF SUCH DAMAGE. \"\"\" @authors:", "met: # 1. Redistributions of source code must retain the", "materials provided with the distribution. # 3. Neither the name", "NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE #", "endorse or promote products derived from this # software without", "from pysdn.common.status import STATUS from pysdn.common.utils import load_dict_from_file def vr_demo_3():", "holder nor the names of its # contributors may be", "= d['ctrlPswd'] nodeName = d['nodeName'] nodeIpAddr = d['nodeIpAddr'] nodePortNum =", "time.sleep(rundelay) result = vrouter.get_cfg() status = result.get_status() if(status.eq(STATUS.OK)): print (\"'%s'", "FOR A PARTICULAR PURPOSE # ARE DISCLAIMED. IN NO EVENT", "of the '%s'\" % nodeName) time.sleep(rundelay) result = vrouter.get_cfg() status", "provided with the distribution. # 3. Neither the name of", "NOT LIMITED TO, THE # IMPLIED WARRANTIES OF MERCHANTABILITY AND", "(\"\\n\") print (\"!!!Demo terminated, reason: %s\" % status.brief().lower()) exit(0) print", "SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;", "if node_configured is False: result = ctrl.add_netconf_node(vrouter) status = result.get_status()", "FITNESS FOR A PARTICULAR PURPOSE # ARE DISCLAIMED. IN NO", "pysdn.netconfdev.vrouter.vrouter5600 import VRouter5600 from pysdn.common.status import STATUS from pysdn.common.utils import", "BUT NOT LIMITED TO, THE # IMPLIED WARRANTIES OF MERCHANTABILITY", "its # contributors may be used to endorse or promote", "the '%s'\" % nodeName) time.sleep(rundelay) result = vrouter.get_cfg() status =", "d['nodeIpAddr'] nodePortNum = d['nodePortNum'] nodeUname = d['nodeUname'] nodePswd = d['nodePswd']", "= result.get_status() if(status.eq(STATUS.OK)): print (\"'%s' NETCONF node was successfully removed", "IS\" # AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT", "% status.brief().lower()) exit(0) print(\"\\n\") print (\"<<< Show configuration of the", "1. Redistributions of source code must retain the above copyright", "import STATUS from pysdn.common.utils import load_dict_from_file def vr_demo_3(): f =", "SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS", "ctrl.check_node_conn_status(nodeName) status = result.get_status() if(status.eq(STATUS.NODE_CONNECTED)): print (\"<<< '%s' is connected", "except: print (\"Failed to get Controller device attributes\") exit(0) print", "on the Controller\" % nodeName) elif(status.eq(STATUS.DATA_NOT_FOUND)): node_configured = False else:", "print (\">>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>\") print (\">>> Demo End\") print (\">>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>\") if __name__", "PARTICULAR PURPOSE # ARE DISCLAIMED. IN NO EVENT SHALL THE", "(INCLUDING NEGLIGENCE OR OTHERWISE) # ARISING IN ANY WAY OUT", "% nodeName) cfg = result.get_data() data = json.loads(cfg) print json.dumps(data,", "of its # contributors may be used to endorse or", "nodeUname, nodePswd) print (\"<<< 'Controller': %s, '%s': %s\" % (ctrlIpAddr,", "OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) # ARISING IN ANY", "THE # IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A", "COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\" # AND ANY EXPRESS", "with the distribution. # 3. 
Neither the name of the", "vrouter = VRouter5600(ctrl, nodeName, nodeIpAddr, nodePortNum, nodeUname, nodePswd) print (\"<<<", "d['nodeName'] nodeIpAddr = d['nodeIpAddr'] nodePortNum = d['nodePortNum'] nodeUname = d['nodeUname']", "print (\"'%s' configuration:\" % nodeName) cfg = result.get_data() data =", "INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR # CONSEQUENTIAL DAMAGES (INCLUDING, BUT", "% (ctrlIpAddr, nodeName, nodeIpAddr)) print (\"\\n\") time.sleep(rundelay) node_configured = False", "CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF #", "node_configured = False result = ctrl.check_node_config_status(nodeName) status = result.get_status() if(status.eq(STATUS.NODE_CONFIGURED)):", "OR PROFITS; OR BUSINESS # INTERRUPTION) HOWEVER CAUSED AND ON", "node was successfully removed \" \"from the Controller\" % nodeName)", "(\"'%s' configuration:\" % nodeName) cfg = result.get_data() data = json.loads(cfg)", "2. Redistributions in binary form must reproduce the above copyright", "(ctrlIpAddr, nodeName, nodeIpAddr)) print (\"\\n\") time.sleep(rundelay) node_configured = False result", "must retain the above copyright notice, # this list of", "MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE # ARE DISCLAIMED.", "OR # CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT", "Controller from pysdn.netconfdev.vrouter.vrouter5600 import VRouter5600 from pysdn.common.status import STATUS from", "print (\">>> Demo End\") print (\">>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>\") if __name__ == \"__main__\":", "used to endorse or promote products derived from this #", "= VRouter5600(ctrl, nodeName, nodeIpAddr, nodePortNum, nodeUname, nodePswd) print (\"<<< 'Controller':", "if(status.eq(STATUS.OK)): print (\"<<< '%s' added to the Controller\" % nodeName)", "= result.get_status() if(status.eq(STATUS.NODE_CONFIGURED)): node_configured = True print (\"<<< '%s' is", "TO, PROCUREMENT OF # SUBSTITUTE GOODS OR SERVICES; LOSS OF", "exit(0) print(\"\\n\") print (\"<<< Show configuration of the '%s'\" %", "nodeIpAddr, nodePortNum, nodeUname, nodePswd) print (\"<<< 'Controller': %s, '%s': %s\"", "BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\" # AND", "nodePortNum = d['nodePortNum'] nodeUname = d['nodeUname'] nodePswd = d['nodePswd'] rundelay", "\"Failed to get configuration status for the '%s'\" % nodeName", "print (\"!!!Demo terminated, reason: %s\" % status.detailed()) exit(0) print (\"\\n\")", "THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\" # AND ANY", "False): print(\"Config file '%s' read error: \" % f) exit()", "# 2. Redistributions in binary form must reproduce the above", "Neither the name of the copyright holder nor the names" ]
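The demo reads every connection parameter from cfg4.yml through load_dict_from_file. As a hedged sketch, this is the shape of the dictionary vr_demo_3() expects that loader to fill in: the key names come from the try block above, while every value is a placeholder rather than anything taken from the original sample.

# Hedged sketch of the dictionary vr_demo_3() expects load_dict_from_file()
# to populate from cfg4.yml; all values below are placeholders.
demo_config = {
    'ctrlIpAddr': '192.0.2.10',    # OpenDaylight controller address
    'ctrlPortNum': 8181,           # controller RESTCONF port
    'ctrlUname': 'admin',
    'ctrlPswd': 'admin',
    'nodeName': 'vRouter',         # name the NETCONF node is mounted under
    'nodeIpAddr': '192.0.2.20',    # VRouter5600 management address
    'nodePortNum': 830,            # NETCONF port on the vRouter
    'nodeUname': 'vyatta',
    'nodePswd': 'vyatta',
    'rundelay': 5,                 # seconds to sleep between demo steps
}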
[ "is not None: item=str(json.dumps(parsed_json)) print(item) #client.publish(\"message\",item) client.publish(\"detection\",item) else: throw('Not correct", "on_connect(client, userdata, rc): print(\"Connected to MQTT-broker on \" + MQTT_BROKER", "not None: percent=faceDetection.getDifference(_image1,_image2) print('Detection:' + str(percent)) client.publish(\"detection_found\", makeJsonObject_detection(percent,_image1,_image2,_read)) if msg.topic==\"recalculate_start\":", "_image2 =parsed_json['image2'] _read=parsed_json['read'] if _read: if _image1 is not None", "is not None: calcObj=levelCalculation.calculate(_data,_file) print('CalculatedOBJ:' + str(calcObj)) client.publish(\"recalculate_done\", makeJsonObject_levelCalculate(calcObj['data'],calcObj['score'])) except", "sys.exit(0) else: parsed_json=json.loads(convertJson(msg.payload)) _type =parsed_json['type'] _port=parsed_json['port'] _read=parsed_json['read'] if _type is", "client.publish(\"recalculate_done\", makeJsonObject_levelCalculate(calcObj['data'],calcObj['score'])) except Exception as error: print('Error:',error) def convertJson(data): data=data.decode()", "=parsed_json['type'] _port=parsed_json['port'] _read=parsed_json['read'] if _type is not None and _port", "item=str(json.dumps(parsed_json)) print(item) #client.publish(\"message\",item) client.publish(\"detection\",item) else: throw('Not correct data') except Exception", "and _port is not None and _read is not None:", "\"read\":read}) return str(item) def makeJsonObject_levelCalculate(data=None,score=0): item=json.dumps({\"data\":data,\"score\":score}) return str(item) def main():", "as publish import lib.faceDetection as faceDetection import lib.levelCalculation as levelCalculation", "None: try: if data=='exit': exit() sys.exit(0) else: parsed_json=json.loads(convertJson(msg.payload)) _type =parsed_json['type']", "lib.faceDetection as faceDetection import lib.levelCalculation as levelCalculation MQTT_BROKER=\"localhost\" client =", "stijnvanhulle # @Last modified time: 2016-12-20T12:51:07+01:00 # @License: stijnvanhulle.be #!/usr/bin/env", "client.loop_start() time.sleep(0.2) client.publish(\"online\", makeJsonOnlineObject('FaceDetection')) def makeJsonObject(value=None,port=None,type=None,read=False): item=json.dumps({\"port\":port, \"type\":type,\"value\":value,\"read\":read}) return str(item)", "return str(item) def makeJsonObject_detection(value=None,image1=None,image2=None,read=False): item=json.dumps({\"value\":value, \"image1\":image1,\"image2\":image2, \"read\":read}) return str(item) def", "sys import json import paho.mqtt.client as mqtt import paho.mqtt.publish as", "import sys import json import paho.mqtt.client as mqtt import paho.mqtt.publish", "def init(): client.on_connect = on_connect client.on_message = on_message client.connect_async(MQTT_BROKER, 1883,", "=parsed_json['data'] _file=parsed_json['file'] if _data is not None: calcObj=levelCalculation.calculate(_data,_file) print('CalculatedOBJ:' +", "paho.mqtt.client as mqtt import paho.mqtt.publish as publish import lib.faceDetection as", "main)) main() except (TypeError) as ex: error=\"Error: \" + str(ex)", "modified by: stijnvanhulle # @Last modified time: 2016-12-20T12:51:07+01:00 # @License:", "_port=parsed_json['port'] _read=parsed_json['read'] if _type is not None and _port is", "parsed_json=json.loads(convertJson(msg.payload)) if msg.topic==\"detection_find\": print(parsed_json) _image1 =parsed_json['image1'] _image2 =parsed_json['image2'] _read=parsed_json['read'] if", 
"item=json.dumps({\"device\":device}) return str(item) def init(): client.on_connect = on_connect client.on_message =", "#executor = ProcessPoolExecutor(2) #loop = trollius.get_event_loop() #_main = trollius.async(loop.run_in_executor(executor, main))", "<NAME> <stijnvanhulle> # @Date: 2016-11-28T13:51:38+01:00 # @Email: <EMAIL> # @Last", "client.subscribe(\"online\") client.subscribe(\"message\") client.subscribe(\"detection_find\") client.subscribe(\"detection_found\") client.subscribe(\"recalculate_start\") client.subscribe(\"recalculate_done\") def on_message(client, userdata, msg):", "else: parsed_json=json.loads(convertJson(msg.payload)) _type =parsed_json['type'] _port=parsed_json['port'] _read=parsed_json['read'] if _type is not", "not None and _read is not None: item=str(json.dumps(parsed_json)) print(item) #client.publish(\"message\",item)", "_image1 =parsed_json['image1'] _image2 =parsed_json['image2'] _read=parsed_json['read'] if _read: if _image1 is", ") client.subscribe(\"online\") client.subscribe(\"message\") client.subscribe(\"detection_find\") client.subscribe(\"detection_found\") client.subscribe(\"recalculate_start\") client.subscribe(\"recalculate_done\") def on_message(client, userdata,", "data def makeJsonOnlineObject(device=''): item=json.dumps({\"device\":device}) return str(item) def init(): client.on_connect =", "str(item) def init(): client.on_connect = on_connect client.on_message = on_message client.connect_async(MQTT_BROKER,", "levelCalculation MQTT_BROKER=\"localhost\" client = mqtt.Client() #classes def on_connect(client, userdata, rc):", "+ str(percent)) client.publish(\"detection_found\", makeJsonObject_detection(percent,_image1,_image2,_read)) if msg.topic==\"recalculate_start\": print(parsed_json) _data =parsed_json['data'] _file=parsed_json['file']", "client.subscribe(\"recalculate_done\") def on_message(client, userdata, msg): try: parsed_json=json.loads(convertJson(msg.payload)) if msg.topic==\"detection_find\": print(parsed_json)", "percent=faceDetection.getDifference(_image1,_image2) print('Detection:' + str(percent)) client.publish(\"detection_found\", makeJsonObject_detection(percent,_image1,_image2,_read)) if msg.topic==\"recalculate_start\": print(parsed_json) _data", "import math import sys import json import paho.mqtt.client as mqtt", "on_message(client, userdata, msg): try: parsed_json=json.loads(convertJson(msg.payload)) if msg.topic==\"detection_find\": print(parsed_json) _image1 =parsed_json['image1']", "time.sleep(0.1) data = input(\"Code:\") if data is not None: try:", "ProcessPoolExecutor(2) #loop = trollius.get_event_loop() #_main = trollius.async(loop.run_in_executor(executor, main)) main() except", "ex: error=\"Error: \" + str(ex) #print(error) except (KeyboardInterrupt): exit() print(\"\\nIOT", "import lib.faceDetection as faceDetection import lib.levelCalculation as levelCalculation MQTT_BROKER=\"localhost\" client", "\"type\":type,\"value\":value,\"read\":read}) return str(item) def makeJsonObject_detection(value=None,image1=None,image2=None,read=False): item=json.dumps({\"value\":value, \"image1\":image1,\"image2\":image2, \"read\":read}) return str(item)", "# @Email: <EMAIL> # @Last modified by: stijnvanhulle # @Last", "import paho.mqtt.client as mqtt import paho.mqtt.publish as publish import lib.faceDetection", "input_text: MQTT_BROKER=input_text #executor = ProcessPoolExecutor(2) #loop = trollius.get_event_loop() #_main =", "if _data is not None: calcObj=levelCalculation.calculate(_data,_file) print('CalculatedOBJ:' + 
str(calcObj)) client.publish(\"recalculate_done\",", "# @License: stijnvanhulle.be #!/usr/bin/env python import time import datetime import", "print('Error:',error) if __name__ == '__main__': try: if len(sys.argv)>1: MQTT_BROKER=sys.argv[1] else:", "userdata, rc): print(\"Connected to MQTT-broker on \" + MQTT_BROKER )", "if msg.topic==\"detection_find\": print(parsed_json) _image1 =parsed_json['image1'] _image2 =parsed_json['image2'] _read=parsed_json['read'] if _read:", "makeJsonObject_detection(value=None,image1=None,image2=None,read=False): item=json.dumps({\"value\":value, \"image1\":image1,\"image2\":image2, \"read\":read}) return str(item) def makeJsonObject_levelCalculate(data=None,score=0): item=json.dumps({\"data\":data,\"score\":score}) return", "None and _port is not None and _read is not", "client.subscribe(\"message\") client.subscribe(\"detection_find\") client.subscribe(\"detection_found\") client.subscribe(\"recalculate_start\") client.subscribe(\"recalculate_done\") def on_message(client, userdata, msg): try:", "def makeJsonObject_levelCalculate(data=None,score=0): item=json.dumps({\"data\":data,\"score\":score}) return str(item) def main(): init() while True:", "= trollius.get_event_loop() #_main = trollius.async(loop.run_in_executor(executor, main)) main() except (TypeError) as", "lib.levelCalculation as levelCalculation MQTT_BROKER=\"localhost\" client = mqtt.Client() #classes def on_connect(client,", "client.on_connect = on_connect client.on_message = on_message client.connect_async(MQTT_BROKER, 1883, 60) client.loop_start()", "data') except Exception as error: print('Error:',error) if __name__ == '__main__':", "makeJsonOnlineObject('FaceDetection')) def makeJsonObject(value=None,port=None,type=None,read=False): item=json.dumps({\"port\":port, \"type\":type,\"value\":value,\"read\":read}) return str(item) def makeJsonObject_detection(value=None,image1=None,image2=None,read=False): item=json.dumps({\"value\":value,", "as error: print('Error:',error) if __name__ == '__main__': try: if len(sys.argv)>1:", "print(\"Connected to MQTT-broker on \" + MQTT_BROKER ) client.subscribe(\"online\") client.subscribe(\"message\")", "MQTT_BROKER ) client.subscribe(\"online\") client.subscribe(\"message\") client.subscribe(\"detection_find\") client.subscribe(\"detection_found\") client.subscribe(\"recalculate_start\") client.subscribe(\"recalculate_done\") def on_message(client,", "is not None: try: if data=='exit': exit() sys.exit(0) else: parsed_json=json.loads(convertJson(msg.payload))", "\") if input_text: MQTT_BROKER=input_text #executor = ProcessPoolExecutor(2) #loop = trollius.get_event_loop()", "MQTT-broker on \" + MQTT_BROKER ) client.subscribe(\"online\") client.subscribe(\"message\") client.subscribe(\"detection_find\") client.subscribe(\"detection_found\")", "math import sys import json import paho.mqtt.client as mqtt import", "= ProcessPoolExecutor(2) #loop = trollius.get_event_loop() #_main = trollius.async(loop.run_in_executor(executor, main)) main()", "not None: try: if data=='exit': exit() sys.exit(0) else: parsed_json=json.loads(convertJson(msg.payload)) _type", "1883, 60) client.loop_start() time.sleep(0.2) client.publish(\"online\", makeJsonOnlineObject('FaceDetection')) def makeJsonObject(value=None,port=None,type=None,read=False): item=json.dumps({\"port\":port, \"type\":type,\"value\":value,\"read\":read})", "on \" + MQTT_BROKER ) client.subscribe(\"online\") client.subscribe(\"message\") client.subscribe(\"detection_find\") client.subscribe(\"detection_found\") 
client.subscribe(\"recalculate_start\")", "= input(\"Ip of MQTT-broker: \") if input_text: MQTT_BROKER=input_text #executor =", "# @Last modified time: 2016-12-20T12:51:07+01:00 # @License: stijnvanhulle.be #!/usr/bin/env python", "= on_connect client.on_message = on_message client.connect_async(MQTT_BROKER, 1883, 60) client.loop_start() time.sleep(0.2)", "MQTT_BROKER=sys.argv[1] else: input_text = input(\"Ip of MQTT-broker: \") if input_text:", "#classes def on_connect(client, userdata, rc): print(\"Connected to MQTT-broker on \"", "data.startswith(\"'\") and data.endswith(\"'\"): data = data[1:-1] print(data) return data def", "makeJsonObject_levelCalculate(data=None,score=0): item=json.dumps({\"data\":data,\"score\":score}) return str(item) def main(): init() while True: time.sleep(0.1)", "as ex: error=\"Error: \" + str(ex) #print(error) except (KeyboardInterrupt): exit()", "def on_connect(client, userdata, rc): print(\"Connected to MQTT-broker on \" +", "_image1 is not None and _image2 is not None: percent=faceDetection.getDifference(_image1,_image2)", "client.on_message = on_message client.connect_async(MQTT_BROKER, 1883, 60) client.loop_start() time.sleep(0.2) client.publish(\"online\", makeJsonOnlineObject('FaceDetection'))", "print(parsed_json) _image1 =parsed_json['image1'] _image2 =parsed_json['image2'] _read=parsed_json['read'] if _read: if _image1", "<gh_stars>1-10 # @Author: <NAME> <stijnvanhulle> # @Date: 2016-11-28T13:51:38+01:00 # @Email:", "@Author: <NAME> <stijnvanhulle> # @Date: 2016-11-28T13:51:38+01:00 # @Email: <EMAIL> #", "# @Author: <NAME> <stijnvanhulle> # @Date: 2016-11-28T13:51:38+01:00 # @Email: <EMAIL>", "== '__main__': try: if len(sys.argv)>1: MQTT_BROKER=sys.argv[1] else: input_text = input(\"Ip", "python import time import datetime import math import sys import", "MQTT_BROKER=\"localhost\" client = mqtt.Client() #classes def on_connect(client, userdata, rc): print(\"Connected", "as error: print('Error:',error) def convertJson(data): data=data.decode() if data.startswith(\"'\") and data.endswith(\"'\"):", "msg.topic==\"recalculate_start\": print(parsed_json) _data =parsed_json['data'] _file=parsed_json['file'] if _data is not None:", "import paho.mqtt.publish as publish import lib.faceDetection as faceDetection import lib.levelCalculation", "parsed_json=json.loads(convertJson(msg.payload)) _type =parsed_json['type'] _port=parsed_json['port'] _read=parsed_json['read'] if _type is not None", "if __name__ == '__main__': try: if len(sys.argv)>1: MQTT_BROKER=sys.argv[1] else: input_text", "item=json.dumps({\"value\":value, \"image1\":image1,\"image2\":image2, \"read\":read}) return str(item) def makeJsonObject_levelCalculate(data=None,score=0): item=json.dumps({\"data\":data,\"score\":score}) return str(item)", "exit() print(\"\\nIOT is afgesloten\\n\") sys.exit(0) except (SystemExit): print(\"\\nIOT is geforceert", "as faceDetection import lib.levelCalculation as levelCalculation MQTT_BROKER=\"localhost\" client = mqtt.Client()", "makeJsonObject_levelCalculate(calcObj['data'],calcObj['score'])) except Exception as error: print('Error:',error) def convertJson(data): data=data.decode() if", "is not None and _image2 is not None: percent=faceDetection.getDifference(_image1,_image2) print('Detection:'", "userdata, msg): try: parsed_json=json.loads(convertJson(msg.payload)) if msg.topic==\"detection_find\": print(parsed_json) _image1 =parsed_json['image1'] _image2", "msg.topic==\"detection_find\": print(parsed_json) _image1 =parsed_json['image1'] _image2 
=parsed_json['image2'] _read=parsed_json['read'] if _read: if", "is not None and _port is not None and _read", "else: throw('Not correct data') except Exception as error: print('Error:',error) if", "@Last modified time: 2016-12-20T12:51:07+01:00 # @License: stijnvanhulle.be #!/usr/bin/env python import", "time: 2016-12-20T12:51:07+01:00 # @License: stijnvanhulle.be #!/usr/bin/env python import time import", "as levelCalculation MQTT_BROKER=\"localhost\" client = mqtt.Client() #classes def on_connect(client, userdata,", "client.subscribe(\"detection_find\") client.subscribe(\"detection_found\") client.subscribe(\"recalculate_start\") client.subscribe(\"recalculate_done\") def on_message(client, userdata, msg): try: parsed_json=json.loads(convertJson(msg.payload))", "if msg.topic==\"recalculate_start\": print(parsed_json) _data =parsed_json['data'] _file=parsed_json['file'] if _data is not", "to MQTT-broker on \" + MQTT_BROKER ) client.subscribe(\"online\") client.subscribe(\"message\") client.subscribe(\"detection_find\")", "while True: time.sleep(0.1) data = input(\"Code:\") if data is not", "as mqtt import paho.mqtt.publish as publish import lib.faceDetection as faceDetection", "def on_message(client, userdata, msg): try: parsed_json=json.loads(convertJson(msg.payload)) if msg.topic==\"detection_find\": print(parsed_json) _image1", "not None: item=str(json.dumps(parsed_json)) print(item) #client.publish(\"message\",item) client.publish(\"detection\",item) else: throw('Not correct data')", "len(sys.argv)>1: MQTT_BROKER=sys.argv[1] else: input_text = input(\"Ip of MQTT-broker: \") if", "\" + str(ex) #print(error) except (KeyboardInterrupt): exit() print(\"\\nIOT is afgesloten\\n\")", "print('CalculatedOBJ:' + str(calcObj)) client.publish(\"recalculate_done\", makeJsonObject_levelCalculate(calcObj['data'],calcObj['score'])) except Exception as error: print('Error:',error)", "makeJsonObject_detection(percent,_image1,_image2,_read)) if msg.topic==\"recalculate_start\": print(parsed_json) _data =parsed_json['data'] _file=parsed_json['file'] if _data is", "msg): try: parsed_json=json.loads(convertJson(msg.payload)) if msg.topic==\"detection_find\": print(parsed_json) _image1 =parsed_json['image1'] _image2 =parsed_json['image2']", "_read: if _image1 is not None and _image2 is not", "data=='exit': exit() sys.exit(0) else: parsed_json=json.loads(convertJson(msg.payload)) _type =parsed_json['type'] _port=parsed_json['port'] _read=parsed_json['read'] if", "error: print('Error:',error) def convertJson(data): data=data.decode() if data.startswith(\"'\") and data.endswith(\"'\"): data", "not None: calcObj=levelCalculation.calculate(_data,_file) print('CalculatedOBJ:' + str(calcObj)) client.publish(\"recalculate_done\", makeJsonObject_levelCalculate(calcObj['data'],calcObj['score'])) except Exception", "__name__ == '__main__': try: if len(sys.argv)>1: MQTT_BROKER=sys.argv[1] else: input_text =", "data.endswith(\"'\"): data = data[1:-1] print(data) return data def makeJsonOnlineObject(device=''): item=json.dumps({\"device\":device})", "time.sleep(0.2) client.publish(\"online\", makeJsonOnlineObject('FaceDetection')) def makeJsonObject(value=None,port=None,type=None,read=False): item=json.dumps({\"port\":port, \"type\":type,\"value\":value,\"read\":read}) return str(item) def", "None: calcObj=levelCalculation.calculate(_data,_file) print('CalculatedOBJ:' + str(calcObj)) client.publish(\"recalculate_done\", makeJsonObject_levelCalculate(calcObj['data'],calcObj['score'])) except Exception as", "else: 
input_text = input(\"Ip of MQTT-broker: \") if input_text: MQTT_BROKER=input_text", "2016-11-28T13:51:38+01:00 # @Email: <EMAIL> # @Last modified by: stijnvanhulle #", "#!/usr/bin/env python import time import datetime import math import sys", "and _read is not None: item=str(json.dumps(parsed_json)) print(item) #client.publish(\"message\",item) client.publish(\"detection\",item) else:", "+ MQTT_BROKER ) client.subscribe(\"online\") client.subscribe(\"message\") client.subscribe(\"detection_find\") client.subscribe(\"detection_found\") client.subscribe(\"recalculate_start\") client.subscribe(\"recalculate_done\") def", "client.connect_async(MQTT_BROKER, 1883, 60) client.loop_start() time.sleep(0.2) client.publish(\"online\", makeJsonOnlineObject('FaceDetection')) def makeJsonObject(value=None,port=None,type=None,read=False): item=json.dumps({\"port\":port,", "data is not None: try: if data=='exit': exit() sys.exit(0) else:", "by: stijnvanhulle # @Last modified time: 2016-12-20T12:51:07+01:00 # @License: stijnvanhulle.be", "datetime import math import sys import json import paho.mqtt.client as", "def convertJson(data): data=data.decode() if data.startswith(\"'\") and data.endswith(\"'\"): data = data[1:-1]", "not None and _port is not None and _read is", "if _read: if _image1 is not None and _image2 is", "if _image1 is not None and _image2 is not None:", "@License: stijnvanhulle.be #!/usr/bin/env python import time import datetime import math", "str(percent)) client.publish(\"detection_found\", makeJsonObject_detection(percent,_image1,_image2,_read)) if msg.topic==\"recalculate_start\": print(parsed_json) _data =parsed_json['data'] _file=parsed_json['file'] if", "trollius.get_event_loop() #_main = trollius.async(loop.run_in_executor(executor, main)) main() except (TypeError) as ex:", "client.publish(\"online\", makeJsonOnlineObject('FaceDetection')) def makeJsonObject(value=None,port=None,type=None,read=False): item=json.dumps({\"port\":port, \"type\":type,\"value\":value,\"read\":read}) return str(item) def makeJsonObject_detection(value=None,image1=None,image2=None,read=False):", "client = mqtt.Client() #classes def on_connect(client, userdata, rc): print(\"Connected to", "publish import lib.faceDetection as faceDetection import lib.levelCalculation as levelCalculation MQTT_BROKER=\"localhost\"", "of MQTT-broker: \") if input_text: MQTT_BROKER=input_text #executor = ProcessPoolExecutor(2) #loop", "= data[1:-1] print(data) return data def makeJsonOnlineObject(device=''): item=json.dumps({\"device\":device}) return str(item)", "if data=='exit': exit() sys.exit(0) else: parsed_json=json.loads(convertJson(msg.payload)) _type =parsed_json['type'] _port=parsed_json['port'] _read=parsed_json['read']", "= input(\"Code:\") if data is not None: try: if data=='exit':", "input(\"Ip of MQTT-broker: \") if input_text: MQTT_BROKER=input_text #executor = ProcessPoolExecutor(2)", "str(calcObj)) client.publish(\"recalculate_done\", makeJsonObject_levelCalculate(calcObj['data'],calcObj['score'])) except Exception as error: print('Error:',error) def convertJson(data):", "convertJson(data): data=data.decode() if data.startswith(\"'\") and data.endswith(\"'\"): data = data[1:-1] print(data)", "None: percent=faceDetection.getDifference(_image1,_image2) print('Detection:' + str(percent)) client.publish(\"detection_found\", makeJsonObject_detection(percent,_image1,_image2,_read)) if msg.topic==\"recalculate_start\": print(parsed_json)", "and data.endswith(\"'\"): data = data[1:-1] print(data) return data def 
makeJsonOnlineObject(device=''):", "print(item) #client.publish(\"message\",item) client.publish(\"detection\",item) else: throw('Not correct data') except Exception as", "\" + MQTT_BROKER ) client.subscribe(\"online\") client.subscribe(\"message\") client.subscribe(\"detection_find\") client.subscribe(\"detection_found\") client.subscribe(\"recalculate_start\") client.subscribe(\"recalculate_done\")", "=parsed_json['image1'] _image2 =parsed_json['image2'] _read=parsed_json['read'] if _read: if _image1 is not", "_read=parsed_json['read'] if _read: if _image1 is not None and _image2", "item=json.dumps({\"data\":data,\"score\":score}) return str(item) def main(): init() while True: time.sleep(0.1) data", "if input_text: MQTT_BROKER=input_text #executor = ProcessPoolExecutor(2) #loop = trollius.get_event_loop() #_main", "@Date: 2016-11-28T13:51:38+01:00 # @Email: <EMAIL> # @Last modified by: stijnvanhulle", "client.subscribe(\"recalculate_start\") client.subscribe(\"recalculate_done\") def on_message(client, userdata, msg): try: parsed_json=json.loads(convertJson(msg.payload)) if msg.topic==\"detection_find\":", "print(data) return data def makeJsonOnlineObject(device=''): item=json.dumps({\"device\":device}) return str(item) def init():", "@Last modified by: stijnvanhulle # @Last modified time: 2016-12-20T12:51:07+01:00 #", "str(item) def makeJsonObject_levelCalculate(data=None,score=0): item=json.dumps({\"data\":data,\"score\":score}) return str(item) def main(): init() while", "return str(item) def main(): init() while True: time.sleep(0.1) data =", "mqtt import paho.mqtt.publish as publish import lib.faceDetection as faceDetection import", "init() while True: time.sleep(0.1) data = input(\"Code:\") if data is", "_data is not None: calcObj=levelCalculation.calculate(_data,_file) print('CalculatedOBJ:' + str(calcObj)) client.publish(\"recalculate_done\", makeJsonObject_levelCalculate(calcObj['data'],calcObj['score']))", "exit() sys.exit(0) else: parsed_json=json.loads(convertJson(msg.payload)) _type =parsed_json['type'] _port=parsed_json['port'] _read=parsed_json['read'] if _type", "print(parsed_json) _data =parsed_json['data'] _file=parsed_json['file'] if _data is not None: calcObj=levelCalculation.calculate(_data,_file)", "except (KeyboardInterrupt): exit() print(\"\\nIOT is afgesloten\\n\") sys.exit(0) except (SystemExit): print(\"\\nIOT", "None and _image2 is not None: percent=faceDetection.getDifference(_image1,_image2) print('Detection:' + str(percent))", "<EMAIL> # @Last modified by: stijnvanhulle # @Last modified time:", "is not None and _read is not None: item=str(json.dumps(parsed_json)) print(item)", "data = data[1:-1] print(data) return data def makeJsonOnlineObject(device=''): item=json.dumps({\"device\":device}) return", "#loop = trollius.get_event_loop() #_main = trollius.async(loop.run_in_executor(executor, main)) main() except (TypeError)", "modified time: 2016-12-20T12:51:07+01:00 # @License: stijnvanhulle.be #!/usr/bin/env python import time", "if data.startswith(\"'\") and data.endswith(\"'\"): data = data[1:-1] print(data) return data", "import time import datetime import math import sys import json", "data[1:-1] print(data) return data def makeJsonOnlineObject(device=''): item=json.dumps({\"device\":device}) return str(item) def", "Exception as error: print('Error:',error) if __name__ == '__main__': try: if", "#_main = trollius.async(loop.run_in_executor(executor, main)) main() except (TypeError) as ex: error=\"Error:", "Exception as error: print('Error:',error) def 
convertJson(data): data=data.decode() if data.startswith(\"'\") and", "_read is not None: item=str(json.dumps(parsed_json)) print(item) #client.publish(\"message\",item) client.publish(\"detection\",item) else: throw('Not", "str(item) def main(): init() while True: time.sleep(0.1) data = input(\"Code:\")", "60) client.loop_start() time.sleep(0.2) client.publish(\"online\", makeJsonOnlineObject('FaceDetection')) def makeJsonObject(value=None,port=None,type=None,read=False): item=json.dumps({\"port\":port, \"type\":type,\"value\":value,\"read\":read}) return", "def makeJsonObject(value=None,port=None,type=None,read=False): item=json.dumps({\"port\":port, \"type\":type,\"value\":value,\"read\":read}) return str(item) def makeJsonObject_detection(value=None,image1=None,image2=None,read=False): item=json.dumps({\"value\":value, \"image1\":image1,\"image2\":image2,", "MQTT_BROKER=input_text #executor = ProcessPoolExecutor(2) #loop = trollius.get_event_loop() #_main = trollius.async(loop.run_in_executor(executor,", "_port is not None and _read is not None: item=str(json.dumps(parsed_json))", "is not None: percent=faceDetection.getDifference(_image1,_image2) print('Detection:' + str(percent)) client.publish(\"detection_found\", makeJsonObject_detection(percent,_image1,_image2,_read)) if", "try: if data=='exit': exit() sys.exit(0) else: parsed_json=json.loads(convertJson(msg.payload)) _type =parsed_json['type'] _port=parsed_json['port']", "_type is not None and _port is not None and", "on_message client.connect_async(MQTT_BROKER, 1883, 60) client.loop_start() time.sleep(0.2) client.publish(\"online\", makeJsonOnlineObject('FaceDetection')) def makeJsonObject(value=None,port=None,type=None,read=False):", "if _type is not None and _port is not None", "'__main__': try: if len(sys.argv)>1: MQTT_BROKER=sys.argv[1] else: input_text = input(\"Ip of", "item=json.dumps({\"port\":port, \"type\":type,\"value\":value,\"read\":read}) return str(item) def makeJsonObject_detection(value=None,image1=None,image2=None,read=False): item=json.dumps({\"value\":value, \"image1\":image1,\"image2\":image2, \"read\":read}) return", "None: item=str(json.dumps(parsed_json)) print(item) #client.publish(\"message\",item) client.publish(\"detection\",item) else: throw('Not correct data') except", "+ str(ex) #print(error) except (KeyboardInterrupt): exit() print(\"\\nIOT is afgesloten\\n\") sys.exit(0)", "_read=parsed_json['read'] if _type is not None and _port is not", "# @Last modified by: stijnvanhulle # @Last modified time: 2016-12-20T12:51:07+01:00", "= mqtt.Client() #classes def on_connect(client, userdata, rc): print(\"Connected to MQTT-broker", "rc): print(\"Connected to MQTT-broker on \" + MQTT_BROKER ) client.subscribe(\"online\")", "#client.publish(\"message\",item) client.publish(\"detection\",item) else: throw('Not correct data') except Exception as error:", "json import paho.mqtt.client as mqtt import paho.mqtt.publish as publish import", "except Exception as error: print('Error:',error) def convertJson(data): data=data.decode() if data.startswith(\"'\")", "= on_message client.connect_async(MQTT_BROKER, 1883, 60) client.loop_start() time.sleep(0.2) client.publish(\"online\", makeJsonOnlineObject('FaceDetection')) def", "\"image1\":image1,\"image2\":image2, \"read\":read}) return str(item) def makeJsonObject_levelCalculate(data=None,score=0): item=json.dumps({\"data\":data,\"score\":score}) return str(item) def", "<stijnvanhulle> # @Date: 2016-11-28T13:51:38+01:00 # @Email: <EMAIL> # @Last modified", "error: 
print('Error:',error) if __name__ == '__main__': try: if len(sys.argv)>1: MQTT_BROKER=sys.argv[1]", "makeJsonObject(value=None,port=None,type=None,read=False): item=json.dumps({\"port\":port, \"type\":type,\"value\":value,\"read\":read}) return str(item) def makeJsonObject_detection(value=None,image1=None,image2=None,read=False): item=json.dumps({\"value\":value, \"image1\":image1,\"image2\":image2, \"read\":read})", "print('Error:',error) def convertJson(data): data=data.decode() if data.startswith(\"'\") and data.endswith(\"'\"): data =", "faceDetection import lib.levelCalculation as levelCalculation MQTT_BROKER=\"localhost\" client = mqtt.Client() #classes", "import lib.levelCalculation as levelCalculation MQTT_BROKER=\"localhost\" client = mqtt.Client() #classes def", "client.subscribe(\"detection_found\") client.subscribe(\"recalculate_start\") client.subscribe(\"recalculate_done\") def on_message(client, userdata, msg): try: parsed_json=json.loads(convertJson(msg.payload)) if", "input(\"Code:\") if data is not None: try: if data=='exit': exit()", "def makeJsonOnlineObject(device=''): item=json.dumps({\"device\":device}) return str(item) def init(): client.on_connect = on_connect", "main(): init() while True: time.sleep(0.1) data = input(\"Code:\") if data", "try: if len(sys.argv)>1: MQTT_BROKER=sys.argv[1] else: input_text = input(\"Ip of MQTT-broker:", "str(ex) #print(error) except (KeyboardInterrupt): exit() print(\"\\nIOT is afgesloten\\n\") sys.exit(0) except", "print('Detection:' + str(percent)) client.publish(\"detection_found\", makeJsonObject_detection(percent,_image1,_image2,_read)) if msg.topic==\"recalculate_start\": print(parsed_json) _data =parsed_json['data']", "# @Date: 2016-11-28T13:51:38+01:00 # @Email: <EMAIL> # @Last modified by:", "if data is not None: try: if data=='exit': exit() sys.exit(0)", "and _image2 is not None: percent=faceDetection.getDifference(_image1,_image2) print('Detection:' + str(percent)) client.publish(\"detection_found\",", "calcObj=levelCalculation.calculate(_data,_file) print('CalculatedOBJ:' + str(calcObj)) client.publish(\"recalculate_done\", makeJsonObject_levelCalculate(calcObj['data'],calcObj['score'])) except Exception as error:", "#print(error) except (KeyboardInterrupt): exit() print(\"\\nIOT is afgesloten\\n\") sys.exit(0) except (SystemExit):", "str(item) def makeJsonObject_detection(value=None,image1=None,image2=None,read=False): item=json.dumps({\"value\":value, \"image1\":image1,\"image2\":image2, \"read\":read}) return str(item) def makeJsonObject_levelCalculate(data=None,score=0):", "correct data') except Exception as error: print('Error:',error) if __name__ ==", "return str(item) def makeJsonObject_levelCalculate(data=None,score=0): item=json.dumps({\"data\":data,\"score\":score}) return str(item) def main(): init()", "=parsed_json['image2'] _read=parsed_json['read'] if _read: if _image1 is not None and", "data = input(\"Code:\") if data is not None: try: if", "2016-12-20T12:51:07+01:00 # @License: stijnvanhulle.be #!/usr/bin/env python import time import datetime", "client.publish(\"detection\",item) else: throw('Not correct data') except Exception as error: print('Error:',error)", "main() except (TypeError) as ex: error=\"Error: \" + str(ex) #print(error)", "error=\"Error: \" + str(ex) #print(error) except (KeyboardInterrupt): exit() print(\"\\nIOT is", "try: parsed_json=json.loads(convertJson(msg.payload)) if msg.topic==\"detection_find\": print(parsed_json) _image1 =parsed_json['image1'] _image2 =parsed_json['image2'] 
_read=parsed_json['read']", "MQTT-broker: \") if input_text: MQTT_BROKER=input_text #executor = ProcessPoolExecutor(2) #loop =", "init(): client.on_connect = on_connect client.on_message = on_message client.connect_async(MQTT_BROKER, 1883, 60)", "time import datetime import math import sys import json import", "= trollius.async(loop.run_in_executor(executor, main)) main() except (TypeError) as ex: error=\"Error: \"", "@Email: <EMAIL> # @Last modified by: stijnvanhulle # @Last modified", "except (TypeError) as ex: error=\"Error: \" + str(ex) #print(error) except", "_data =parsed_json['data'] _file=parsed_json['file'] if _data is not None: calcObj=levelCalculation.calculate(_data,_file) print('CalculatedOBJ:'", "except Exception as error: print('Error:',error) if __name__ == '__main__': try:", "True: time.sleep(0.1) data = input(\"Code:\") if data is not None:", "input_text = input(\"Ip of MQTT-broker: \") if input_text: MQTT_BROKER=input_text #executor", "if len(sys.argv)>1: MQTT_BROKER=sys.argv[1] else: input_text = input(\"Ip of MQTT-broker: \")", "makeJsonOnlineObject(device=''): item=json.dumps({\"device\":device}) return str(item) def init(): client.on_connect = on_connect client.on_message", "trollius.async(loop.run_in_executor(executor, main)) main() except (TypeError) as ex: error=\"Error: \" +", "print(\"\\nIOT is afgesloten\\n\") sys.exit(0) except (SystemExit): print(\"\\nIOT is geforceert afgelosten\\n\")", "on_connect client.on_message = on_message client.connect_async(MQTT_BROKER, 1883, 60) client.loop_start() time.sleep(0.2) client.publish(\"online\",", "_image2 is not None: percent=faceDetection.getDifference(_image1,_image2) print('Detection:' + str(percent)) client.publish(\"detection_found\", makeJsonObject_detection(percent,_image1,_image2,_read))", "not None and _image2 is not None: percent=faceDetection.getDifference(_image1,_image2) print('Detection:' +", "import json import paho.mqtt.client as mqtt import paho.mqtt.publish as publish", "import datetime import math import sys import json import paho.mqtt.client", "def main(): init() while True: time.sleep(0.1) data = input(\"Code:\") if", "paho.mqtt.publish as publish import lib.faceDetection as faceDetection import lib.levelCalculation as", "client.publish(\"detection_found\", makeJsonObject_detection(percent,_image1,_image2,_read)) if msg.topic==\"recalculate_start\": print(parsed_json) _data =parsed_json['data'] _file=parsed_json['file'] if _data", "_file=parsed_json['file'] if _data is not None: calcObj=levelCalculation.calculate(_data,_file) print('CalculatedOBJ:' + str(calcObj))", "def makeJsonObject_detection(value=None,image1=None,image2=None,read=False): item=json.dumps({\"value\":value, \"image1\":image1,\"image2\":image2, \"read\":read}) return str(item) def makeJsonObject_levelCalculate(data=None,score=0): item=json.dumps({\"data\":data,\"score\":score})", "_type =parsed_json['type'] _port=parsed_json['port'] _read=parsed_json['read'] if _type is not None and", "throw('Not correct data') except Exception as error: print('Error:',error) if __name__", "(TypeError) as ex: error=\"Error: \" + str(ex) #print(error) except (KeyboardInterrupt):", "return data def makeJsonOnlineObject(device=''): item=json.dumps({\"device\":device}) return str(item) def init(): client.on_connect", "stijnvanhulle.be #!/usr/bin/env python import time import datetime import math import", "+ str(calcObj)) client.publish(\"recalculate_done\", makeJsonObject_levelCalculate(calcObj['data'],calcObj['score'])) except Exception as error: 
print('Error:',error) def", "None and _read is not None: item=str(json.dumps(parsed_json)) print(item) #client.publish(\"message\",item) client.publish(\"detection\",item)", "return str(item) def init(): client.on_connect = on_connect client.on_message = on_message", "data=data.decode() if data.startswith(\"'\") and data.endswith(\"'\"): data = data[1:-1] print(data) return", "(KeyboardInterrupt): exit() print(\"\\nIOT is afgesloten\\n\") sys.exit(0) except (SystemExit): print(\"\\nIOT is", "mqtt.Client() #classes def on_connect(client, userdata, rc): print(\"Connected to MQTT-broker on" ]
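# For context: a minimal, hypothetical way to exercise the "detection_find" handler above
# from another process. It assumes a broker on localhost:1883 and uses placeholder image
# payloads; paho.mqtt.publish.single() performs a one-shot publish.
import json
import paho.mqtt.publish as publish

payload = json.dumps({
    "image1": "<path-or-base64-of-first-frame>",   # placeholder value
    "image2": "<path-or-base64-of-second-frame>",  # placeholder value
    "read": True,
})
publish.single("detection_find", payload, hostname="localhost", port=1883)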
[ "= sum(A) m = float('inf') left_sum = 0 for n", "n in A[:-1]: left_sum += n v = abs(total -", "m = float('inf') left_sum = 0 for n in A[:-1]:", "abs(total - 2*left_sum) if v < m: m = v", "n v = abs(total - 2*left_sum) if v < m:", "0 for n in A[:-1]: left_sum += n v =", "- 2*left_sum) if v < m: m = v return", "= abs(total - 2*left_sum) if v < m: m =", "float('inf') left_sum = 0 for n in A[:-1]: left_sum +=", "for n in A[:-1]: left_sum += n v = abs(total", "A[:-1]: left_sum += n v = abs(total - 2*left_sum) if", "solution(A): total = sum(A) m = float('inf') left_sum = 0", "v = abs(total - 2*left_sum) if v < m: m", "in A[:-1]: left_sum += n v = abs(total - 2*left_sum)", "sum(A) m = float('inf') left_sum = 0 for n in", "= 0 for n in A[:-1]: left_sum += n v", "total = sum(A) m = float('inf') left_sum = 0 for", "def solution(A): total = sum(A) m = float('inf') left_sum =", "+= n v = abs(total - 2*left_sum) if v <", "left_sum = 0 for n in A[:-1]: left_sum += n", "2*left_sum) if v < m: m = v return m", "= float('inf') left_sum = 0 for n in A[:-1]: left_sum", "left_sum += n v = abs(total - 2*left_sum) if v" ]
[ "str(pep_number) pep_number_string = re.sub(r'^0+', '', pep_number_string) pep_page.title = pep_content['title'] pep_page.content", "used as the directory index, but it's also an actual", "display purposes pep_number_string = str(pep_number) pep_number_string = re.sub(r'^0+', '', pep_number_string)", "path=\"dev/peps/peps.rss\", template_name=\"pages/raw.html\", ) with open(rss_feed, \"r\") as rss_content: content =", "unwanted headers and find our title \"\"\" header_rows = soup.find_all('th')", "{} -- {}\".format( pep_number, data['title'], ) data['content'] = soup.prettify() #", "suitable for a Python.org Page returns the core body HTML", "recreate if not os.path.exists(image_root_path): MISSING = image break if not", "disk, recreate if not os.path.exists(image_root_path): MISSING = image break if", "header_rows: if 'Version:' in t.text: if t.next_sibling.text == '$Revision$': t.parent.extract()", "if 'Last-Modified:' in t.text: if '$Date$'in t.next_sibling.text: t.parent.extract() if t.next_sibling.text", "soup = BeautifulSoup(page.content.raw) for img_tag in soup.findAll('img'): if img_tag['src'] ==", "in t.text and 'N/A' in t.next_sibling.text: t.parent.extract() return ''.join([header.prettify(), pep_content.prettify()])", "or MISSING: image = None if MISSING: image = MISSING", "print(\"Image Path '{}' does not exist, skipping\".format(image_path)) try: page =", "= BeautifulSoup(content) data['title'] = soup.title.text if not re.search(r'PEP \\d+', data['title']):", "data['title'], ) data['content'] = soup.prettify() # Fix PEP links pep_content", "Page, Image PEP_TEMPLATE = 'pages/pep-page.html' pep_url = lambda num: 'dev/peps/pep-{}/'.format(num)", "= pep_content.prettify() hg_link = \"https://hg.python.org/peps/file/tip/pep-{0}.txt\".format(pep_number) data['content'] += \"\"\"Source: <a href=\"{0}\">{0}</a>\"\"\".format(hg_link)", "= soup.body.find('div', class_=\"header\") header, data = fix_headers(header, data) data['header'] =", "path: img_tag['src'] = os.path.join(settings.MEDIA_URL, page.path, path) page.content.raw = soup.prettify() page.save()", "= '/dev/peps/pep-{}/'.format(m.group(1)) data['content'] = pep_content.prettify() hg_link = \"https://hg.python.org/peps/file/tip/pep-{0}.txt\".format(pep_number) data['content'] +=", "BeautifulSoup(content) soup, data = fix_headers(soup, data) if not data['title']: data['title']", "Given a pep_number retrieve original PEP source text, rst, or", "+= \"\"\"Source: <a href=\"{0}\">{0}</a>\"\"\".format(hg_link) return data def get_pep_page(pep_number, commit=True): \"\"\"", "'<html>' in content: soup = BeautifulSoup(content) data['title'] = soup.title.text if", "not data['title']: data['title'] = \"PEP {} -- \".format(pep_number) else: if", "ImproperlyConfigured(\"PEP_REPO_PATH in settings does not exist\") def convert_pep0(): \"\"\" Take", "\\d+', data['title']): data['title'] = \"PEP {} -- {}\".format( pep_number, data['title'],", "= fix_headers(header, data) data['header'] = header.prettify() main_content = soup.body.find('div', class_=\"content\")", "def convert_pep_page(pep_number, content): \"\"\" Handle different formats that pep2html.py outputs", "purposes pep_number_string = str(pep_number) pep_number_string = re.sub(r'^0+', '', pep_number_string) pep_page.title", "in different # places, so update the page accordingly. 
soup", "data def get_pep_page(pep_number, commit=True): \"\"\" Given a pep_number retrieve original", "return image def get_peps_rss(): rss_feed = os.path.join(settings.PEP_REPO_PATH, 'peps.rss') if not", "data) data['header'] = header.prettify() main_content = soup.body.find('div', class_=\"content\") data['main_content'] =", "not find backing PEP {}\".format(pep_number)) return # Find existing images,", "} if '<html>' in content: soup = BeautifulSoup(content) data['title'] =", "'html' pep_page.template_name = PEP_TEMPLATE if commit: pep_page.save() return pep_page def", "does not exist, skipping\".format(image_path)) try: page = Page.objects.get(path=pep_url(pep_number)) except Page.DoesNotExist:", "settings from django.core.exceptions import ImproperlyConfigured from django.core.files import File from", "hasattr(settings, 'PEP_REPO_PATH'): raise ImproperlyConfigured(\"No PEP_REPO_PATH in settings\") if not os.path.exists(settings.PEP_REPO_PATH):", "= pep_href_re.search(b.attrs['href']) # Skip anything not matching 'pep-XXXX.html' if not", "# places, so update the page accordingly. soup = BeautifulSoup(page.content.raw)", "if not m: continue b.attrs['href'] = '/dev/peps/pep-{}/'.format(m.group(1)) data['content'] = pep_content.prettify()", "alongside html, but now they're in different # places, so", "PEP_TEMPLATE if commit: pep_page.save() return pep_page def add_pep_image(pep_number, path): image_path", "-- {}'.format( pep_number, soup.title.text, ) header = soup.body.find('div', class_=\"header\") header,", "image.image.path.endswith(path): FOUND = True # File is missing on disk,", "img_tag in soup.findAll('img'): if img_tag['src'] == path: img_tag['src'] = os.path.join(settings.MEDIA_URL,", "t.parent.extract() if 'Version:' in t.text and 'N/A' in t.next_sibling.text: t.parent.extract()", "File is missing on disk, recreate if not os.path.exists(image_root_path): MISSING", "convert_pep_page(pep_number, content): \"\"\" Handle different formats that pep2html.py outputs \"\"\"", "content page.is_published = True page.content_type = \"application/rss+xml\" page.save() return page", "re.sub(r'^0+', '', pep_number_string) pep_page.title = pep_content['title'] pep_page.content = pep_content['content'] pep_page.content_markup_type", "soup.prettify() page.save() return image def get_peps_rss(): rss_feed = os.path.join(settings.PEP_REPO_PATH, 'peps.rss')", "but it's also an actual pep, so we return both", "open(image_path, 'rb') as image_obj: image.image.save(path, File(image_obj)) image.save() # Old images", "def convert_pep0(): \"\"\" Take existing generated pep-0000.html and convert to", "pep_page.content_markup_type = 'html' pep_page.template_name = PEP_TEMPLATE if commit: pep_page.save() return", "to something suitable for a Python.org Page returns the core", "= 'html' page.title = \"PEP 0 -- Index of Python", "commit: page.save() return pep0_page, pep0000_page def fix_headers(soup, data): \"\"\" Remove", "Enhancement Proposals (PEPs)\" page.template_name = PEP_TEMPLATE if commit: page.save() return", "page.path, path) page.content.raw = soup.prettify() page.save() return image def get_peps_rss():", "{} -- \".format(pep_number) else: if not re.search(r'PEP \\d+', data['title']): data['title']", "import settings from django.core.exceptions import ImproperlyConfigured from django.core.files import File", "== '$Revision$': t.parent.extract() if t.next_sibling.text == '': t.parent.extract() if 'Last-Modified:'", "Old images used to live alongside html, but now they're", "in content: soup = 
BeautifulSoup(content) data['title'] = soup.title.text if not", "fix_headers(soup, data) if not data['title']: data['title'] = \"PEP {} --", "both Page objects. \"\"\" pep0_content = convert_pep0() pep0_page, _ =", "rss_content: content = rss_content.read() page.content = content page.is_published = True", "Handle different formats that pep2html.py outputs \"\"\" check_paths() data =", "pep_content.find_all(\"a\") pep_href_re = re.compile(r'pep-(\\d+)\\.html') for b in body_links: m =", "# File is missing on disk, recreate if not os.path.exists(image_root_path):", "ensure our PEP_REPO_PATH is setup correctly \"\"\" if not hasattr(settings,", "raise ImproperlyConfigured(\"No PEP_REPO_PATH in settings\") if not os.path.exists(settings.PEP_REPO_PATH): raise ImproperlyConfigured(\"PEP_REPO_PATH", "return it \"\"\" pep_path = os.path.join(settings.PEP_REPO_PATH, 'pep-{}.html'.format(pep_number)) if not os.path.exists(pep_path):", "data = fix_headers(soup, data) if not data['title']: data['title'] = \"PEP", "\".format(pep_number) else: if not re.search(r'PEP \\d+', data['title']): data['title'] = \"PEP", "= BeautifulSoup(page.content.raw) for img_tag in soup.findAll('img'): if img_tag['src'] == path:", "{ 'title': None, } if '<html>' in content: soup =", "m: continue b.attrs['href'] = '/dev/peps/pep-{}/'.format(m.group(1)) # Remove Version from header", "the directory index, but it's also an actual pep, so", "MISSING = False FOUND = False for image in existing_images:", "= os.path.join(settings.PEP_REPO_PATH, 'pep-0000.html') pep0_content = open(pep0_path).read() soup = BeautifulSoup(pep0_content) body_children", "= t.next_sibling.text if t.text == 'Content-Type:': t.parent.extract() if 'Version:' in", "formats that pep2html.py outputs \"\"\" check_paths() data = { 'title':", ") data['content'] = soup.prettify() # Fix PEP links pep_content =", "to loop here as we can't use the ORM #", "in t.next_sibling.text: t.parent.extract() return ''.join([header.prettify(), pep_content.prettify()]) def get_pep0_page(commit=True): \"\"\" Using", "MISSING: image = None if MISSING: image = MISSING else:", "os.path.exists(pep_path): print(\"PEP Path '{}' does not exist, skipping\".format(pep_path)) pep_content =", "pep_url = lambda num: 'dev/peps/pep-{}/'.format(num) def check_paths(): \"\"\" Checks to", "if commit: page.save() return pep0_page, pep0000_page def fix_headers(soup, data): \"\"\"", "# to query against image__path existing_images = Image.objects.filter(page=page) MISSING =", "header, data = fix_headers(header, data) data['header'] = header.prettify() main_content =", "settings\") if not os.path.exists(settings.PEP_REPO_PATH): raise ImproperlyConfigured(\"PEP_REPO_PATH in settings does not", "so we return both Page objects. \"\"\" pep0_content = convert_pep0()", "not os.path.exists(image_path): print(\"Image Path '{}' does not exist, skipping\".format(image_path)) try:", "or html. 
Get or create the associated Page and return", "'html' page.title = \"PEP 0 -- Index of Python Enhancement", "'Version:' in t.text and 'N/A' in t.next_sibling.text: t.parent.extract() return soup,", "if '$Date$'in t.next_sibling.text: t.parent.extract() if t.next_sibling.text == '': t.parent.extract() if", "path) if image.image.path.endswith(path): FOUND = True # File is missing", "return ''.join([header.prettify(), pep_content.prettify()]) def get_pep0_page(commit=True): \"\"\" Using convert_pep0 above, create", "from django.conf import settings from django.core.exceptions import ImproperlyConfigured from django.core.files", "'pep-{}.html'.format(pep_number)) if not os.path.exists(pep_path): print(\"PEP Path '{}' does not exist,", "= str(pep_number) pep_number_string = re.sub(r'^0+', '', pep_number_string) pep_page.title = pep_content['title']", "the page accordingly. soup = BeautifulSoup(page.content.raw) for img_tag in soup.findAll('img'):", "= rss_content.read() page.content = content page.is_published = True page.content_type =", "Python.org Page returns the core body HTML necessary only \"\"\"", "{} -- {}'.format( pep_number, soup.title.text, ) header = soup.body.find('div', class_=\"header\")", "image def get_peps_rss(): rss_feed = os.path.join(settings.PEP_REPO_PATH, 'peps.rss') if not os.path.exists(rss_feed):", "body_children[3] pep_content = body_children[7] # Fix PEP links body_links =", "Page.objects.get_or_create(path='dev/peps/pep-0000/') for page in [pep0_page, pep0000_page]: page.content = pep0_content page.content_markup_type", "in [pep0_page, pep0000_page]: page.content = pep0_content page.content_markup_type = 'html' page.title", "t.text: if t.next_sibling.text == '$Revision$': t.parent.extract() if t.next_sibling.text == '':", "PEP_REPO_PATH is setup correctly \"\"\" if not hasattr(settings, 'PEP_REPO_PATH'): raise", "_ = Page.objects.get_or_create(path='dev/peps/pep-0000/') for page in [pep0_page, pep0000_page]: page.content =", "used to live alongside html, but now they're in different", "os.path.join(settings.PEP_REPO_PATH, 'pep-0000.html') pep0_content = open(pep0_path).read() soup = BeautifulSoup(pep0_content) body_children =", "bs4 import BeautifulSoup from django.conf import settings from django.core.exceptions import", "pep_number_string = str(pep_number) pep_number_string = re.sub(r'^0+', '', pep_number_string) pep_page.title =", "# Find existing images, we have to loop here as", "a Python.org Page returns the core body HTML necessary only", "= pep0_content page.content_markup_type = 'html' page.title = \"PEP 0 --", "os.path.exists(image_root_path): MISSING = image break if not FOUND or MISSING:", "we have to loop here as we can't use the", "image in existing_images: image_root_path = os.path.join(settings.MEDIA_ROOT, page.path, path) if image.image.path.endswith(path):", "re.search(r'PEP \\d+', data['title']): data['title'] = 'PEP {} -- {}'.format( pep_number,", "= pep_content['title'] pep_page.content = pep_content['content'] pep_page.content_markup_type = 'html' pep_page.template_name =", "= Page.objects.get(path=pep_url(pep_number)) except Page.DoesNotExist: print(\"Could not find backing PEP {}\".format(pep_number))", "Grab header and PEP body header = body_children[3] pep_content =", "PEP_TEMPLATE = 'pages/pep-page.html' pep_url = lambda num: 'dev/peps/pep-{}/'.format(num) def check_paths():", "Checks to ensure our PEP_REPO_PATH is setup correctly \"\"\" if", "and 'N/A' in t.next_sibling.text: t.parent.extract() return soup, data def 
convert_pep_page(pep_number,", "if not re.search(r'PEP \\d+', data['title']): data['title'] = 'PEP {} --", "'Version:' in t.text: if t.next_sibling.text == '$Revision$': t.parent.extract() if t.next_sibling.text", "content = rss_content.read() page.content = content page.is_published = True page.content_type", "MISSING else: image = Image(page=page) with open(image_path, 'rb') as image_obj:", "pep_content = convert_pep_page(pep_number, open(pep_path).read()) pep_page, _ = Page.objects.get_or_create(path=pep_url(pep_number)) # Remove", "t.next_sibling.text == '$Revision$': t.parent.extract() if t.next_sibling.text == '': t.parent.extract() if", "if img_tag['src'] == path: img_tag['src'] = os.path.join(settings.MEDIA_URL, page.path, path) page.content.raw", "image_root_path = os.path.join(settings.MEDIA_ROOT, page.path, path) if image.image.path.endswith(path): FOUND = True", "pep_number_string) pep_page.title = pep_content['title'] pep_page.content = pep_content['content'] pep_page.content_markup_type = 'html'", "django.core.exceptions import ImproperlyConfigured from django.core.files import File from pages.models import", "add_pep_image(pep_number, path): image_path = os.path.join(settings.PEP_REPO_PATH, path) if not os.path.exists(image_path): print(\"Image", "pep_path = os.path.join(settings.PEP_REPO_PATH, 'pep-{}.html'.format(pep_number)) if not os.path.exists(pep_path): print(\"PEP Path '{}'", "soup.body.find('div', class_=\"content\") data['main_content'] = main_content.prettify() data['content'] = ''.join([ data['header'], data['main_content']", "is missing on disk, recreate if not os.path.exists(image_root_path): MISSING =", "BeautifulSoup(content) data['title'] = soup.title.text if not re.search(r'PEP \\d+', data['title']): data['title']", "= Page.objects.get_or_create( path=\"dev/peps/peps.rss\", template_name=\"pages/raw.html\", ) with open(rss_feed, \"r\") as rss_content:", "rss_content.read() page.content = content page.is_published = True page.content_type = \"application/rss+xml\"", "content: soup = BeautifulSoup(content) data['title'] = soup.title.text if not re.search(r'PEP", "matching 'pep-XXXX.html' if not m: continue b.attrs['href'] = '/dev/peps/pep-{}/'.format(m.group(1)) data['content']", "check_paths(): \"\"\" Checks to ensure our PEP_REPO_PATH is setup correctly", "data['header'], data['main_content'] ]) else: soup = BeautifulSoup(content) soup, data =", "if not os.path.exists(image_path): print(\"Image Path '{}' does not exist, skipping\".format(image_path))", "'Content-Type:': t.parent.extract() if 'Version:' in t.text and 'N/A' in t.next_sibling.text:", "PEP body header = body_children[3] pep_content = body_children[7] # Fix", "os.path.join(settings.PEP_REPO_PATH, 'peps.rss') if not os.path.exists(rss_feed): return page, _ = Page.objects.get_or_create(", "Image PEP_TEMPLATE = 'pages/pep-page.html' pep_url = lambda num: 'dev/peps/pep-{}/'.format(num) def", "= \"PEP 0 -- Index of Python Enhancement Proposals (PEPs)\"", "BeautifulSoup(page.content.raw) for img_tag in soup.findAll('img'): if img_tag['src'] == path: img_tag['src']", "and 'N/A' in t.next_sibling.text: t.parent.extract() return ''.join([header.prettify(), pep_content.prettify()]) def get_pep0_page(commit=True):", "empty or unwanted headers and find our title \"\"\" header_rows", "if not os.path.exists(rss_feed): return page, _ = Page.objects.get_or_create( path=\"dev/peps/peps.rss\", template_name=\"pages/raw.html\",", "skipping\".format(pep_path)) pep_content = convert_pep_page(pep_number, 
# PEP page conversion utilities for python.org (Django "pages" CMS app),
# reassembled from the shuffled n-gram shingles of this row.
import re
import os

from bs4 import BeautifulSoup

from django.conf import settings
from django.core.exceptions import ImproperlyConfigured
from django.core.files import File

from pages.models import Page, Image

PEP_TEMPLATE = 'pages/pep-page.html'
pep_url = lambda num: 'dev/peps/pep-{}/'.format(num)


def check_paths():
    """ Checks to ensure our PEP_REPO_PATH is setup correctly """
    if not hasattr(settings, 'PEP_REPO_PATH'):
        raise ImproperlyConfigured("No PEP_REPO_PATH in settings")
    if not os.path.exists(settings.PEP_REPO_PATH):
        raise ImproperlyConfigured("PEP_REPO_PATH in settings does not exist")


def convert_pep0():
    """
    Take existing generated pep-0000.html and convert to something suitable
    for a Python.org Page; returns the core body HTML necessary only
    """
    check_paths()
    pep0_path = os.path.join(settings.PEP_REPO_PATH, 'pep-0000.html')
    pep0_content = open(pep0_path).read()

    soup = BeautifulSoup(pep0_content)
    body_children = list(soup.body.children)

    # Grab header and PEP body
    header = body_children[3]
    pep_content = body_children[7]

    # Fix PEP links
    body_links = pep_content.find_all("a")
    pep_href_re = re.compile(r'pep-(\d+)\.html')

    for b in body_links:
        m = pep_href_re.search(b.attrs['href'])

        # Skip anything not matching 'pep-XXXX.html'
        if not m:
            continue

        b.attrs['href'] = '/dev/peps/pep-{}/'.format(m.group(1))

    # Remove Version from header
    header_rows = header.find_all('th')
    for t in header_rows:
        if 'Version:' in t.text and 'N/A' in t.next_sibling.text:
            t.parent.extract()

    return ''.join([header.prettify(), pep_content.prettify()])


def get_pep0_page(commit=True):
    """
    Using convert_pep0 above, create a CMS ready pep0 page and return it

    pep0 is used as the directory index, but it's also an actual pep, so we
    return both Page objects.
    """
    pep0_content = convert_pep0()
    pep0_page, _ = Page.objects.get_or_create(path='dev/peps/')
    pep0000_page, _ = Page.objects.get_or_create(path='dev/peps/pep-0000/')
    for page in [pep0_page, pep0000_page]:
        page.content = pep0_content
        page.content_markup_type = 'html'
        page.title = "PEP 0 -- Index of Python Enhancement Proposals (PEPs)"
        page.template_name = PEP_TEMPLATE

        if commit:
            page.save()

    return pep0_page, pep0000_page


def fix_headers(soup, data):
    """ Remove empty or unwanted headers and find our title """
    header_rows = soup.find_all('th')
    for t in header_rows:
        if 'Version:' in t.text:
            if t.next_sibling.text == '$Revision$':
                t.parent.extract()
            if t.next_sibling.text == '':
                t.parent.extract()
        if 'Last-Modified:' in t.text:
            if '$Date$' in t.next_sibling.text:
                t.parent.extract()
            if t.next_sibling.text == '':
                t.parent.extract()
        if t.text == 'Title:':
            data['title'] = t.next_sibling.text
        if t.text == 'Content-Type:':
            t.parent.extract()
        if 'Version:' in t.text and 'N/A' in t.next_sibling.text:
            t.parent.extract()

    return soup, data


def convert_pep_page(pep_number, content):
    """ Handle different formats that pep2html.py outputs """
    check_paths()
    data = {
        'title': None,
    }

    if '<html>' in content:
        soup = BeautifulSoup(content)
        data['title'] = soup.title.text

        if not re.search(r'PEP \d+', data['title']):
            data['title'] = 'PEP {} -- {}'.format(
                pep_number,
                soup.title.text,
            )

        header = soup.body.find('div', class_="header")
        header, data = fix_headers(header, data)
        data['header'] = header.prettify()

        main_content = soup.body.find('div', class_="content")
        data['main_content'] = main_content.prettify()

        data['content'] = ''.join([
            data['header'],
            data['main_content']
        ])
    else:
        soup = BeautifulSoup(content)
        soup, data = fix_headers(soup, data)

        if not data['title']:
            data['title'] = "PEP {} -- ".format(pep_number)
        else:
            if not re.search(r'PEP \d+', data['title']):
                data['title'] = "PEP {} -- {}".format(
                    pep_number,
                    data['title'],
                )

        data['content'] = soup.prettify()

    # Fix PEP links
    pep_content = BeautifulSoup(data['content'])
    body_links = pep_content.find_all("a")
    pep_href_re = re.compile(r'pep-(\d+)\.html')

    for b in body_links:
        m = pep_href_re.search(b.attrs['href'])

        # Skip anything not matching 'pep-XXXX.html'
        if not m:
            continue

        b.attrs['href'] = '/dev/peps/pep-{}/'.format(m.group(1))

    data['content'] = pep_content.prettify()

    hg_link = "https://hg.python.org/peps/file/tip/pep-{0}.txt".format(pep_number)
    data['content'] += """Source: <a href="{0}">{0}</a>""".format(hg_link)

    return data


def get_pep_page(pep_number, commit=True):
    """
    Given a pep_number retrieve original PEP source text, rst, or html.
    Get or create the associated Page and return it
    """
    pep_path = os.path.join(settings.PEP_REPO_PATH, 'pep-{}.html'.format(pep_number))
    if not os.path.exists(pep_path):
        print("PEP Path '{}' does not exist, skipping".format(pep_path))

    pep_content = convert_pep_page(pep_number, open(pep_path).read())
    pep_page, _ = Page.objects.get_or_create(path=pep_url(pep_number))

    # Remove leading zeros from PEP number for display purposes
    pep_number_string = str(pep_number)
    pep_number_string = re.sub(r'^0+', '', pep_number_string)

    pep_page.title = pep_content['title']
    pep_page.content = pep_content['content']
    pep_page.content_markup_type = 'html'
    pep_page.template_name = PEP_TEMPLATE

    if commit:
        pep_page.save()

    return pep_page


def add_pep_image(pep_number, path):
    image_path = os.path.join(settings.PEP_REPO_PATH, path)
    if not os.path.exists(image_path):
        print("Image Path '{}' does not exist, skipping".format(image_path))

    try:
        page = Page.objects.get(path=pep_url(pep_number))
    except Page.DoesNotExist:
        print("Could not find backing PEP {}".format(pep_number))
        return

    # Find existing images, we have to loop here as we can't use the ORM
    # to query against image__path
    existing_images = Image.objects.filter(page=page)
    MISSING = False
    FOUND = False
    for image in existing_images:
        image_root_path = os.path.join(settings.MEDIA_ROOT, page.path, path)
        if image.image.path.endswith(path):
            FOUND = True
            # File is missing on disk, recreate
            if not os.path.exists(image_root_path):
                MISSING = image
            break

    if not FOUND or MISSING:
        image = None
        if MISSING:
            image = MISSING
        else:
            image = Image(page=page)

        with open(image_path, 'rb') as image_obj:
            image.image.save(path, File(image_obj))
        image.save()

    # Old images used to live alongside html, but now they're in different
    # places, so update the page accordingly.
    soup = BeautifulSoup(page.content.raw)
    for img_tag in soup.findAll('img'):
        if img_tag['src'] == path:
            img_tag['src'] = os.path.join(settings.MEDIA_URL, page.path, path)

    page.content.raw = soup.prettify()
    page.save()

    return image


def get_peps_rss():
    rss_feed = os.path.join(settings.PEP_REPO_PATH, 'peps.rss')
    if not os.path.exists(rss_feed):
        return

    page, _ = Page.objects.get_or_create(
        path="dev/peps/peps.rss",
        template_name="pages/raw.html",
    )

    with open(rss_feed, "r") as rss_content:
        content = rss_content.read()

    page.content = content
    page.is_published = True
    page.content_type = "application/rss+xml"
    page.save()

    return page
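A module like this is typically driven by a one-shot import job. The sketch below shows one plausible way to wire it up as a Django management command; the command class, the file-discovery regex, and the module path peps.converters are illustrative assumptions, and only the get_pep0_page/get_pep_page/get_peps_rss functions come from the module itself.

# Hypothetical management command driving the converters above.
# Assumptions: the module is importable as peps.converters, and the PEP
# checkout at settings.PEP_REPO_PATH holds the pep2html.py output files.
import os
import re

from django.conf import settings
from django.core.management.base import BaseCommand

from peps.converters import get_pep0_page, get_pep_page, get_peps_rss


class Command(BaseCommand):
    help = "Convert generated PEP HTML into CMS Page objects"

    def handle(self, **options):
        get_pep0_page()  # PEP 0 doubles as the directory index
        for name in sorted(os.listdir(settings.PEP_REPO_PATH)):
            m = re.match(r'pep-(\d{4})\.html$', name)
            if m and m.group(1) != '0000':
                get_pep_page(m.group(1))
        get_peps_rss()  # publish the peps.rss feed as a raw page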
[ "import Flask app = Flask(__name__, static_folder='static') from app import routes", "from flask import Flask app = Flask(__name__, static_folder='static') from app", "flask import Flask app = Flask(__name__, static_folder='static') from app import" ]
[ "\"+hexstr+\"\\nExpected: \"+expected) b = capdu.to_bytes() assert(type(b) is bytes) return (hexstr,", "+= \"0000\" else: hexstr += \"%04X\"%LE elif LE == 0x10000:", "P2=0x33)) check(\"00 11 22 33\", CAPDU(CLA=0x00, INS=0x11, P1=0x22, P2=0x33)) check(\"0x00,0x11,0x22,0x33\",", "tolerate less well formed inputs check(\"00-11,22_33\", CAPDU(CLA=0x00, INS=0x11, P1=0x22, P2=0x33))", "if LE == 0x10000: hexstr += \"0000\" else: hexstr +=", "if __name__ == \"__main__\": def check(hexstr, expected): capdu = CAPDU.from_hexstr(hexstr)", "[0,1,2,254,255,256,257,65534,65535] LE_cases = LC_cases + [65536] for LC in LC_cases:", "and LE>0 case4e = case4 and (LC>0xFF or LE>0x100) if", "if case4e: if LE == 0x10000: hexstr += \"0000\" else:", "elif LE == 0x10000: hexstr += \"000000\" elif LE>0x100: hexstr", "hexstr += pysatl.Utils.hexstr(data, separator=\"\") if LE>0: if case4e: if LE", "pysatl.Utils.hexstr(data, separator=\"\") if LE>0: if case4e: if LE == 0x10000:", "case4e: if LE == 0x10000: hexstr += \"0000\" else: hexstr", "2 304\", CAPDU(CLA=0x01, INS=0x02, P1=0x03, P2=0x04)) LC_cases = [0,1,2,254,255,256,257,65534,65535] LE_cases", "case4 and (LC>0xFF or LE>0x100) if LC>0: if LC>0xFF or", "= case4 and (LC>0xFF or LE>0x100) if LC>0: if LC>0xFF", "0x100: hexstr += \"00\" else: hexstr += \"%02X\" % LE", "check(\"0x00,0x11,0x22,0x33\", CAPDU(CLA=0x00, INS=0x11, P1=0x22, P2=0x33)) #check we tolerate less well", "formed inputs check(\"00-11,22_33\", CAPDU(CLA=0x00, INS=0x11, P1=0x22, P2=0x33)) check(\"\"\"0x00 0x11 0x22", "0x10000) data = os.getrandom(LC) hexstr = \"00112233\" case4 = LC>0", "LE_cases = LC_cases + [65536] for LC in LC_cases: for", "+= pysatl.Utils.hexstr(data, separator=\"\") if LE>0: if case4e: if LE ==", "expected: raise Exception(\"Mismatch for LC=%d, LE=%d\"%(LC,LE)+\"\\nActual: \"+hexstr+\"\\nExpected: \"+expected) b =", "% LE expected = hexstr capdu = CAPDU(CLA=0x00, INS=0x11, P1=0x22,", "capdu = CAPDU(CLA=0x00, INS=0x11, P1=0x22, P2=0x33, DATA=data, LE=LE) hexstr =", "\"+str(capdu)+\"\\nExpected: \"+str(expected)) def gencase(* ,LC ,LE): assert(LC < 0x10000) assert(LE", "CAPDU.from_hexstr(hexstr) if capdu != expected: raise Exception(\"Mismatch for input '\"+hexstr+\"'\\nActual:", "P1=0x22, P2=0x33)) check(\"\"\"0x00 0x11 0x22 0x33\"\"\", CAPDU(CLA=0x00, INS=0x11, P1=0x22, P2=0x33))", "'\"+hexstr+\"'\\nActual: \"+str(capdu)+\"\\nExpected: \"+str(expected)) def gencase(* ,LC ,LE): assert(LC < 0x10000)", "P2=0x33)) check(\"1 2 304\", CAPDU(CLA=0x01, INS=0x02, P1=0x03, P2=0x04)) LC_cases =", "hexstr += \"00%04X\"%LC else: hexstr += \"%02X\" % LC hexstr", "CAPDU(CLA=0x01, INS=0x02, P1=0x03, P2=0x04)) LC_cases = [0,1,2,254,255,256,257,65534,65535] LE_cases = LC_cases", "data = os.getrandom(LC) hexstr = \"00112233\" case4 = LC>0 and", "else: hexstr += \"%04X\"%LE elif LE == 0x10000: hexstr +=", "hexstr += \"%02X\" % LE expected = hexstr capdu =", "INS=0x11, P1=0x22, P2=0x33)) check(\"00 11 22 33\", CAPDU(CLA=0x00, INS=0x11, P1=0x22,", "<= 0x10000) data = os.getrandom(LC) hexstr = \"00112233\" case4 =", "CAPDU(CLA=0x00, INS=0x11, P1=0x22, P2=0x33)) #check we tolerate less well formed", "P1=0x22, P2=0x33)) check(\"1 2 304\", CAPDU(CLA=0x01, INS=0x02, P1=0x03, P2=0x04)) LC_cases", "b = capdu.to_bytes() assert(type(b) is bytes) return (hexstr, capdu) #check", "(LC>0xFF or LE>0x100) if LC>0: if LC>0xFF or case4e: hexstr", "hexstr = capdu.to_hexstr() if hexstr != expected: raise Exception(\"Mismatch for", "assert(expected==repr(capdu)) #check well formed inputs check(\"00112233\", 
CAPDU(CLA=0x00, INS=0x11, P1=0x22, P2=0x33))", "\"pysatl.CAPDU.from_hexstr('00112233015502')\" capdu=None exec(\"capdu=\"+expected) assert(expected==repr(capdu)) #check well formed inputs check(\"00112233\", CAPDU(CLA=0x00,", "= \"pysatl.CAPDU.from_hexstr('00112233015502')\" capdu=None exec(\"capdu=\"+expected) assert(expected==repr(capdu)) #check well formed inputs check(\"00112233\",", "+= \"%02X\" % LC hexstr += pysatl.Utils.hexstr(data, separator=\"\") if LE>0:", "__repr__ expected = \"pysatl.CAPDU.from_hexstr('00112233015502')\" capdu=None exec(\"capdu=\"+expected) assert(expected==repr(capdu)) #check well formed", "hexstr != expected: raise Exception(\"Mismatch for LC=%d, LE=%d\"%(LC,LE)+\"\\nActual: \"+hexstr+\"\\nExpected: \"+expected)", "well formed inputs check(\"00112233\", CAPDU(CLA=0x00, INS=0x11, P1=0x22, P2=0x33)) check(\"00 11", "pysatl from pysatl import CAPDU if __name__ == \"__main__\": def", "P2=0x33, DATA=data, LE=LE) hexstr = capdu.to_hexstr() if hexstr != expected:", "capdu=None exec(\"capdu=\"+expected) assert(expected==repr(capdu)) #check well formed inputs check(\"00112233\", CAPDU(CLA=0x00, INS=0x11,", "__name__ == \"__main__\": def check(hexstr, expected): capdu = CAPDU.from_hexstr(hexstr) if", "LC in LC_cases: for LE in LE_cases: print(LC,LE) check(*gencase(LC=LC, LE=LE))", "return (hexstr, capdu) #check __repr__ expected = \"pysatl.CAPDU.from_hexstr('00112233015502')\" capdu=None exec(\"capdu=\"+expected)", "+ [65536] for LC in LC_cases: for LE in LE_cases:", "== 0x100: hexstr += \"00\" else: hexstr += \"%02X\" %", "P1=0x22, P2=0x33)) check(\"0x00,0x11,0x22,0x33\", CAPDU(CLA=0x00, INS=0x11, P1=0x22, P2=0x33)) #check we tolerate", "capdu.to_bytes() assert(type(b) is bytes) return (hexstr, capdu) #check __repr__ expected", "\"+expected) b = capdu.to_bytes() assert(type(b) is bytes) return (hexstr, capdu)", "P1=0x22, P2=0x33)) #check we tolerate less well formed inputs check(\"00-11,22_33\",", "LC_cases + [65536] for LC in LC_cases: for LE in", ",LE): assert(LC < 0x10000) assert(LE <= 0x10000) data = os.getrandom(LC)", "LE == 0x10000: hexstr += \"0000\" else: hexstr += \"%04X\"%LE", "os import pysatl from pysatl import CAPDU if __name__ ==", "capdu != expected: raise Exception(\"Mismatch for input '\"+hexstr+\"'\\nActual: \"+str(capdu)+\"\\nExpected: \"+str(expected))", "check(\"1 2 304\", CAPDU(CLA=0x01, INS=0x02, P1=0x03, P2=0x04)) LC_cases = [0,1,2,254,255,256,257,65534,65535]", "= LC>0 and LE>0 case4e = case4 and (LC>0xFF or", "LE>0x100) if LC>0: if LC>0xFF or case4e: hexstr += \"00%04X\"%LC", "CAPDU(CLA=0x00, INS=0x11, P1=0x22, P2=0x33)) check(\"1 2 304\", CAPDU(CLA=0x01, INS=0x02, P1=0x03,", "(hexstr, capdu) #check __repr__ expected = \"pysatl.CAPDU.from_hexstr('00112233015502')\" capdu=None exec(\"capdu=\"+expected) assert(expected==repr(capdu))", "import pysatl from pysatl import CAPDU if __name__ == \"__main__\":", "import CAPDU if __name__ == \"__main__\": def check(hexstr, expected): capdu", "22 33\", CAPDU(CLA=0x00, INS=0x11, P1=0x22, P2=0x33)) check(\"0x00,0x11,0x22,0x33\", CAPDU(CLA=0x00, INS=0x11, P1=0x22,", "\"%04X\"%LE elif LE == 0x10000: hexstr += \"000000\" elif LE>0x100:", "case4e: hexstr += \"00%04X\"%LC else: hexstr += \"%02X\" % LC", "LE == 0x10000: hexstr += \"000000\" elif LE>0x100: hexstr +=", "hexstr += \"%04X\"%LE elif LE == 0x10000: hexstr += \"000000\"", "\"00%04X\"%LE elif LE == 0x100: hexstr += \"00\" else: hexstr", "LC>0: if LC>0xFF or case4e: hexstr += \"00%04X\"%LC else: hexstr", "capdu.to_hexstr() if hexstr != expected: raise 
Exception(\"Mismatch for LC=%d, LE=%d\"%(LC,LE)+\"\\nActual:", "INS=0x11, P1=0x22, P2=0x33, DATA=data, LE=LE) hexstr = capdu.to_hexstr() if hexstr", "check(\"\"\"0x00 0x11 0x22 0x33\"\"\", CAPDU(CLA=0x00, INS=0x11, P1=0x22, P2=0x33)) check(\"1 2", "11 22 33\", CAPDU(CLA=0x00, INS=0x11, P1=0x22, P2=0x33)) check(\"0x00,0x11,0x22,0x33\", CAPDU(CLA=0x00, INS=0x11,", "gencase(* ,LC ,LE): assert(LC < 0x10000) assert(LE <= 0x10000) data", "\"00\" else: hexstr += \"%02X\" % LE expected = hexstr", "else: hexstr += \"%02X\" % LE expected = hexstr capdu", "304\", CAPDU(CLA=0x01, INS=0x02, P1=0x03, P2=0x04)) LC_cases = [0,1,2,254,255,256,257,65534,65535] LE_cases =", "input '\"+hexstr+\"'\\nActual: \"+str(capdu)+\"\\nExpected: \"+str(expected)) def gencase(* ,LC ,LE): assert(LC <", "if capdu != expected: raise Exception(\"Mismatch for input '\"+hexstr+\"'\\nActual: \"+str(capdu)+\"\\nExpected:", "LC_cases = [0,1,2,254,255,256,257,65534,65535] LE_cases = LC_cases + [65536] for LC", "== 0x10000: hexstr += \"0000\" else: hexstr += \"%04X\"%LE elif", "def check(hexstr, expected): capdu = CAPDU.from_hexstr(hexstr) if capdu != expected:", "0x10000) assert(LE <= 0x10000) data = os.getrandom(LC) hexstr = \"00112233\"", "inputs check(\"00112233\", CAPDU(CLA=0x00, INS=0x11, P1=0x22, P2=0x33)) check(\"00 11 22 33\",", "assert(type(b) is bytes) return (hexstr, capdu) #check __repr__ expected =", "== 0x10000: hexstr += \"000000\" elif LE>0x100: hexstr += \"00%04X\"%LE", "from pysatl import CAPDU if __name__ == \"__main__\": def check(hexstr,", "0x33\"\"\", CAPDU(CLA=0x00, INS=0x11, P1=0x22, P2=0x33)) check(\"1 2 304\", CAPDU(CLA=0x01, INS=0x02,", "hexstr capdu = CAPDU(CLA=0x00, INS=0x11, P1=0x22, P2=0x33, DATA=data, LE=LE) hexstr", "LE=LE) hexstr = capdu.to_hexstr() if hexstr != expected: raise Exception(\"Mismatch", "+= \"%02X\" % LE expected = hexstr capdu = CAPDU(CLA=0x00,", "for LC=%d, LE=%d\"%(LC,LE)+\"\\nActual: \"+hexstr+\"\\nExpected: \"+expected) b = capdu.to_bytes() assert(type(b) is", "CAPDU(CLA=0x00, INS=0x11, P1=0x22, P2=0x33)) check(\"\"\"0x00 0x11 0x22 0x33\"\"\", CAPDU(CLA=0x00, INS=0x11,", "separator=\"\") if LE>0: if case4e: if LE == 0x10000: hexstr", "LE=%d\"%(LC,LE)+\"\\nActual: \"+hexstr+\"\\nExpected: \"+expected) b = capdu.to_bytes() assert(type(b) is bytes) return", "expected = hexstr capdu = CAPDU(CLA=0x00, INS=0x11, P1=0x22, P2=0x33, DATA=data,", "P2=0x33)) check(\"\"\"0x00 0x11 0x22 0x33\"\"\", CAPDU(CLA=0x00, INS=0x11, P1=0x22, P2=0x33)) check(\"1", "CAPDU if __name__ == \"__main__\": def check(hexstr, expected): capdu =", "\"00%04X\"%LC else: hexstr += \"%02X\" % LC hexstr += pysatl.Utils.hexstr(data,", "exec(\"capdu=\"+expected) assert(expected==repr(capdu)) #check well formed inputs check(\"00112233\", CAPDU(CLA=0x00, INS=0x11, P1=0x22,", "else: hexstr += \"%02X\" % LC hexstr += pysatl.Utils.hexstr(data, separator=\"\")", "assert(LC < 0x10000) assert(LE <= 0x10000) data = os.getrandom(LC) hexstr", "case4e = case4 and (LC>0xFF or LE>0x100) if LC>0: if", "INS=0x11, P1=0x22, P2=0x33)) check(\"1 2 304\", CAPDU(CLA=0x01, INS=0x02, P1=0x03, P2=0x04))", "P2=0x33)) check(\"0x00,0x11,0x22,0x33\", CAPDU(CLA=0x00, INS=0x11, P1=0x22, P2=0x33)) #check we tolerate less", "raise Exception(\"Mismatch for LC=%d, LE=%d\"%(LC,LE)+\"\\nActual: \"+hexstr+\"\\nExpected: \"+expected) b = capdu.to_bytes()", "= LC_cases + [65536] for LC in LC_cases: for LE", "LE>0 case4e = case4 and (LC>0xFF or LE>0x100) if LC>0:", "check(\"00-11,22_33\", CAPDU(CLA=0x00, INS=0x11, P1=0x22, P2=0x33)) check(\"\"\"0x00 0x11 
0x22 0x33\"\"\", CAPDU(CLA=0x00,", "#check well formed inputs check(\"00112233\", CAPDU(CLA=0x00, INS=0x11, P1=0x22, P2=0x33)) check(\"00", "< 0x10000) assert(LE <= 0x10000) data = os.getrandom(LC) hexstr =", "expected): capdu = CAPDU.from_hexstr(hexstr) if capdu != expected: raise Exception(\"Mismatch", "LC>0 and LE>0 case4e = case4 and (LC>0xFF or LE>0x100)", "\"%02X\" % LE expected = hexstr capdu = CAPDU(CLA=0x00, INS=0x11,", "Exception(\"Mismatch for LC=%d, LE=%d\"%(LC,LE)+\"\\nActual: \"+hexstr+\"\\nExpected: \"+expected) b = capdu.to_bytes() assert(type(b)", "formed inputs check(\"00112233\", CAPDU(CLA=0x00, INS=0x11, P1=0x22, P2=0x33)) check(\"00 11 22", "0x11 0x22 0x33\"\"\", CAPDU(CLA=0x00, INS=0x11, P1=0x22, P2=0x33)) check(\"1 2 304\",", "if LC>0xFF or case4e: hexstr += \"00%04X\"%LC else: hexstr +=", "hexstr += \"00%04X\"%LE elif LE == 0x100: hexstr += \"00\"", "hexstr += \"%02X\" % LC hexstr += pysatl.Utils.hexstr(data, separator=\"\") if", "LC=%d, LE=%d\"%(LC,LE)+\"\\nActual: \"+hexstr+\"\\nExpected: \"+expected) b = capdu.to_bytes() assert(type(b) is bytes)", "!= expected: raise Exception(\"Mismatch for LC=%d, LE=%d\"%(LC,LE)+\"\\nActual: \"+hexstr+\"\\nExpected: \"+expected) b", "if LE>0: if case4e: if LE == 0x10000: hexstr +=", "case4 = LC>0 and LE>0 case4e = case4 and (LC>0xFF", "\"+str(expected)) def gencase(* ,LC ,LE): assert(LC < 0x10000) assert(LE <=", "0x10000: hexstr += \"000000\" elif LE>0x100: hexstr += \"00%04X\"%LE elif", "% LC hexstr += pysatl.Utils.hexstr(data, separator=\"\") if LE>0: if case4e:", "+= \"00%04X\"%LE elif LE == 0x100: hexstr += \"00\" else:", "capdu) #check __repr__ expected = \"pysatl.CAPDU.from_hexstr('00112233015502')\" capdu=None exec(\"capdu=\"+expected) assert(expected==repr(capdu)) #check", "if LC>0: if LC>0xFF or case4e: hexstr += \"00%04X\"%LC else:", "0x10000: hexstr += \"0000\" else: hexstr += \"%04X\"%LE elif LE", "CAPDU(CLA=0x00, INS=0x11, P1=0x22, P2=0x33)) check(\"00 11 22 33\", CAPDU(CLA=0x00, INS=0x11,", "CAPDU(CLA=0x00, INS=0x11, P1=0x22, P2=0x33)) check(\"0x00,0x11,0x22,0x33\", CAPDU(CLA=0x00, INS=0x11, P1=0x22, P2=0x33)) #check", "INS=0x02, P1=0x03, P2=0x04)) LC_cases = [0,1,2,254,255,256,257,65534,65535] LE_cases = LC_cases +", "= \"00112233\" case4 = LC>0 and LE>0 case4e = case4", "\"000000\" elif LE>0x100: hexstr += \"00%04X\"%LE elif LE == 0x100:", "CAPDU(CLA=0x00, INS=0x11, P1=0x22, P2=0x33, DATA=data, LE=LE) hexstr = capdu.to_hexstr() if", "== \"__main__\": def check(hexstr, expected): capdu = CAPDU.from_hexstr(hexstr) if capdu", "for input '\"+hexstr+\"'\\nActual: \"+str(capdu)+\"\\nExpected: \"+str(expected)) def gencase(* ,LC ,LE): assert(LC", "!= expected: raise Exception(\"Mismatch for input '\"+hexstr+\"'\\nActual: \"+str(capdu)+\"\\nExpected: \"+str(expected)) def", "elif LE>0x100: hexstr += \"00%04X\"%LE elif LE == 0x100: hexstr", "INS=0x11, P1=0x22, P2=0x33)) check(\"\"\"0x00 0x11 0x22 0x33\"\"\", CAPDU(CLA=0x00, INS=0x11, P1=0x22,", "assert(LE <= 0x10000) data = os.getrandom(LC) hexstr = \"00112233\" case4", "= capdu.to_bytes() assert(type(b) is bytes) return (hexstr, capdu) #check __repr__", "INS=0x11, P1=0x22, P2=0x33)) check(\"0x00,0x11,0x22,0x33\", CAPDU(CLA=0x00, INS=0x11, P1=0x22, P2=0x33)) #check we", "expected = \"pysatl.CAPDU.from_hexstr('00112233015502')\" capdu=None exec(\"capdu=\"+expected) assert(expected==repr(capdu)) #check well formed inputs", "\"__main__\": def check(hexstr, expected): capdu = CAPDU.from_hexstr(hexstr) if capdu !=", "P1=0x03, P2=0x04)) LC_cases = 
[0,1,2,254,255,256,257,65534,65535] LE_cases = LC_cases + [65536]", "0x22 0x33\"\"\", CAPDU(CLA=0x00, INS=0x11, P1=0x22, P2=0x33)) check(\"1 2 304\", CAPDU(CLA=0x01,", "LE == 0x100: hexstr += \"00\" else: hexstr += \"%02X\"", "or case4e: hexstr += \"00%04X\"%LC else: hexstr += \"%02X\" %", "if hexstr != expected: raise Exception(\"Mismatch for LC=%d, LE=%d\"%(LC,LE)+\"\\nActual: \"+hexstr+\"\\nExpected:", "LC>0xFF or case4e: hexstr += \"00%04X\"%LC else: hexstr += \"%02X\"", "os.getrandom(LC) hexstr = \"00112233\" case4 = LC>0 and LE>0 case4e", "<reponame>sebastien-riou/SATL import os import pysatl from pysatl import CAPDU if", "elif LE == 0x100: hexstr += \"00\" else: hexstr +=", "is bytes) return (hexstr, capdu) #check __repr__ expected = \"pysatl.CAPDU.from_hexstr('00112233015502')\"", "INS=0x11, P1=0x22, P2=0x33)) #check we tolerate less well formed inputs", "hexstr = \"00112233\" case4 = LC>0 and LE>0 case4e =", "P2=0x04)) LC_cases = [0,1,2,254,255,256,257,65534,65535] LE_cases = LC_cases + [65536] for", "P1=0x22, P2=0x33)) check(\"00 11 22 33\", CAPDU(CLA=0x00, INS=0x11, P1=0x22, P2=0x33))", "= CAPDU(CLA=0x00, INS=0x11, P1=0x22, P2=0x33, DATA=data, LE=LE) hexstr = capdu.to_hexstr()", "for LC in LC_cases: for LE in LE_cases: print(LC,LE) check(*gencase(LC=LC,", "= os.getrandom(LC) hexstr = \"00112233\" case4 = LC>0 and LE>0", "+= \"00%04X\"%LC else: hexstr += \"%02X\" % LC hexstr +=", "LC hexstr += pysatl.Utils.hexstr(data, separator=\"\") if LE>0: if case4e: if", "LE expected = hexstr capdu = CAPDU(CLA=0x00, INS=0x11, P1=0x22, P2=0x33,", "= capdu.to_hexstr() if hexstr != expected: raise Exception(\"Mismatch for LC=%d,", "LE>0x100: hexstr += \"00%04X\"%LE elif LE == 0x100: hexstr +=", "P1=0x22, P2=0x33, DATA=data, LE=LE) hexstr = capdu.to_hexstr() if hexstr !=", "= CAPDU.from_hexstr(hexstr) if capdu != expected: raise Exception(\"Mismatch for input", "check(\"00 11 22 33\", CAPDU(CLA=0x00, INS=0x11, P1=0x22, P2=0x33)) check(\"0x00,0x11,0x22,0x33\", CAPDU(CLA=0x00,", "less well formed inputs check(\"00-11,22_33\", CAPDU(CLA=0x00, INS=0x11, P1=0x22, P2=0x33)) check(\"\"\"0x00", "import os import pysatl from pysatl import CAPDU if __name__", "or LE>0x100) if LC>0: if LC>0xFF or case4e: hexstr +=", "\"%02X\" % LC hexstr += pysatl.Utils.hexstr(data, separator=\"\") if LE>0: if", "bytes) return (hexstr, capdu) #check __repr__ expected = \"pysatl.CAPDU.from_hexstr('00112233015502')\" capdu=None", "= [0,1,2,254,255,256,257,65534,65535] LE_cases = LC_cases + [65536] for LC in", "expected: raise Exception(\"Mismatch for input '\"+hexstr+\"'\\nActual: \"+str(capdu)+\"\\nExpected: \"+str(expected)) def gencase(*", "P2=0x33)) #check we tolerate less well formed inputs check(\"00-11,22_33\", CAPDU(CLA=0x00,", "well formed inputs check(\"00-11,22_33\", CAPDU(CLA=0x00, INS=0x11, P1=0x22, P2=0x33)) check(\"\"\"0x00 0x11", "#check __repr__ expected = \"pysatl.CAPDU.from_hexstr('00112233015502')\" capdu=None exec(\"capdu=\"+expected) assert(expected==repr(capdu)) #check well", "[65536] for LC in LC_cases: for LE in LE_cases: print(LC,LE)", "capdu = CAPDU.from_hexstr(hexstr) if capdu != expected: raise Exception(\"Mismatch for", "check(\"00112233\", CAPDU(CLA=0x00, INS=0x11, P1=0x22, P2=0x33)) check(\"00 11 22 33\", CAPDU(CLA=0x00,", "+= \"%04X\"%LE elif LE == 0x10000: hexstr += \"000000\" elif", "hexstr += \"00\" else: hexstr += \"%02X\" % LE expected", "DATA=data, LE=LE) hexstr = capdu.to_hexstr() if hexstr != expected: raise", "#check we tolerate less well formed inputs check(\"00-11,22_33\", 
CAPDU(CLA=0x00, INS=0x11,", "hexstr += \"000000\" elif LE>0x100: hexstr += \"00%04X\"%LE elif LE", "raise Exception(\"Mismatch for input '\"+hexstr+\"'\\nActual: \"+str(capdu)+\"\\nExpected: \"+str(expected)) def gencase(* ,LC", "hexstr += \"0000\" else: hexstr += \"%04X\"%LE elif LE ==", "pysatl import CAPDU if __name__ == \"__main__\": def check(hexstr, expected):", "and (LC>0xFF or LE>0x100) if LC>0: if LC>0xFF or case4e:", "33\", CAPDU(CLA=0x00, INS=0x11, P1=0x22, P2=0x33)) check(\"0x00,0x11,0x22,0x33\", CAPDU(CLA=0x00, INS=0x11, P1=0x22, P2=0x33))", "check(hexstr, expected): capdu = CAPDU.from_hexstr(hexstr) if capdu != expected: raise", "def gencase(* ,LC ,LE): assert(LC < 0x10000) assert(LE <= 0x10000)", ",LC ,LE): assert(LC < 0x10000) assert(LE <= 0x10000) data =", "\"00112233\" case4 = LC>0 and LE>0 case4e = case4 and", "LE>0: if case4e: if LE == 0x10000: hexstr += \"0000\"", "+= \"000000\" elif LE>0x100: hexstr += \"00%04X\"%LE elif LE ==", "Exception(\"Mismatch for input '\"+hexstr+\"'\\nActual: \"+str(capdu)+\"\\nExpected: \"+str(expected)) def gencase(* ,LC ,LE):", "= hexstr capdu = CAPDU(CLA=0x00, INS=0x11, P1=0x22, P2=0x33, DATA=data, LE=LE)", "+= \"00\" else: hexstr += \"%02X\" % LE expected =", "we tolerate less well formed inputs check(\"00-11,22_33\", CAPDU(CLA=0x00, INS=0x11, P1=0x22,", "\"0000\" else: hexstr += \"%04X\"%LE elif LE == 0x10000: hexstr", "inputs check(\"00-11,22_33\", CAPDU(CLA=0x00, INS=0x11, P1=0x22, P2=0x33)) check(\"\"\"0x00 0x11 0x22 0x33\"\"\"," ]
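The row that ends here is built from shingles of a pysatl CAPDU test script: it constructs command APDUs with random payloads, hand-encodes the LC/LE length fields (short or extended form), and compares the result with CAPDU.to_hexstr(). As a hedged illustration only, the sketch below reconstructs the expected-string logic visible in the gencase fragments; it deliberately avoids the pysatl dependency, and the function name build_expected_hexstr is hypothetical (not part of the original test).

```python
def build_expected_hexstr(data: bytes, LE: int) -> str:
    """Reconstruction of the expected hex string assembled by the
    'gencase' fragments above (header fixed to CLA=00 INS=11 P1=22 P2=33)."""
    LC = len(data)
    assert LC < 0x10000 and LE <= 0x10000
    hexstr = "00112233"
    case4 = LC > 0 and LE > 0                      # both data and LE present
    case4e = case4 and (LC > 0xFF or LE > 0x100)   # extended-length case 4
    if LC > 0:
        if LC > 0xFF or case4e:
            hexstr += "00%04X" % LC                # extended LC: 00 marker + 2 bytes
        else:
            hexstr += "%02X" % LC                  # short LC: 1 byte
    hexstr += data.hex().upper()                   # stands in for pysatl.Utils.hexstr
    if LE > 0:
        if case4e:
            hexstr += "0000" if LE == 0x10000 else "%04X" % LE
        elif LE == 0x10000:
            hexstr += "000000"
        elif LE > 0x100:
            hexstr += "00%04X" % LE
        elif LE == 0x100:
            hexstr += "00"                         # short LE: 0x100 encodes as 00
        else:
            hexstr += "%02X" % LE
    return hexstr

print(build_expected_hexstr(bytes.fromhex("ABCD"), 3))  # -> 0011223302ABCD03
```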
[ "shuffle \"\"\" cpu_periodogram = list() for iter in range(n_runs): \"\"\"", "available #run parallell execution try: out = pool.map_async(_gls_instance_Ndim_bootstrapping, n_runs).get(1./.0001) pool.terminate()", "for j in range(len(out_spectra)): bootstrapping_stats.append(out_spectra[j]) return bootstrapping_stats def Mdim_bootstrapping(max_pow): \"\"\"", "range(len(out_spectra)): bootstrapping_stats.append(out_spectra[j]) return bootstrapping_stats def parallel_bootstrapping(n_bootstrapping): \"\"\" \"\"\" n_runs =", "#!/usr/bin/python from math import sin, cos, tan, atan, pi, acos,", "sys, os import copy import random import numpy as np", "sorted(bootstrapping_stats) return [np.percentile(sorted_pwr,100-FAPs[i]) for i in range(len(FAPs))] def parallel_Mdim_bootstrapping(n_bootstrapping): \"\"\"", "overcome a given power, over unit. \"\"\" return float(sum(i >", "for i in range(len(Globals.time)): index = int(random.uniform(0,len(Globals.time))) rv[i] = Globals.rv[index]", "return [np.percentile(sorted_pwr,100-FAPs[i]) for i in range(len(FAPs))] def parallel_Mdim_bootstrapping(n_bootstrapping): \"\"\" \"\"\"", "= zip(*comb_rv_err) \"\"\" #allowing repetition rv = [0.0]*len(Globals.time) rv_err =", "import sin, cos, tan, atan, pi, acos, sqrt, exp, log10", "rv_err = [0.0]*len(Globals.time) for i in range(len(Globals.time)): index = int(random.uniform(0,len(Globals.time)))", "range(n_runs): \"\"\" #shuffle RV's and their errors. Repetition is not", "#run parallell execution try: out = pool.map_async(_gls_instance_Ndim_bootstrapping, n_runs).get(1./.0001) pool.terminate() except", "not allowed comb_rv_err = zip(Globals.rv, Globals.rv_err) random.shuffle(comb_rv_err) Globals.rv[:], Globals.rv_err[:] =", "pwr for i in bootstrapping_stats))/len(bootstrapping_stats) def fap_levels(bootstrapping_stats): \"\"\"determines which power", "i in range(len(FAPs))] def parallel_Mdim_bootstrapping(n_bootstrapping): \"\"\" \"\"\" n_runs = [n_bootstrapping/Globals.ncpus", "temp_steps=20, n_iter=1000) pwr_opt, fitting_coeffs, A = mgls(opt_state) cpu_periodogram.append(pwr_opt) #save the", "= [n_bootstrapping/Globals.ncpus for i in range(Globals.ncpus)] pool = mp.Pool(Globals.ncpus) #ncpus", "\"\"\" n_runs = [n_bootstrapping/Globals.ncpus for i in range(Globals.ncpus)] pool =", "numpy as np import multiprocessing as mp import ConfigParser sys.path.append('./bin')", "the output bunches out_spectra = list() for cpu in range(len(n_runs)):", "import mgls_mc from mgls_lib import * #definitions and constants to_radians", "out = pool.map_async(_gls_instance_bootstrapping, n_runs).get(1./.00001) pool.terminate() except KeyboardInterrupt: pool.terminate() sys.exit() #join", "pool = mp.Pool(Globals.ncpus) #ncpus available #run parallell execution try: out", "sys.exit() #join the output bunches out_spectra = list() for cpu", "multiprocessing as mp import ConfigParser sys.path.append('./bin') import mGLS, mMGLS sys.path.append('./src')", "\"FAP Levels:\", fap_levels(bootstrapping_stats) print \"Total bootstapping samples: \", len(bootstrapping_stats) return", "pwr): \"\"\"returns FAP for a given pwr. i.e. 
how many", "= zip(Globals.rv, Globals.rv_err) random.shuffle(comb_rv_err) Globals.rv[:], Globals.rv_err[:] = zip(*comb_rv_err) \"\"\" #allowing", "= pi/180.0 to_deg = 1.0/to_radians #------------------------- def _gls_instance_Ndim_bootstrapping(n_runs): \"\"\"executes n_runs", "bootstrapping_stats.append(out_spectra[j]) return bootstrapping_stats def parallel_bootstrapping(n_bootstrapping): \"\"\" \"\"\" n_runs = [n_bootstrapping/Globals.ncpus", "False, temp_steps=20, n_iter=1000) pwr_opt, fitting_coeffs, A = mgls(opt_state) cpu_periodogram.append(pwr_opt) #save", "and constants to_radians = pi/180.0 to_deg = 1.0/to_radians #------------------------- def", "index = int(random.uniform(0,len(Globals.time))) rv[i] = Globals.rv[index] rv_err[i] = Globals.rv_err[index] Globals.rv", "bootstrapping_stats.append(out_spectra[j]) return bootstrapping_stats def Mdim_bootstrapping(max_pow): \"\"\" \"\"\" #n_bootstrapping = 500", "Repetition is not allowed comb_rv_err = zip(Globals.rv, Globals.rv_err) random.shuffle(comb_rv_err) Globals.rv[:],", "execution try: out = pool.map_async(_gls_instance_bootstrapping, n_runs).get(1./.00001) pool.terminate() except KeyboardInterrupt: pool.terminate()", "Zero division error. Restarted parallel bootstapping\" \"\"\" #join the output", "= False, temp_steps=20, n_iter=1000) pwr_opt, fitting_coeffs, A = mgls(opt_state) cpu_periodogram.append(pwr_opt)", "0.01, 0.001] #FAPS to compute in % n_bs = len(bootstrapping_stats)", "= Globals.rv[index] rv_err[i] = Globals.rv_err[index] Globals.rv = rv Globals.rv_err =", "j in range(len(out_spectra)): bootstrapping_stats.append(out_spectra[j]) return bootstrapping_stats def parallel_bootstrapping(n_bootstrapping): \"\"\" \"\"\"", "#ncpus available #run parallell execution try: out = pool.map_async(_gls_instance_Ndim_bootstrapping, n_runs).get(1./.0001)", "cpu_periodogram = list() for iter in range(n_runs): \"\"\" #shuffle RV's", "parallel_Mdim_bootstrapping(n_bootstrapping): \"\"\" \"\"\" n_runs = [n_bootstrapping/Globals.ncpus for i in range(Globals.ncpus)]", "return cpu_periodogram def fap(bootstrapping_stats, pwr): \"\"\"returns FAP for a given", "#save the best period determination (highest power) return cpu_periodogram def", "list() for iter in range(n_runs): \"\"\" #shuffle RV's and their", "copy import random import numpy as np import multiprocessing as", "= [1.0, 0.1, 0.01, 0.001] #FAPS to compute in %", "int(random.uniform(0,len(Globals.time))) rv[i] = Globals.rv[index] rv_err[i] = Globals.rv_err[index] Globals.rv = rv", "parallel bootstapping\" \"\"\" #join the output bunches out_spectra = list()", "is reached \"\"\" FAPs = [1.0, 0.1, 0.01, 0.001] #FAPS", "Globals.rv_err[:] = zip(*comb_rv_err) \"\"\" #allowing repetition rv = [0.0]*len(Globals.time) rv_err", "division error. Restarted parallel bootstapping\" \"\"\" #join the output bunches", "MGLS for with previous data shuffle \"\"\" cpu_periodogram = list()", "#------------------------- def _gls_instance_Ndim_bootstrapping(n_runs): \"\"\"executes n_runs instances of MGLS for with", "constants to_radians = pi/180.0 to_deg = 1.0/to_radians #------------------------- def _gls_instance_Ndim_bootstrapping(n_runs):", "\"Error: Zero division error. Restarted parallel bootstapping\" \"\"\" #join the", "fap(bootstrapping_stats, pwr): \"\"\"returns FAP for a given pwr. i.e. 
how", "of MGLS for with previous data shuffle \"\"\" cpu_periodogram =", "execution try: out = pool.map_async(_gls_instance_Ndim_bootstrapping, n_runs).get(1./.0001) pool.terminate() except KeyboardInterrupt: pool.terminate()", "vector ascendently sorted_pwr = sorted(bootstrapping_stats) return [np.percentile(sorted_pwr,100-FAPs[i]) for i in", "Globals.rv_err) random.shuffle(comb_rv_err) Globals.rv[:], Globals.rv_err[:] = zip(*comb_rv_err) \"\"\" #allowing repetition rv", "[np.percentile(sorted_pwr,100-FAPs[i]) for i in range(len(FAPs))] def parallel_Mdim_bootstrapping(n_bootstrapping): \"\"\" \"\"\" n_runs", "def parallel_Mdim_bootstrapping(n_bootstrapping): \"\"\" \"\"\" n_runs = [n_bootstrapping/Globals.ncpus for i in", "\"\"\"executes n_runs instances of MGLS for with previous data shuffle", "from mgls_lib import * #definitions and constants to_radians = pi/180.0", "\"\"\" cpu_periodogram = list() for iter in range(n_runs): \"\"\" #shuffle", "> pwr for i in bootstrapping_stats))/len(bootstrapping_stats) def fap_levels(bootstrapping_stats): \"\"\"determines which", "= pool.map_async(_gls_instance_bootstrapping, n_runs).get(1./.00001) pool.terminate() except KeyboardInterrupt: pool.terminate() sys.exit() #join the", "mgls_io import mgls_mc from mgls_lib import * #definitions and constants", "import random import numpy as np import multiprocessing as mp", "pi/180.0 to_deg = 1.0/to_radians #------------------------- def _gls_instance_Ndim_bootstrapping(n_runs): \"\"\"executes n_runs instances", "_gls_instance_Ndim_bootstrapping(n_runs): \"\"\"executes n_runs instances of MGLS for with previous data", "in range(len(Globals.time)): index = int(random.uniform(0,len(Globals.time))) rv[i] = Globals.rv[index] rv_err[i] =", "range(len(FAPs))] def parallel_Mdim_bootstrapping(n_bootstrapping): \"\"\" \"\"\" n_runs = [n_bootstrapping/Globals.ncpus for i", "sorted_pwr = sorted(bootstrapping_stats) return [np.percentile(sorted_pwr,100-FAPs[i]) for i in range(len(FAPs))] def", "which power a FAP of 1, 0.1, 0.01 % is", "output bunches out_spectra = list() for cpu in range(len(n_runs)): out_spectra.extend(out[cpu])", "bootstrapping_stats))/len(bootstrapping_stats) def fap_levels(bootstrapping_stats): \"\"\"determines which power a FAP of 1,", "def fap(bootstrapping_stats, pwr): \"\"\"returns FAP for a given pwr. i.e.", "from math import sin, cos, tan, atan, pi, acos, sqrt,", "= 1.0/to_radians #------------------------- def _gls_instance_Ndim_bootstrapping(n_runs): \"\"\"executes n_runs instances of MGLS", "#iterations bootstrapping_stats = parallel_Mdim_bootstrapping(Globals.n_bootstrapping) print \"\\n//BOOTSTRAPPING:// {1.0, 0.1, 0.01, 0.001}%\"", "in range(len(n_runs)): out_spectra.extend(out[cpu]) bootstrapping_stats = list() for j in range(len(out_spectra)):", "Restarted parallel bootstapping\" \"\"\" #join the output bunches out_spectra =", "except ZeroDivisionError: print \"Error: Zero division error. 
Restarted parallel bootstapping\"", "ConfigParser sys.path.append('./bin') import mGLS, mMGLS sys.path.append('./src') from EnvGlobals import Globals", "= rv Globals.rv_err = rv_err opt_state = mgls_mc.optimal(Globals.ndim, msgs =", "os import copy import random import numpy as np import", "0.1, 0.01, 0.001] #FAPS to compute in % n_bs =", "atan, pi, acos, sqrt, exp, log10 import sys, os import", "available #run parallell execution try: out = pool.map_async(_gls_instance_bootstrapping, n_runs).get(1./.00001) pool.terminate()", "data shuffle \"\"\" cpu_periodogram = list() for iter in range(n_runs):", "over unit. \"\"\" return float(sum(i > pwr for i in", "* #definitions and constants to_radians = pi/180.0 to_deg = 1.0/to_radians", "= [0.0]*len(Globals.time) rv_err = [0.0]*len(Globals.time) for i in range(len(Globals.time)): index", "print \"Error: Zero division error. Restarted parallel bootstapping\" \"\"\" #join", "Mdim_bootstrapping(max_pow): \"\"\" \"\"\" #n_bootstrapping = 500 #iterations bootstrapping_stats = parallel_Mdim_bootstrapping(Globals.n_bootstrapping)", "Levels:\", fap_levels(bootstrapping_stats) print \"Total bootstapping samples: \", len(bootstrapping_stats) return bootstrapping_stats", "print \"FAP Levels:\", fap_levels(bootstrapping_stats) print \"Total bootstapping samples: \", len(bootstrapping_stats)", "KeyboardInterrupt: pool.terminate() sys.exit() #join the output bunches out_spectra = list()", "rv Globals.rv_err = rv_err opt_state = mgls_mc.optimal(Globals.ndim, msgs = False,", "for i in bootstrapping_stats))/len(bootstrapping_stats) def fap_levels(bootstrapping_stats): \"\"\"determines which power a", "random.shuffle(comb_rv_err) Globals.rv[:], Globals.rv_err[:] = zip(*comb_rv_err) \"\"\" #allowing repetition rv =", "KeyboardInterrupt: pool.terminate() sys.exit() \"\"\" except ZeroDivisionError: print \"Error: Zero division", "bootstrapping_stats def Mdim_bootstrapping(max_pow): \"\"\" \"\"\" #n_bootstrapping = 500 #iterations bootstrapping_stats", "#FAPS to compute in % n_bs = len(bootstrapping_stats) #sort bootstrapping_stats", "#sort bootstrapping_stats vector ascendently sorted_pwr = sorted(bootstrapping_stats) return [np.percentile(sorted_pwr,100-FAPs[i]) for", "FAPs = [1.0, 0.1, 0.01, 0.001] #FAPS to compute in", "n_iter=1000) pwr_opt, fitting_coeffs, A = mgls(opt_state) cpu_periodogram.append(pwr_opt) #save the best", "mp.Pool(Globals.ncpus) #ncpus available #run parallell execution try: out = pool.map_async(_gls_instance_bootstrapping,", "\"\"\"returns FAP for a given pwr. i.e. how many realizations", "a given pwr. i.e. how many realizations overcome a given", "in range(n_runs): \"\"\" #shuffle RV's and their errors. 
Repetition is", "0.1, 0.01, 0.001}%\" print \"FAP Levels:\", fap_levels(bootstrapping_stats) print \"Total bootstapping", "in % n_bs = len(bootstrapping_stats) #sort bootstrapping_stats vector ascendently sorted_pwr", "exp, log10 import sys, os import copy import random import", "pool.terminate() except KeyboardInterrupt: pool.terminate() sys.exit() #join the output bunches out_spectra", "a FAP of 1, 0.1, 0.01 % is reached \"\"\"", "rv_err opt_state = mgls_mc.optimal(Globals.ndim, msgs = False, temp_steps=20, n_iter=1000) pwr_opt,", "bootstrapping_stats = parallel_Mdim_bootstrapping(Globals.n_bootstrapping) print \"\\n//BOOTSTRAPPING:// {1.0, 0.1, 0.01, 0.001}%\" print", "parallell execution try: out = pool.map_async(_gls_instance_bootstrapping, n_runs).get(1./.00001) pool.terminate() except KeyboardInterrupt:", "len(bootstrapping_stats) #sort bootstrapping_stats vector ascendently sorted_pwr = sorted(bootstrapping_stats) return [np.percentile(sorted_pwr,100-FAPs[i])", "n_runs).get(1./.00001) pool.terminate() except KeyboardInterrupt: pool.terminate() sys.exit() #join the output bunches", "as np import multiprocessing as mp import ConfigParser sys.path.append('./bin') import", "import ConfigParser sys.path.append('./bin') import mGLS, mMGLS sys.path.append('./src') from EnvGlobals import", "[0.0]*len(Globals.time) for i in range(len(Globals.time)): index = int(random.uniform(0,len(Globals.time))) rv[i] =", "\"\"\" #join the output bunches out_spectra = list() for cpu", "errors. Repetition is not allowed comb_rv_err = zip(Globals.rv, Globals.rv_err) random.shuffle(comb_rv_err)", "= [0.0]*len(Globals.time) for i in range(len(Globals.time)): index = int(random.uniform(0,len(Globals.time))) rv[i]", "in bootstrapping_stats))/len(bootstrapping_stats) def fap_levels(bootstrapping_stats): \"\"\"determines which power a FAP of", "{1.0, 0.1, 0.01, 0.001}%\" print \"FAP Levels:\", fap_levels(bootstrapping_stats) print \"Total", "reached \"\"\" FAPs = [1.0, 0.1, 0.01, 0.001] #FAPS to", "rv_err[i] = Globals.rv_err[index] Globals.rv = rv Globals.rv_err = rv_err opt_state", "i in range(Globals.ncpus)] pool = mp.Pool(Globals.ncpus) #ncpus available #run parallell", "= list() for iter in range(n_runs): \"\"\" #shuffle RV's and", "to_radians = pi/180.0 to_deg = 1.0/to_radians #------------------------- def _gls_instance_Ndim_bootstrapping(n_runs): \"\"\"executes", "\"\"\" #shuffle RV's and their errors. Repetition is not allowed", "import multiprocessing as mp import ConfigParser sys.path.append('./bin') import mGLS, mMGLS", "allowed comb_rv_err = zip(Globals.rv, Globals.rv_err) random.shuffle(comb_rv_err) Globals.rv[:], Globals.rv_err[:] = zip(*comb_rv_err)", "range(Globals.ncpus)] pool = mp.Pool(Globals.ncpus) #ncpus available #run parallell execution try:", "list() for j in range(len(out_spectra)): bootstrapping_stats.append(out_spectra[j]) return bootstrapping_stats def Mdim_bootstrapping(max_pow):", "= Globals.rv_err[index] Globals.rv = rv Globals.rv_err = rv_err opt_state =", "realizations overcome a given power, over unit. 
\"\"\" return float(sum(i", "rv[i] = Globals.rv[index] rv_err[i] = Globals.rv_err[index] Globals.rv = rv Globals.rv_err", "return float(sum(i > pwr for i in bootstrapping_stats))/len(bootstrapping_stats) def fap_levels(bootstrapping_stats):", "= int(random.uniform(0,len(Globals.time))) rv[i] = Globals.rv[index] rv_err[i] = Globals.rv_err[index] Globals.rv =", "cpu_periodogram def fap(bootstrapping_stats, pwr): \"\"\"returns FAP for a given pwr.", "for iter in range(n_runs): \"\"\" #shuffle RV's and their errors.", "import * #definitions and constants to_radians = pi/180.0 to_deg =", "n_runs instances of MGLS for with previous data shuffle \"\"\"", "cos, tan, atan, pi, acos, sqrt, exp, log10 import sys,", "Globals.rv = rv Globals.rv_err = rv_err opt_state = mgls_mc.optimal(Globals.ndim, msgs", "#shuffle RV's and their errors. Repetition is not allowed comb_rv_err", "mp import ConfigParser sys.path.append('./bin') import mGLS, mMGLS sys.path.append('./src') from EnvGlobals", "bootstapping\" \"\"\" #join the output bunches out_spectra = list() for", "% n_bs = len(bootstrapping_stats) #sort bootstrapping_stats vector ascendently sorted_pwr =", "is not allowed comb_rv_err = zip(Globals.rv, Globals.rv_err) random.shuffle(comb_rv_err) Globals.rv[:], Globals.rv_err[:]", "ZeroDivisionError: print \"Error: Zero division error. Restarted parallel bootstapping\" \"\"\"", "return bootstrapping_stats def parallel_bootstrapping(n_bootstrapping): \"\"\" \"\"\" n_runs = [n_bootstrapping/Globals.ncpus for", "for i in range(len(FAPs))] def parallel_Mdim_bootstrapping(n_bootstrapping): \"\"\" \"\"\" n_runs =", "out_spectra = list() for cpu in range(len(n_runs)): out_spectra.extend(out[cpu]) bootstrapping_stats =", "#run parallell execution try: out = pool.map_async(_gls_instance_bootstrapping, n_runs).get(1./.00001) pool.terminate() except", "bootstrapping_stats = list() for j in range(len(out_spectra)): bootstrapping_stats.append(out_spectra[j]) return bootstrapping_stats", "\"\"\" \"\"\" #n_bootstrapping = 500 #iterations bootstrapping_stats = parallel_Mdim_bootstrapping(Globals.n_bootstrapping) print", "def Mdim_bootstrapping(max_pow): \"\"\" \"\"\" #n_bootstrapping = 500 #iterations bootstrapping_stats =", "bootstrapping_stats def parallel_bootstrapping(n_bootstrapping): \"\"\" \"\"\" n_runs = [n_bootstrapping/Globals.ncpus for i", "\"\"\" FAPs = [1.0, 0.1, 0.01, 0.001] #FAPS to compute", "math import sin, cos, tan, atan, pi, acos, sqrt, exp,", "in range(Globals.ncpus)] pool = mp.Pool(Globals.ncpus) #ncpus available #run parallell execution", "#definitions and constants to_radians = pi/180.0 to_deg = 1.0/to_radians #-------------------------", "mGLS, mMGLS sys.path.append('./src') from EnvGlobals import Globals import mgls_io import", "a given power, over unit. \"\"\" return float(sum(i > pwr", "in range(len(out_spectra)): bootstrapping_stats.append(out_spectra[j]) return bootstrapping_stats def parallel_bootstrapping(n_bootstrapping): \"\"\" \"\"\" n_runs", "\"\"\" return float(sum(i > pwr for i in bootstrapping_stats))/len(bootstrapping_stats) def", "best period determination (highest power) return cpu_periodogram def fap(bootstrapping_stats, pwr):", "log10 import sys, os import copy import random import numpy", "RV's and their errors. Repetition is not allowed comb_rv_err =", "their errors. 
Repetition is not allowed comb_rv_err = zip(Globals.rv, Globals.rv_err)", "pool.terminate() except KeyboardInterrupt: pool.terminate() sys.exit() \"\"\" except ZeroDivisionError: print \"Error:", "for with previous data shuffle \"\"\" cpu_periodogram = list() for", "unit. \"\"\" return float(sum(i > pwr for i in bootstrapping_stats))/len(bootstrapping_stats)", "i.e. how many realizations overcome a given power, over unit.", "float(sum(i > pwr for i in bootstrapping_stats))/len(bootstrapping_stats) def fap_levels(bootstrapping_stats): \"\"\"determines", "comb_rv_err = zip(Globals.rv, Globals.rv_err) random.shuffle(comb_rv_err) Globals.rv[:], Globals.rv_err[:] = zip(*comb_rv_err) \"\"\"", "tan, atan, pi, acos, sqrt, exp, log10 import sys, os", "for j in range(len(out_spectra)): bootstrapping_stats.append(out_spectra[j]) return bootstrapping_stats def parallel_bootstrapping(n_bootstrapping): \"\"\"", "0.01, 0.001}%\" print \"FAP Levels:\", fap_levels(bootstrapping_stats) print \"Total bootstapping samples:", "EnvGlobals import Globals import mgls_io import mgls_mc from mgls_lib import", "parallell execution try: out = pool.map_async(_gls_instance_Ndim_bootstrapping, n_runs).get(1./.0001) pool.terminate() except KeyboardInterrupt:", "mMGLS sys.path.append('./src') from EnvGlobals import Globals import mgls_io import mgls_mc", "0.001] #FAPS to compute in % n_bs = len(bootstrapping_stats) #sort", "import sys, os import copy import random import numpy as", "as mp import ConfigParser sys.path.append('./bin') import mGLS, mMGLS sys.path.append('./src') from", "sys.path.append('./bin') import mGLS, mMGLS sys.path.append('./src') from EnvGlobals import Globals import", "previous data shuffle \"\"\" cpu_periodogram = list() for iter in", "bunches out_spectra = list() for cpu in range(len(n_runs)): out_spectra.extend(out[cpu]) bootstrapping_stats", "[n_bootstrapping/Globals.ncpus for i in range(Globals.ncpus)] pool = mp.Pool(Globals.ncpus) #ncpus available", "to compute in % n_bs = len(bootstrapping_stats) #sort bootstrapping_stats vector", "\"\"\"determines which power a FAP of 1, 0.1, 0.01 %", "1, 0.1, 0.01 % is reached \"\"\" FAPs = [1.0,", "acos, sqrt, exp, log10 import sys, os import copy import", "mgls_mc.optimal(Globals.ndim, msgs = False, temp_steps=20, n_iter=1000) pwr_opt, fitting_coeffs, A =", "= len(bootstrapping_stats) #sort bootstrapping_stats vector ascendently sorted_pwr = sorted(bootstrapping_stats) return", "import mGLS, mMGLS sys.path.append('./src') from EnvGlobals import Globals import mgls_io", "def _gls_instance_Ndim_bootstrapping(n_runs): \"\"\"executes n_runs instances of MGLS for with previous", "= sorted(bootstrapping_stats) return [np.percentile(sorted_pwr,100-FAPs[i]) for i in range(len(FAPs))] def parallel_Mdim_bootstrapping(n_bootstrapping):", "pool.terminate() sys.exit() \"\"\" except ZeroDivisionError: print \"Error: Zero division error.", "\"\"\" \"\"\" n_runs = [n_bootstrapping/Globals.ncpus for i in range(Globals.ncpus)] pool", "try: out = pool.map_async(_gls_instance_bootstrapping, n_runs).get(1./.00001) pool.terminate() except KeyboardInterrupt: pool.terminate() sys.exit()", "for a given pwr. i.e. 
how many realizations overcome a", "in range(len(out_spectra)): bootstrapping_stats.append(out_spectra[j]) return bootstrapping_stats def Mdim_bootstrapping(max_pow): \"\"\" \"\"\" #n_bootstrapping", "#allowing repetition rv = [0.0]*len(Globals.time) rv_err = [0.0]*len(Globals.time) for i", "n_runs).get(1./.0001) pool.terminate() except KeyboardInterrupt: pool.terminate() sys.exit() \"\"\" except ZeroDivisionError: print", "sin, cos, tan, atan, pi, acos, sqrt, exp, log10 import", "pool.terminate() sys.exit() #join the output bunches out_spectra = list() for", "j in range(len(out_spectra)): bootstrapping_stats.append(out_spectra[j]) return bootstrapping_stats def Mdim_bootstrapping(max_pow): \"\"\" \"\"\"", "def parallel_bootstrapping(n_bootstrapping): \"\"\" \"\"\" n_runs = [n_bootstrapping/Globals.ncpus for i in", "mgls_mc from mgls_lib import * #definitions and constants to_radians =", "compute in % n_bs = len(bootstrapping_stats) #sort bootstrapping_stats vector ascendently", "Globals import mgls_io import mgls_mc from mgls_lib import * #definitions", "ascendently sorted_pwr = sorted(bootstrapping_stats) return [np.percentile(sorted_pwr,100-FAPs[i]) for i in range(len(FAPs))]", "try: out = pool.map_async(_gls_instance_Ndim_bootstrapping, n_runs).get(1./.0001) pool.terminate() except KeyboardInterrupt: pool.terminate() sys.exit()", "sys.exit() \"\"\" except ZeroDivisionError: print \"Error: Zero division error. Restarted", "A = mgls(opt_state) cpu_periodogram.append(pwr_opt) #save the best period determination (highest", "FAP for a given pwr. i.e. how many realizations overcome", "range(len(Globals.time)): index = int(random.uniform(0,len(Globals.time))) rv[i] = Globals.rv[index] rv_err[i] = Globals.rv_err[index]", "except KeyboardInterrupt: pool.terminate() sys.exit() #join the output bunches out_spectra =", "Globals.rv[:], Globals.rv_err[:] = zip(*comb_rv_err) \"\"\" #allowing repetition rv = [0.0]*len(Globals.time)", "(highest power) return cpu_periodogram def fap(bootstrapping_stats, pwr): \"\"\"returns FAP for", "= parallel_Mdim_bootstrapping(Globals.n_bootstrapping) print \"\\n//BOOTSTRAPPING:// {1.0, 0.1, 0.01, 0.001}%\" print \"FAP", "= rv_err opt_state = mgls_mc.optimal(Globals.ndim, msgs = False, temp_steps=20, n_iter=1000)", "n_bs = len(bootstrapping_stats) #sort bootstrapping_stats vector ascendently sorted_pwr = sorted(bootstrapping_stats)", "i in range(len(Globals.time)): index = int(random.uniform(0,len(Globals.time))) rv[i] = Globals.rv[index] rv_err[i]", "= mp.Pool(Globals.ncpus) #ncpus available #run parallell execution try: out =", "many realizations overcome a given power, over unit. 
\"\"\" return", "bootstrapping_stats vector ascendently sorted_pwr = sorted(bootstrapping_stats) return [np.percentile(sorted_pwr,100-FAPs[i]) for i", "power) return cpu_periodogram def fap(bootstrapping_stats, pwr): \"\"\"returns FAP for a", "rv = [0.0]*len(Globals.time) rv_err = [0.0]*len(Globals.time) for i in range(len(Globals.time)):", "cpu_periodogram.append(pwr_opt) #save the best period determination (highest power) return cpu_periodogram", "zip(*comb_rv_err) \"\"\" #allowing repetition rv = [0.0]*len(Globals.time) rv_err = [0.0]*len(Globals.time)", "mgls(opt_state) cpu_periodogram.append(pwr_opt) #save the best period determination (highest power) return", "= mgls(opt_state) cpu_periodogram.append(pwr_opt) #save the best period determination (highest power)", "with previous data shuffle \"\"\" cpu_periodogram = list() for iter", "[1.0, 0.1, 0.01, 0.001] #FAPS to compute in % n_bs", "0.001}%\" print \"FAP Levels:\", fap_levels(bootstrapping_stats) print \"Total bootstapping samples: \",", "given pwr. i.e. how many realizations overcome a given power,", "import mgls_io import mgls_mc from mgls_lib import * #definitions and", "iter in range(n_runs): \"\"\" #shuffle RV's and their errors. Repetition", "msgs = False, temp_steps=20, n_iter=1000) pwr_opt, fitting_coeffs, A = mgls(opt_state)", "0.1, 0.01 % is reached \"\"\" FAPs = [1.0, 0.1,", "n_runs = [n_bootstrapping/Globals.ncpus for i in range(Globals.ncpus)] pool = mp.Pool(Globals.ncpus)", "sys.path.append('./src') from EnvGlobals import Globals import mgls_io import mgls_mc from", "to_deg = 1.0/to_radians #------------------------- def _gls_instance_Ndim_bootstrapping(n_runs): \"\"\"executes n_runs instances of", "period determination (highest power) return cpu_periodogram def fap(bootstrapping_stats, pwr): \"\"\"returns", "parallel_Mdim_bootstrapping(Globals.n_bootstrapping) print \"\\n//BOOTSTRAPPING:// {1.0, 0.1, 0.01, 0.001}%\" print \"FAP Levels:\",", "sqrt, exp, log10 import sys, os import copy import random", "0.01 % is reached \"\"\" FAPs = [1.0, 0.1, 0.01,", "500 #iterations bootstrapping_stats = parallel_Mdim_bootstrapping(Globals.n_bootstrapping) print \"\\n//BOOTSTRAPPING:// {1.0, 0.1, 0.01,", "range(len(n_runs)): out_spectra.extend(out[cpu]) bootstrapping_stats = list() for j in range(len(out_spectra)): bootstrapping_stats.append(out_spectra[j])", "and their errors. 
Repetition is not allowed comb_rv_err = zip(Globals.rv,", "except KeyboardInterrupt: pool.terminate() sys.exit() \"\"\" except ZeroDivisionError: print \"Error: Zero", "random import numpy as np import multiprocessing as mp import", "Globals.rv_err[index] Globals.rv = rv Globals.rv_err = rv_err opt_state = mgls_mc.optimal(Globals.ndim,", "determination (highest power) return cpu_periodogram def fap(bootstrapping_stats, pwr): \"\"\"returns FAP", "repetition rv = [0.0]*len(Globals.time) rv_err = [0.0]*len(Globals.time) for i in", "of 1, 0.1, 0.01 % is reached \"\"\" FAPs =", "in range(len(FAPs))] def parallel_Mdim_bootstrapping(n_bootstrapping): \"\"\" \"\"\" n_runs = [n_bootstrapping/Globals.ncpus for", "#ncpus available #run parallell execution try: out = pool.map_async(_gls_instance_bootstrapping, n_runs).get(1./.00001)", "= mgls_mc.optimal(Globals.ndim, msgs = False, temp_steps=20, n_iter=1000) pwr_opt, fitting_coeffs, A", "mgls_lib import * #definitions and constants to_radians = pi/180.0 to_deg", "[0.0]*len(Globals.time) rv_err = [0.0]*len(Globals.time) for i in range(len(Globals.time)): index =", "#join the output bunches out_spectra = list() for cpu in", "\"\\n//BOOTSTRAPPING:// {1.0, 0.1, 0.01, 0.001}%\" print \"FAP Levels:\", fap_levels(bootstrapping_stats) print", "\"\"\" #n_bootstrapping = 500 #iterations bootstrapping_stats = parallel_Mdim_bootstrapping(Globals.n_bootstrapping) print \"\\n//BOOTSTRAPPING://", "np import multiprocessing as mp import ConfigParser sys.path.append('./bin') import mGLS,", "fitting_coeffs, A = mgls(opt_state) cpu_periodogram.append(pwr_opt) #save the best period determination", "error. Restarted parallel bootstapping\" \"\"\" #join the output bunches out_spectra", "range(len(out_spectra)): bootstrapping_stats.append(out_spectra[j]) return bootstrapping_stats def Mdim_bootstrapping(max_pow): \"\"\" \"\"\" #n_bootstrapping =", "from EnvGlobals import Globals import mgls_io import mgls_mc from mgls_lib", "out = pool.map_async(_gls_instance_Ndim_bootstrapping, n_runs).get(1./.0001) pool.terminate() except KeyboardInterrupt: pool.terminate() sys.exit() \"\"\"", "#n_bootstrapping = 500 #iterations bootstrapping_stats = parallel_Mdim_bootstrapping(Globals.n_bootstrapping) print \"\\n//BOOTSTRAPPING:// {1.0,", "cpu in range(len(n_runs)): out_spectra.extend(out[cpu]) bootstrapping_stats = list() for j in", "= pool.map_async(_gls_instance_Ndim_bootstrapping, n_runs).get(1./.0001) pool.terminate() except KeyboardInterrupt: pool.terminate() sys.exit() \"\"\" except", "import copy import random import numpy as np import multiprocessing", "Globals.rv_err = rv_err opt_state = mgls_mc.optimal(Globals.ndim, msgs = False, temp_steps=20,", "% is reached \"\"\" FAPs = [1.0, 0.1, 0.01, 0.001]", "Globals.rv[index] rv_err[i] = Globals.rv_err[index] Globals.rv = rv Globals.rv_err = rv_err", "out_spectra.extend(out[cpu]) bootstrapping_stats = list() for j in range(len(out_spectra)): bootstrapping_stats.append(out_spectra[j]) return", "\"\"\" except ZeroDivisionError: print \"Error: Zero division error. 
Restarted parallel", "mp.Pool(Globals.ncpus) #ncpus available #run parallell execution try: out = pool.map_async(_gls_instance_Ndim_bootstrapping,", "zip(Globals.rv, Globals.rv_err) random.shuffle(comb_rv_err) Globals.rv[:], Globals.rv_err[:] = zip(*comb_rv_err) \"\"\" #allowing repetition", "i in bootstrapping_stats))/len(bootstrapping_stats) def fap_levels(bootstrapping_stats): \"\"\"determines which power a FAP", "instances of MGLS for with previous data shuffle \"\"\" cpu_periodogram", "print \"\\n//BOOTSTRAPPING:// {1.0, 0.1, 0.01, 0.001}%\" print \"FAP Levels:\", fap_levels(bootstrapping_stats)", "the best period determination (highest power) return cpu_periodogram def fap(bootstrapping_stats,", "for i in range(Globals.ncpus)] pool = mp.Pool(Globals.ncpus) #ncpus available #run", "opt_state = mgls_mc.optimal(Globals.ndim, msgs = False, temp_steps=20, n_iter=1000) pwr_opt, fitting_coeffs,", "parallel_bootstrapping(n_bootstrapping): \"\"\" \"\"\" n_runs = [n_bootstrapping/Globals.ncpus for i in range(Globals.ncpus)]", "pool.map_async(_gls_instance_Ndim_bootstrapping, n_runs).get(1./.0001) pool.terminate() except KeyboardInterrupt: pool.terminate() sys.exit() \"\"\" except ZeroDivisionError:", "fap_levels(bootstrapping_stats): \"\"\"determines which power a FAP of 1, 0.1, 0.01", "= 500 #iterations bootstrapping_stats = parallel_Mdim_bootstrapping(Globals.n_bootstrapping) print \"\\n//BOOTSTRAPPING:// {1.0, 0.1,", "FAP of 1, 0.1, 0.01 % is reached \"\"\" FAPs", "for cpu in range(len(n_runs)): out_spectra.extend(out[cpu]) bootstrapping_stats = list() for j", "def fap_levels(bootstrapping_stats): \"\"\"determines which power a FAP of 1, 0.1,", "<filename>src/mgls_bootstrapping.py #!/usr/bin/python from math import sin, cos, tan, atan, pi,", "pi, acos, sqrt, exp, log10 import sys, os import copy", "1.0/to_radians #------------------------- def _gls_instance_Ndim_bootstrapping(n_runs): \"\"\"executes n_runs instances of MGLS for", "pwr. i.e. how many realizations overcome a given power, over", "list() for j in range(len(out_spectra)): bootstrapping_stats.append(out_spectra[j]) return bootstrapping_stats def parallel_bootstrapping(n_bootstrapping):", "return bootstrapping_stats def Mdim_bootstrapping(max_pow): \"\"\" \"\"\" #n_bootstrapping = 500 #iterations", "= list() for j in range(len(out_spectra)): bootstrapping_stats.append(out_spectra[j]) return bootstrapping_stats def", "pool.map_async(_gls_instance_bootstrapping, n_runs).get(1./.00001) pool.terminate() except KeyboardInterrupt: pool.terminate() sys.exit() #join the output", "\"\"\" #allowing repetition rv = [0.0]*len(Globals.time) rv_err = [0.0]*len(Globals.time) for", "list() for cpu in range(len(n_runs)): out_spectra.extend(out[cpu]) bootstrapping_stats = list() for", "power, over unit. \"\"\" return float(sum(i > pwr for i", "power a FAP of 1, 0.1, 0.01 % is reached", "import numpy as np import multiprocessing as mp import ConfigParser", "import Globals import mgls_io import mgls_mc from mgls_lib import *", "= list() for cpu in range(len(n_runs)): out_spectra.extend(out[cpu]) bootstrapping_stats = list()", "how many realizations overcome a given power, over unit. \"\"\"", "given power, over unit. \"\"\" return float(sum(i > pwr for", "pwr_opt, fitting_coeffs, A = mgls(opt_state) cpu_periodogram.append(pwr_opt) #save the best period" ]
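The row above consists of shingles from an MGLS bootstrapping module: radial-velocity data are resampled, the periodogram power of each resampled set is collected, and the resulting distribution of powers yields false-alarm probabilities. Purely as an illustration of the two small statistics helpers visible in the fragments (fap and fap_levels), here is a self-contained sketch; the multiprocessing and MGLS optimisation machinery around them is not reproduced, and the toy data at the end are invented for the usage example.

```python
import numpy as np

def fap(bootstrapping_stats, pwr):
    """Empirical false-alarm probability: fraction of bootstrap
    realizations whose power exceeds `pwr` (as in the fragments above)."""
    stats = np.asarray(bootstrapping_stats)
    return float(np.sum(stats > pwr)) / len(stats)

def fap_levels(bootstrapping_stats, FAPs=(1.0, 0.1, 0.01, 0.001)):
    """Powers at which the given FAP percentages are reached, taken as
    percentiles of the sorted bootstrap power distribution."""
    sorted_pwr = np.sort(bootstrapping_stats)
    return [np.percentile(sorted_pwr, 100 - f) for f in FAPs]

# toy usage with synthetic bootstrap powers
rng = np.random.default_rng(0)
powers = rng.chisquare(df=4, size=5000)
print(fap(powers, 12.0), fap_levels(powers))
```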
[ "constants used by the device manager Author: - <NAME> (<EMAIL>", "sent to Index Builder number_of_comps = 10 # number of", "\"\"\" number_of_rows = 3 # total number rows of Index", "5 # number of links to be sent to Crawler", "number of chunks to be sent to Index Builder number_of_comps", "= 10 # number of components managed by each watchdog", "python3 \"\"\" constants.py - Contains all constants used by the", "/usr/bin/env python3 \"\"\" constants.py - Contains all constants used by", "Servers number_of_links = 5 # number of links to be", "chunks to be sent to Index Builder number_of_comps = 10", "links to be sent to Crawler number_of_chunks = 5 #", "Crawler number_of_chunks = 5 # number of chunks to be", "be sent to Index Builder number_of_comps = 10 # number", "dot <EMAIL>) Date: 12/3/2016 \"\"\" number_of_rows = 3 # total", "(<EMAIL> at <EMAIL> dot <EMAIL>) Date: 12/3/2016 \"\"\" number_of_rows =", "to be sent to Index Builder number_of_comps = 10 #", "of links to be sent to Crawler number_of_chunks = 5", "\"\"\" constants.py - Contains all constants used by the device", "rows of Index Servers number_of_links = 5 # number of", "by the device manager Author: - <NAME> (<EMAIL> at <EMAIL>", "total number rows of Index Servers number_of_links = 5 #", "<EMAIL>) Date: 12/3/2016 \"\"\" number_of_rows = 3 # total number", "of Index Servers number_of_links = 5 # number of links", "3 # total number rows of Index Servers number_of_links =", "number of links to be sent to Crawler number_of_chunks =", "= 3 # total number rows of Index Servers number_of_links", "device manager Author: - <NAME> (<EMAIL> at <EMAIL> dot <EMAIL>)", "Index Builder number_of_comps = 10 # number of components managed", "all constants used by the device manager Author: - <NAME>", "# number of links to be sent to Crawler number_of_chunks", "be sent to Crawler number_of_chunks = 5 # number of", "at <EMAIL> dot <EMAIL>) Date: 12/3/2016 \"\"\" number_of_rows = 3", "to Crawler number_of_chunks = 5 # number of chunks to", "Builder number_of_comps = 10 # number of components managed by", "used by the device manager Author: - <NAME> (<EMAIL> at", "#! 
/usr/bin/env python3 \"\"\" constants.py - Contains all constants used", "Date: 12/3/2016 \"\"\" number_of_rows = 3 # total number rows", "- Contains all constants used by the device manager Author:", "sent to Crawler number_of_chunks = 5 # number of chunks", "of chunks to be sent to Index Builder number_of_comps =", "5 # number of chunks to be sent to Index", "constants.py - Contains all constants used by the device manager", "12/3/2016 \"\"\" number_of_rows = 3 # total number rows of", "number_of_links = 5 # number of links to be sent", "Contains all constants used by the device manager Author: -", "number_of_chunks = 5 # number of chunks to be sent", "Author: - <NAME> (<EMAIL> at <EMAIL> dot <EMAIL>) Date: 12/3/2016", "number_of_rows = 3 # total number rows of Index Servers", "<NAME> (<EMAIL> at <EMAIL> dot <EMAIL>) Date: 12/3/2016 \"\"\" number_of_rows", "manager Author: - <NAME> (<EMAIL> at <EMAIL> dot <EMAIL>) Date:", "the device manager Author: - <NAME> (<EMAIL> at <EMAIL> dot", "number_of_comps = 10 # number of components managed by each", "to be sent to Crawler number_of_chunks = 5 # number", "# number of chunks to be sent to Index Builder", "to Index Builder number_of_comps = 10 # number of components", "number rows of Index Servers number_of_links = 5 # number", "<EMAIL> dot <EMAIL>) Date: 12/3/2016 \"\"\" number_of_rows = 3 #", "# total number rows of Index Servers number_of_links = 5", "Index Servers number_of_links = 5 # number of links to", "= 5 # number of chunks to be sent to", "- <NAME> (<EMAIL> at <EMAIL> dot <EMAIL>) Date: 12/3/2016 \"\"\"", "= 5 # number of links to be sent to" ]
[ "but it depends on the image so I made it", "is found by the author # modify if not the", "eps /= 255 d = DoG(image, size, sigma, k, gamma)", "out if this is not needed XDoG_config['gamma'] += 0.01 *", "k=2.5, gamma=0.97 ) def gen_xdog_image(src, dst): gray = cv2.imread(src, cv2.IMREAD_GRAYSCALE)", "= cv2.GaussianBlur(image, (size, size), sigma*k) return g1 - gamma *", "made it move randomly # comment out if this is", "comment out if this is not needed XDoG_config['gamma'] += 0.01", "sigma*k) return g1 - gamma * g2 def XDoG(image, size,", "- eps)) e[e >= 1] = 1 return e *", "np def DoG(image, size, sigma, k=1.6, gamma=1.): g1 = cv2.GaussianBlur(image,", "if not the desired output XDoG_config = dict( size=0, sigma=0.6,", "gen_xdog_image(src, dst): gray = cv2.imread(src, cv2.IMREAD_GRAYSCALE) # I wanted the", "dst): gray = cv2.imread(src, cv2.IMREAD_GRAYSCALE) # I wanted the gamma", "not the desired output XDoG_config = dict( size=0, sigma=0.6, eps=-15,", "d = DoG(image, size, sigma, k, gamma) d /= d.max()", "e = 1 + np.tanh(phi * (d - eps)) e[e", "sigma) g2 = cv2.GaussianBlur(image, (size, size), sigma*k) return g1 -", "- gamma * g2 def XDoG(image, size, sigma, eps, phi,", "eps)) e[e >= 1] = 1 return e * 255", "g1 - gamma * g2 def XDoG(image, size, sigma, eps,", "= DoG(image, size, sigma, k, gamma) d /= d.max() e", "0.01 * np.random.rand(1) dogged = XDoG(gray, **XDoG_config) cv2.imwrite(dst, dogged) if", "import numpy as np def DoG(image, size, sigma, k=1.6, gamma=1.):", "return e * 255 # This config is found by", "eps, phi, k=1.6, gamma=1.): eps /= 255 d = DoG(image,", "I made it move randomly # comment out if this", "= XDoG(gray, **XDoG_config) cv2.imwrite(dst, dogged) if __name__ == \"__main__\": gen_xdog_image('sample.jpg',", "255 # This config is found by the author #", "gamma=0.97 ) def gen_xdog_image(src, dst): gray = cv2.imread(src, cv2.IMREAD_GRAYSCALE) #", "g2 = cv2.GaussianBlur(image, (size, size), sigma*k) return g1 - gamma", "cv2.imread(src, cv2.IMREAD_GRAYSCALE) # I wanted the gamma between [0.97, 0.98]", "XDoG(image, size, sigma, eps, phi, k=1.6, gamma=1.): eps /= 255", "np.tanh(phi * (d - eps)) e[e >= 1] = 1", "<filename>XDoG/XDoG.py import cv2 import numpy as np def DoG(image, size,", "size=0, sigma=0.6, eps=-15, phi=10e8, k=2.5, gamma=0.97 ) def gen_xdog_image(src, dst):", "gray = cv2.imread(src, cv2.IMREAD_GRAYSCALE) # I wanted the gamma between", "k=1.6, gamma=1.): eps /= 255 d = DoG(image, size, sigma,", "this is not needed XDoG_config['gamma'] += 0.01 * np.random.rand(1) dogged", "/= 255 d = DoG(image, size, sigma, k, gamma) d", "sigma, eps, phi, k=1.6, gamma=1.): eps /= 255 d =", "the image so I made it move randomly # comment", "randomly # comment out if this is not needed XDoG_config['gamma']", "by the author # modify if not the desired output", "def gen_xdog_image(src, dst): gray = cv2.imread(src, cv2.IMREAD_GRAYSCALE) # I wanted", "cv2.GaussianBlur(image, (size, size), sigma*k) return g1 - gamma * g2", "as np def DoG(image, size, sigma, k=1.6, gamma=1.): g1 =", "# but it depends on the image so I made", "gamma=1.): eps /= 255 d = DoG(image, size, sigma, k,", "* (d - eps)) e[e >= 1] = 1 return", "0.98] # but it depends on the image so I", "dogged = XDoG(gray, **XDoG_config) cv2.imwrite(dst, dogged) if __name__ == \"__main__\":", "def XDoG(image, size, sigma, eps, phi, k=1.6, gamma=1.): eps /=", "e[e >= 1] = 1 return e * 255 #", "+= 0.01 * np.random.rand(1) dogged = XDoG(gray, **XDoG_config) cv2.imwrite(dst, dogged)", "XDoG_config = dict( size=0, sigma=0.6, 
eps=-15, phi=10e8, k=2.5, gamma=0.97 )", "size), sigma*k) return g1 - gamma * g2 def XDoG(image,", "size), sigma) g2 = cv2.GaussianBlur(image, (size, size), sigma*k) return g1", "wanted the gamma between [0.97, 0.98] # but it depends", "size, sigma, k=1.6, gamma=1.): g1 = cv2.GaussianBlur(image, (size, size), sigma)", "found by the author # modify if not the desired", "author # modify if not the desired output XDoG_config =", "size, sigma, eps, phi, k=1.6, gamma=1.): eps /= 255 d", ") def gen_xdog_image(src, dst): gray = cv2.imread(src, cv2.IMREAD_GRAYSCALE) # I", "255 d = DoG(image, size, sigma, k, gamma) d /=", "d /= d.max() e = 1 + np.tanh(phi * (d", "1 + np.tanh(phi * (d - eps)) e[e >= 1]", "(size, size), sigma*k) return g1 - gamma * g2 def", "k=1.6, gamma=1.): g1 = cv2.GaussianBlur(image, (size, size), sigma) g2 =", "(d - eps)) e[e >= 1] = 1 return e", "the author # modify if not the desired output XDoG_config", "d.max() e = 1 + np.tanh(phi * (d - eps))", "I wanted the gamma between [0.97, 0.98] # but it", "= 1 return e * 255 # This config is", "DoG(image, size, sigma, k=1.6, gamma=1.): g1 = cv2.GaussianBlur(image, (size, size),", "+ np.tanh(phi * (d - eps)) e[e >= 1] =", "the desired output XDoG_config = dict( size=0, sigma=0.6, eps=-15, phi=10e8,", "output XDoG_config = dict( size=0, sigma=0.6, eps=-15, phi=10e8, k=2.5, gamma=0.97", "* 255 # This config is found by the author", "cv2.GaussianBlur(image, (size, size), sigma) g2 = cv2.GaussianBlur(image, (size, size), sigma*k)", "dict( size=0, sigma=0.6, eps=-15, phi=10e8, k=2.5, gamma=0.97 ) def gen_xdog_image(src,", "is not needed XDoG_config['gamma'] += 0.01 * np.random.rand(1) dogged =", "gamma) d /= d.max() e = 1 + np.tanh(phi *", "g1 = cv2.GaussianBlur(image, (size, size), sigma) g2 = cv2.GaussianBlur(image, (size,", "if this is not needed XDoG_config['gamma'] += 0.01 * np.random.rand(1)", "[0.97, 0.98] # but it depends on the image so", "1 return e * 255 # This config is found", "(size, size), sigma) g2 = cv2.GaussianBlur(image, (size, size), sigma*k) return", "gamma=1.): g1 = cv2.GaussianBlur(image, (size, size), sigma) g2 = cv2.GaussianBlur(image,", "* np.random.rand(1) dogged = XDoG(gray, **XDoG_config) cv2.imwrite(dst, dogged) if __name__", "cv2 import numpy as np def DoG(image, size, sigma, k=1.6,", "image so I made it move randomly # comment out", "phi=10e8, k=2.5, gamma=0.97 ) def gen_xdog_image(src, dst): gray = cv2.imread(src,", "import cv2 import numpy as np def DoG(image, size, sigma,", "it depends on the image so I made it move", "config is found by the author # modify if not", "np.random.rand(1) dogged = XDoG(gray, **XDoG_config) cv2.imwrite(dst, dogged) if __name__ ==", "* g2 def XDoG(image, size, sigma, eps, phi, k=1.6, gamma=1.):", "size, sigma, k, gamma) d /= d.max() e = 1", "k, gamma) d /= d.max() e = 1 + np.tanh(phi", "needed XDoG_config['gamma'] += 0.01 * np.random.rand(1) dogged = XDoG(gray, **XDoG_config)", "depends on the image so I made it move randomly", "# comment out if this is not needed XDoG_config['gamma'] +=", "gamma * g2 def XDoG(image, size, sigma, eps, phi, k=1.6,", "modify if not the desired output XDoG_config = dict( size=0,", "/= d.max() e = 1 + np.tanh(phi * (d -", "between [0.97, 0.98] # but it depends on the image", "def DoG(image, size, sigma, k=1.6, gamma=1.): g1 = cv2.GaussianBlur(image, (size,", "sigma=0.6, eps=-15, phi=10e8, k=2.5, gamma=0.97 ) def gen_xdog_image(src, dst): gray", "# This config is found by the author # modify", "on the image so I made it move randomly #", "g2 def 
XDoG(image, size, sigma, eps, phi, k=1.6, gamma=1.): eps", "sigma, k, gamma) d /= d.max() e = 1 +", "# modify if not the desired output XDoG_config = dict(", "# I wanted the gamma between [0.97, 0.98] # but", "1] = 1 return e * 255 # This config", "the gamma between [0.97, 0.98] # but it depends on", "phi, k=1.6, gamma=1.): eps /= 255 d = DoG(image, size,", "desired output XDoG_config = dict( size=0, sigma=0.6, eps=-15, phi=10e8, k=2.5,", ">= 1] = 1 return e * 255 # This", "XDoG_config['gamma'] += 0.01 * np.random.rand(1) dogged = XDoG(gray, **XDoG_config) cv2.imwrite(dst,", "sigma, k=1.6, gamma=1.): g1 = cv2.GaussianBlur(image, (size, size), sigma) g2", "DoG(image, size, sigma, k, gamma) d /= d.max() e =", "numpy as np def DoG(image, size, sigma, k=1.6, gamma=1.): g1", "return g1 - gamma * g2 def XDoG(image, size, sigma,", "cv2.IMREAD_GRAYSCALE) # I wanted the gamma between [0.97, 0.98] #", "gamma between [0.97, 0.98] # but it depends on the", "not needed XDoG_config['gamma'] += 0.01 * np.random.rand(1) dogged = XDoG(gray,", "so I made it move randomly # comment out if", "= 1 + np.tanh(phi * (d - eps)) e[e >=", "e * 255 # This config is found by the", "This config is found by the author # modify if", "= dict( size=0, sigma=0.6, eps=-15, phi=10e8, k=2.5, gamma=0.97 ) def", "XDoG(gray, **XDoG_config) cv2.imwrite(dst, dogged) if __name__ == \"__main__\": gen_xdog_image('sample.jpg', 'dog.jpg')", "= cv2.GaussianBlur(image, (size, size), sigma) g2 = cv2.GaussianBlur(image, (size, size),", "eps=-15, phi=10e8, k=2.5, gamma=0.97 ) def gen_xdog_image(src, dst): gray =", "it move randomly # comment out if this is not", "= cv2.imread(src, cv2.IMREAD_GRAYSCALE) # I wanted the gamma between [0.97,", "move randomly # comment out if this is not needed" ]
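The row ending here covers an XDoG (eXtended Difference-of-Gaussians) sketch filter: a DoG response g1 - gamma*g2 from two Gaussian blurs, soft-thresholded with tanh and rescaled to 8-bit. The two functions appear almost verbatim in the fragments; the sketch below restates them in runnable form with comments, assuming OpenCV and NumPy are available, with parameter values mirroring the XDoG_config dict shown above.

```python
import cv2
import numpy as np

def DoG(image, size, sigma, k=1.6, gamma=1.0):
    # difference of two Gaussian blurs; k scales the second sigma
    g1 = cv2.GaussianBlur(image, (size, size), sigma)
    g2 = cv2.GaussianBlur(image, (size, size), sigma * k)
    return g1 - gamma * g2

def XDoG(image, size, sigma, eps, phi, k=1.6, gamma=1.0):
    # soft-threshold the normalized DoG response with tanh
    eps /= 255
    d = DoG(image, size, sigma, k, gamma)
    d /= d.max()
    e = 1 + np.tanh(phi * (d - eps))
    e[e >= 1] = 1
    return e * 255

# parameters corresponding to the XDoG_config in the fragments
config = dict(size=0, sigma=0.6, eps=-15, phi=10e8, k=2.5, gamma=0.97)
gray = cv2.imread("sample.jpg", cv2.IMREAD_GRAYSCALE)
if gray is not None:
    cv2.imwrite("dog.jpg", XDoG(gray, **config))
```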
[ "TF example files (can be a glob or comma separated).\")", "2.0 (the \"License\"); # you may not use this file", "to use.\") # This is a handy little utility so", "the perplexities to TPU class gcloudwriter(): def __init__(self, gcloud_name): assert", "= bucket.blob(blob_name) def __enter__(self): self.tempfile = tempfile.NamedTemporaryFile() return self.tempfile def", "1000, \"How many steps to make in each estimator call.\")", "to use\") ## Other parameters flags.DEFINE_string( \"init_checkpoint\", None, \"Initial checkpoint", "just the body in particular. ppl_ex = [] for logprobs_i,", "json file corresponding to the pre-trained news model. \" \"This", "that matches, otherwise, return the last one :param default_value: Index", "TPU is located in. If not \" \"specified, we will", "the last one :param default_value: Index to return if there", "self.tempfile.close() def ind_where(array: np.ndarray, target, return_first_match=True, default_value=-1): \"\"\" :param array:", "per_host_input_for_training=is_per_host)) model_fn = model_fn_builder(news_config, init_checkpoint=FLAGS.init_checkpoint, learning_rate=1e-4, num_train_steps=0, num_warmup_steps=0, use_tpu=FLAGS.use_tpu, )", "than this will be truncated, and sequences shorter \" \"than", "no match :return: index of the first match, or -1", "for input_file in input_files: tf.logging.info(\" %s\" % input_file) tpu_cluster_resolver =", "data=FLAGS.config_file) h5.create_dataset('ckpt', data=FLAGS.init_checkpoint) h5.create_dataset('input_file', data=FLAGS.input_file) # This gives the perplexity", "input_files: tf.logging.info(\" %s\" % input_file) tpu_cluster_resolver = None if FLAGS.use_tpu", "of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless", "\"Only used if `use_tpu` is True. Total number of TPU", "8, \"Only used if `use_tpu` is True. Total number of", ":param return_first_match: If true, return the first index that matches,", "GPU. estimator = tf.contrib.tpu.TPUEstimator( use_tpu=FLAGS.use_tpu, model_fn=model_fn, config=run_config, train_batch_size=FLAGS.batch_size, eval_batch_size=FLAGS.batch_size, predict_batch_size=FLAGS.batch_size,", "1 matching_inds = np.where(array == target)[0] if len(matching_inds) > 0:", "This should be either the name \" \"used when creating", "input_ids is shifted by 1 start_ind = ind_where(ids_i, target=50265, default_value=0)", "Copyright 2019 <NAME> # # Licensed under the Apache License,", "\"[Optional] Project name for the Cloud TPU-enabled project. If not", "data=data.astype(dtype2use)) h5.create_dataset('model', data=FLAGS.config_file) h5.create_dataset('ckpt', data=FLAGS.init_checkpoint) h5.create_dataset('input_file', data=FLAGS.input_file) # This gives", "import tensorflow as tf from lm.dataloader import input_fn_builder import numpy", "CPU # or GPU. 
# Original work Copyright 2018 The Google AI Language Team Authors.
# Modified work Copyright 2019 <NAME>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import os
import tempfile

import h5py
import numpy as np
import tensorflow as tf
from google.cloud import storage

from lm.dataloader import input_fn_builder
from lm.modeling import model_fn_builder, GroverConfig

flags = tf.flags
FLAGS = flags.FLAGS

\" \"This specifies the model architecture.\") flags.DEFINE_string( \"input_file\", None,", "or agreed to in writing, software # distributed under the", "else np.uint16 h5.create_dataset(cat, data=data.astype(dtype2use)) h5.create_dataset('model', data=FLAGS.config_file) h5.create_dataset('ckpt', data=FLAGS.init_checkpoint) h5.create_dataset('input_file', data=FLAGS.input_file)", "storage flags = tf.flags FLAGS = flags.FLAGS ## Required parameters", "required by applicable law or agreed to in writing, software", "BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either", "input_file) tpu_cluster_resolver = None if FLAGS.use_tpu and FLAGS.tpu_name: tpu_cluster_resolver =", "with the License. # You may obtain a copy of", "*args): self.tempfile.flush() print(\"UPLOADING TO {}\".format(self.gcloud_name), flush=True) self.blob.upload_from_filename(self.tempfile.name) self.tempfile.close() def ind_where(array:", "gcloudwriter(os.path.join(FLAGS.output_dir, FLAGS.validation_name)) as tempfile_name: with h5py.File(tempfile_name, 'w') as h5: for", "in mind input_ids is shifted by 1 start_ind = ind_where(ids_i,", "numpy as np import tempfile import h5py from google.cloud import", "flags.DEFINE_integer(\"iterations_per_loop\", 1000, \"How many steps to make in each estimator", "> 0: if return_first_match: return int(matching_inds[0]) else: return int(matching_inds[-1]) return", "news_config = GroverConfig.from_json_file(FLAGS.config_file) tf.gfile.MakeDirs(FLAGS.output_dir) input_files = [] for input_pattern in", "estimator = tf.contrib.tpu.TPUEstimator( use_tpu=FLAGS.use_tpu, model_fn=model_fn, config=run_config, train_batch_size=FLAGS.batch_size, eval_batch_size=FLAGS.batch_size, predict_batch_size=FLAGS.batch_size, params={'model_dir':", "after WordPiece tokenization. \" \"Sequences longer than this will be", "If true, return the first index that matches, otherwise, return", "is not available, this will fall back to normal Estimator", "return default_value def main(_): tf.logging.set_verbosity(tf.logging.INFO) news_config = GroverConfig.from_json_file(FLAGS.config_file) tf.gfile.MakeDirs(FLAGS.output_dir) input_files", "of the first match, or -1 if nothing \"\"\" assert", "= tf.contrib.tpu.InputPipelineConfig.PER_HOST_V2 run_config = tf.contrib.tpu.RunConfig( cluster=tpu_cluster_resolver, master=FLAGS.master, model_dir=FLAGS.output_dir, save_checkpoints_steps=FLAGS.iterations_per_loop, keep_checkpoint_max=None,", "compliance with the License. # You may obtain a copy", "yield_single_examples=True)] cats = sorted(result[0].keys()) result_stack = {cat: np.stack([x[cat] for x", "grpc://ip.address.of.tpu:8470 \" \"url.\") flags.DEFINE_string( \"tpu_zone\", None, \"[Optional] GCE zone where", "agreed to in writing, software # distributed under the License", "gives the perplexity of the entire article. if you want", "logprobs_i, ids_i in zip(result_stack['gt_logprobs'], result_stack['labels']): # Omit the first token.", "model).\") flags.DEFINE_integer( \"max_seq_length\", 1024, \"The maximum total input sequence length", "return_first_match=True, default_value=-1): \"\"\" :param array: Single dimension array :param target:", "distributed under the License is distributed on an \"AS IS\"", "to normal Estimator on CPU # or GPU. 
estimator =", "TO {}\".format(self.gcloud_name), flush=True) self.blob.upload_from_filename(self.tempfile.name) self.tempfile.close() def ind_where(array: np.ndarray, target, return_first_match=True,", "flags.DEFINE_string( \"validation_name\", 'preds.h5', \"Name to use\") ## Other parameters flags.DEFINE_string(", "num_train_steps=0, num_warmup_steps=0, use_tpu=FLAGS.use_tpu, ) # If TPU is not available,", "{}\".format(self.gcloud_name), flush=True) self.blob.upload_from_filename(self.tempfile.name) self.tempfile.close() def ind_where(array: np.ndarray, target, return_first_match=True, default_value=-1):", "target: target to search for :param return_first_match: If true, return", "body in particular. ppl_ex = [] for logprobs_i, ids_i in", "is_per_host = tf.contrib.tpu.InputPipelineConfig.PER_HOST_V2 run_config = tf.contrib.tpu.RunConfig( cluster=tpu_cluster_resolver, master=FLAGS.master, model_dir=FLAGS.output_dir, save_checkpoints_steps=FLAGS.iterations_per_loop,", "for the Cloud TPU-enabled project. If not \" \"specified, we", "learning_rate=1e-4, num_train_steps=0, num_warmup_steps=0, use_tpu=FLAGS.use_tpu, ) # If TPU is not", "express or implied. # See the License for the specific", "each estimator call.\") flags.DEFINE_integer(\"batch_size\", 32, \"Batch size used for eval\")", "data=FLAGS.init_checkpoint) h5.create_dataset('input_file', data=FLAGS.input_file) # This gives the perplexity of the", "except in compliance with the License. # You may obtain", "particular. ppl_ex = [] for logprobs_i, ids_i in zip(result_stack['gt_logprobs'], result_stack['labels']):", "self.blob.upload_from_filename(self.tempfile.name) self.tempfile.close() def ind_where(array: np.ndarray, target, return_first_match=True, default_value=-1): \"\"\" :param", "Licensed under the Apache License, Version 2.0 (the \"License\"); #", "not use this file except in compliance with the License.", "\"The Cloud TPU to use for training. This should be", "entire article. if you want to replicate the results of", "glob or comma separated).\") flags.DEFINE_string( \"output_dir\", None, \"The output directory", "writing, software # distributed under the License is distributed on", "project. 
If not \" \"specified, we will attempt to automatically", "def __init__(self, gcloud_name): assert gcloud_name.startswith('gs://') self.gcloud_name = gcloud_name bucket_name, blob_name", "or -1 if nothing \"\"\" assert array.ndim == 1 matching_inds", "if len(matching_inds) > 0: if return_first_match: return int(matching_inds[0]) else: return", "h5: for cat, data in result_stack.items(): dtype2use = np.float16 if", "you may not use this file except in compliance with", "return the last one :param default_value: Index to return if", "# Licensed under the Apache License, Version 2.0 (the \"License\");", "for input_pattern in FLAGS.input_file.split(\",\"): input_files.extend(tf.gfile.Glob(input_pattern)) tf.logging.info(\"*** Input Files ***\") for", "either the name \" \"used when creating the Cloud TPU,", "a handy little utility so that we can save the", "is a handy little utility so that we can save", "checkpoint (usually from a pre-trained model).\") flags.DEFINE_integer( \"max_seq_length\", 1024, \"The", "be truncated, and sequences shorter \" \"than this will be", "generation.\") flags.DEFINE_integer(\"iterations_per_loop\", 1000, \"How many steps to make in each", "FLAGS.validation_name)) as tempfile_name: with h5py.File(tempfile_name, 'w') as h5: for cat,", "os from lm.modeling import model_fn_builder, GroverConfig import tensorflow as tf", "nothing \"\"\" assert array.ndim == 1 matching_inds = np.where(array ==", "\" \"This specifies the model architecture.\") flags.DEFINE_string( \"input_file\", None, \"Input", "flags.DEFINE_string( \"tpu_name\", None, \"The Cloud TPU to use for training.", "make in each estimator call.\") flags.DEFINE_integer(\"batch_size\", 32, \"Batch size used", "## Other parameters flags.DEFINE_string( \"init_checkpoint\", None, \"Initial checkpoint (usually from", "permissions and # limitations under the License. import os from", "tf.logging.set_verbosity(tf.logging.INFO) news_config = GroverConfig.from_json_file(FLAGS.config_file) tf.gfile.MakeDirs(FLAGS.output_dir) input_files = [] for input_pattern", "\" \"specified, we will attempt to automatically detect the GCE", "for cat, data in result_stack.items(): dtype2use = np.float16 if cat.endswith(('logprobs',", "= [] for logprobs_i, ids_i in zip(result_stack['gt_logprobs'], result_stack['labels']): # Omit", "this will be padded. Must match data generation.\") flags.DEFINE_integer(\"iterations_per_loop\", 1000,", "CONDITIONS OF ANY KIND, either express or implied. # See", "from \" \"metadata.\") flags.DEFINE_string(\"master\", None, \"[Optional] TensorFlow master URL.\") flags.DEFINE_integer(", "\"specified, we will attempt to automatically detect the GCE project", "dtype2use = np.float16 if cat.endswith(('logprobs', 'top_p_required')) else np.uint16 h5.create_dataset(cat, data=data.astype(dtype2use))", "License. import os from lm.modeling import model_fn_builder, GroverConfig import tensorflow", "on CPU # or GPU. 
estimator = tf.contrib.tpu.TPUEstimator( use_tpu=FLAGS.use_tpu, model_fn=model_fn,", "is distributed on an \"AS IS\" BASIS, # WITHOUT WARRANTIES", "in cats} with gcloudwriter(os.path.join(FLAGS.output_dir, FLAGS.validation_name)) as tempfile_name: with h5py.File(tempfile_name, 'w')", "return if there was no match :return: index of the", "-1 if nothing \"\"\" assert array.ndim == 1 matching_inds =", "attempt to automatically detect the GCE project from \" \"metadata.\")", "ppl_ex = [] for logprobs_i, ids_i in zip(result_stack['gt_logprobs'], result_stack['labels']): #", "= GroverConfig.from_json_file(FLAGS.config_file) tf.gfile.MakeDirs(FLAGS.output_dir) input_files = [] for input_pattern in FLAGS.input_file.split(\",\"):", "import h5py from google.cloud import storage flags = tf.flags FLAGS", "Modified work Copyright 2019 <NAME> # # Licensed under the", "h5.create_dataset('ckpt', data=FLAGS.init_checkpoint) h5.create_dataset('input_file', data=FLAGS.input_file) # This gives the perplexity of", "tpu_cluster_resolver = None if FLAGS.use_tpu and FLAGS.tpu_name: tpu_cluster_resolver = tf.contrib.cluster_resolver.TPUClusterResolver(", "lm.modeling import model_fn_builder, GroverConfig import tensorflow as tf from lm.dataloader", "to use TPU or GPU/CPU.\") flags.DEFINE_string( \"tpu_name\", None, \"The Cloud", "data=FLAGS.input_file) # This gives the perplexity of the entire article.", "from lm.dataloader import input_fn_builder import numpy as np import tempfile", "bucket_name, blob_name = gcloud_name.split('gs://')[1].split('/', 1) bucket = storage.Client().get_bucket(bucket_name) self.blob =", "[] for logprobs_i, ids_i in zip(result_stack['gt_logprobs'], result_stack['labels']): # Omit the", "evaluate_for_fixed_number_of_steps=False, num_cpu_threads=1, is_training=False) result = [x for x in estimator.predict(input_fn=eval_input_fn,", "result]) for cat in cats} with gcloudwriter(os.path.join(FLAGS.output_dir, FLAGS.validation_name)) as tempfile_name:", "eval\") flags.DEFINE_bool(\"use_tpu\", False, \"Whether to use TPU or GPU/CPU.\") flags.DEFINE_string(", "project=FLAGS.gcp_project) is_per_host = tf.contrib.tpu.InputPipelineConfig.PER_HOST_V2 run_config = tf.contrib.tpu.RunConfig( cluster=tpu_cluster_resolver, master=FLAGS.master, model_dir=FLAGS.output_dir,", "%s\" % input_file) tpu_cluster_resolver = None if FLAGS.use_tpu and FLAGS.tpu_name:", "index that matches, otherwise, return the last one :param default_value:", "is located in. If not \" \"specified, we will attempt", "\"This specifies the model architecture.\") flags.DEFINE_string( \"input_file\", None, \"Input TF", "\"input_file\", None, \"Input TF example files (can be a glob", "default_value: Index to return if there was no match :return:", "\" \"Sequences longer than this will be truncated, and sequences", "import model_fn_builder, GroverConfig import tensorflow as tf from lm.dataloader import", "\"validation_name\", 'preds.h5', \"Name to use\") ## Other parameters flags.DEFINE_string( \"init_checkpoint\",", "if nothing \"\"\" assert array.ndim == 1 matching_inds = np.where(array", "\"output_dir\", None, \"The output directory where the model checkpoints will", "\"\"\" assert array.ndim == 1 matching_inds = np.where(array == target)[0]", "flags.DEFINE_string( \"tpu_zone\", None, \"[Optional] GCE zone where the Cloud TPU", "(can be a glob or comma separated).\") flags.DEFINE_string( \"output_dir\", None,", "total input sequence length after WordPiece tokenization. 
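# Example invocation (a sketch only: the script path and every gs:// path
# below are hypothetical placeholders, not values shipped with the repo):
#
#   python validate.py \
#       --config_file=configs/base.json \
#       --input_file='gs://my-bucket/data/val_*.tfrecord' \
#       --output_dir=gs://my-bucket/eval \
#       --init_checkpoint=gs://my-bucket/models/model.ckpt \
#       --use_tpu --tpu_name=my-tpu
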
\" \"Sequences longer", "seq_length=FLAGS.max_seq_length, evaluate_for_fixed_number_of_steps=False, num_cpu_threads=1, is_training=False) result = [x for x in", "flags.DEFINE_string( \"gcp_project\", None, \"[Optional] Project name for the Cloud TPU-enabled", "result_stack = {cat: np.stack([x[cat] for x in result]) for cat", "OR CONDITIONS OF ANY KIND, either express or implied. #", "governing permissions and # limitations under the License. import os", "tf.flags FLAGS = flags.FLAGS ## Required parameters flags.DEFINE_string( \"config_file\", 'configs/base.json',", "TPU is not available, this will fall back to normal", "that we can save the perplexities to TPU class gcloudwriter():", "for cat in cats} with gcloudwriter(os.path.join(FLAGS.output_dir, FLAGS.validation_name)) as tempfile_name: with", "the License is distributed on an \"AS IS\" BASIS, #", "array.ndim == 1 matching_inds = np.where(array == target)[0] if len(matching_inds)", "pre-trained news model. \" \"This specifies the model architecture.\") flags.DEFINE_string(", "the body in particular. ppl_ex = [] for logprobs_i, ids_i", "if FLAGS.use_tpu and FLAGS.tpu_name: tpu_cluster_resolver = tf.contrib.cluster_resolver.TPUClusterResolver( FLAGS.tpu_name, zone=FLAGS.tpu_zone, project=FLAGS.gcp_project)", "will be truncated, and sequences shorter \" \"than this will", "pre-trained model).\") flags.DEFINE_integer( \"max_seq_length\", 1024, \"The maximum total input sequence", "int(matching_inds[-1]) return default_value def main(_): tf.logging.set_verbosity(tf.logging.INFO) news_config = GroverConfig.from_json_file(FLAGS.config_file) tf.gfile.MakeDirs(FLAGS.output_dir)", "import storage flags = tf.flags FLAGS = flags.FLAGS ## Required", "\"Initial checkpoint (usually from a pre-trained model).\") flags.DEFINE_integer( \"max_seq_length\", 1024,", "\"\"\" :param array: Single dimension array :param target: target to", "perplexity of the entire article. if you want to replicate", "True. Total number of TPU cores to use.\") # This", "token. Keep in mind input_ids is shifted by 1 start_ind", "name \" \"used when creating the Cloud TPU, or a", "ind_where(ids_i, target=50266, default_value=ids_i.shape[0] - 1) ppl_ex.append(logprobs_i[start_ind:end_ind]) ppl_ex = np.concatenate(ppl_ex, 0)", "AI Language Team Authors. # Modified work Copyright 2019 <NAME>", "utility so that we can save the perplexities to TPU", "ind_where(ids_i, target=50265, default_value=0) end_ind = ind_where(ids_i, target=50266, default_value=ids_i.shape[0] - 1)", "be padded. Must match data generation.\") flags.DEFINE_integer(\"iterations_per_loop\", 1000, \"How many", "you # might need to do something different to extract", "Authors. # Modified work Copyright 2019 <NAME> # # Licensed", "corresponding to the pre-trained news model. 
\" \"This specifies the", "many steps to make in each estimator call.\") flags.DEFINE_integer(\"batch_size\", 32,", "1 start_ind = ind_where(ids_i, target=50265, default_value=0) end_ind = ind_where(ids_i, target=50266,", "law or agreed to in writing, software # distributed under", "flags = tf.flags FLAGS = flags.FLAGS ## Required parameters flags.DEFINE_string(", "## Required parameters flags.DEFINE_string( \"config_file\", 'configs/base.json', \"The config json file", "parameters flags.DEFINE_string( \"config_file\", 'configs/base.json', \"The config json file corresponding to", "None, \"[Optional] TensorFlow master URL.\") flags.DEFINE_integer( \"num_tpu_cores\", 8, \"Only used", "0) print(\"Article perplexity is {:.3f}\".format(np.exp(-np.mean(ppl_ex))), flush=True) if __name__ == \"__main__\":", "the name \" \"used when creating the Cloud TPU, or", "Cloud TPU, or a grpc://ip.address.of.tpu:8470 \" \"url.\") flags.DEFINE_string( \"tpu_zone\", None,", "project from \" \"metadata.\") flags.DEFINE_string( \"gcp_project\", None, \"[Optional] Project name", "\"tpu_zone\", None, \"[Optional] GCE zone where the Cloud TPU is", ":return: index of the first match, or -1 if nothing", "return_first_match: return int(matching_inds[0]) else: return int(matching_inds[-1]) return default_value def main(_):", "to automatically detect the GCE project from \" \"metadata.\") flags.DEFINE_string(\"master\",", "TPU to use for training. This should be either the", "predict_batch_size=FLAGS.batch_size, params={'model_dir': FLAGS.output_dir} ) eval_input_fn = input_fn_builder( input_files=input_files, seq_length=FLAGS.max_seq_length, evaluate_for_fixed_number_of_steps=False,", "as tempfile_name: with h5py.File(tempfile_name, 'w') as h5: for cat, data", "model_fn_builder, GroverConfig import tensorflow as tf from lm.dataloader import input_fn_builder", "Other parameters flags.DEFINE_string( \"init_checkpoint\", None, \"Initial checkpoint (usually from a", "sequence length after WordPiece tokenization. \" \"Sequences longer than this", "little utility so that we can save the perplexities to", "tempfile_name: with h5py.File(tempfile_name, 'w') as h5: for cat, data in", "limitations under the License. import os from lm.modeling import model_fn_builder,", "longer than this will be truncated, and sequences shorter \"", "Original work Copyright 2018 The Google AI Language Team Authors.", "\"than this will be padded. Must match data generation.\") flags.DEFINE_integer(\"iterations_per_loop\",", "x in result]) for cat in cats} with gcloudwriter(os.path.join(FLAGS.output_dir, FLAGS.validation_name))", "results of the paper you # might need to do", "and sequences shorter \" \"than this will be padded. 
Must", "config=run_config, train_batch_size=FLAGS.batch_size, eval_batch_size=FLAGS.batch_size, predict_batch_size=FLAGS.batch_size, params={'model_dir': FLAGS.output_dir} ) eval_input_fn = input_fn_builder(", "may obtain a copy of the License at # #", "or GPU/CPU.\") flags.DEFINE_string( \"tpu_name\", None, \"The Cloud TPU to use", "__exit__(self, *args): self.tempfile.flush() print(\"UPLOADING TO {}\".format(self.gcloud_name), flush=True) self.blob.upload_from_filename(self.tempfile.name) self.tempfile.close() def", "use TPU or GPU/CPU.\") flags.DEFINE_string( \"tpu_name\", None, \"The Cloud TPU", "IS\" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,", "int(matching_inds[0]) else: return int(matching_inds[-1]) return default_value def main(_): tf.logging.set_verbosity(tf.logging.INFO) news_config", "input_files = [] for input_pattern in FLAGS.input_file.split(\",\"): input_files.extend(tf.gfile.Glob(input_pattern)) tf.logging.info(\"*** Input", "Omit the first token. Keep in mind input_ids is shifted", "may not use this file except in compliance with the", "will be padded. Must match data generation.\") flags.DEFINE_integer(\"iterations_per_loop\", 1000, \"How", "Files ***\") for input_file in input_files: tf.logging.info(\" %s\" % input_file)", "for :param return_first_match: If true, return the first index that", "tf.logging.info(\"*** Input Files ***\") for input_file in input_files: tf.logging.info(\" %s\"", "WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or", "this file except in compliance with the License. # You", "\"The output directory where the model checkpoints will be written.\")", "flags.DEFINE_integer(\"batch_size\", 32, \"Batch size used for eval\") flags.DEFINE_bool(\"use_tpu\", False, \"Whether", "Index to return if there was no match :return: index", "iterations_per_loop=FLAGS.iterations_per_loop, num_shards=FLAGS.num_tpu_cores, per_host_input_for_training=is_per_host)) model_fn = model_fn_builder(news_config, init_checkpoint=FLAGS.init_checkpoint, learning_rate=1e-4, num_train_steps=0, num_warmup_steps=0,", "<NAME> # # Licensed under the Apache License, Version 2.0", "The Google AI Language Team Authors. # Modified work Copyright", "shifted by 1 start_ind = ind_where(ids_i, target=50265, default_value=0) end_ind =", "__init__(self, gcloud_name): assert gcloud_name.startswith('gs://') self.gcloud_name = gcloud_name bucket_name, blob_name =", "# # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law", "import input_fn_builder import numpy as np import tempfile import h5py", "otherwise, return the last one :param default_value: Index to return", "TPU-enabled project. If not \" \"specified, we will attempt to", "tf.gfile.MakeDirs(FLAGS.output_dir) input_files = [] for input_pattern in FLAGS.input_file.split(\",\"): input_files.extend(tf.gfile.Glob(input_pattern)) tf.logging.info(\"***", "target)[0] if len(matching_inds) > 0: if return_first_match: return int(matching_inds[0]) else:", "first token. Keep in mind input_ids is shifted by 1", "\"num_tpu_cores\", 8, \"Only used if `use_tpu` is True. 
Total number", "# # Licensed under the Apache License, Version 2.0 (the", "gcloud_name): assert gcloud_name.startswith('gs://') self.gcloud_name = gcloud_name bucket_name, blob_name = gcloud_name.split('gs://')[1].split('/',", "Required parameters flags.DEFINE_string( \"config_file\", 'configs/base.json', \"The config json file corresponding", "gcloud_name.startswith('gs://') self.gcloud_name = gcloud_name bucket_name, blob_name = gcloud_name.split('gs://')[1].split('/', 1) bucket", "file except in compliance with the License. # You may", "on an \"AS IS\" BASIS, # WITHOUT WARRANTIES OR CONDITIONS", "[] for input_pattern in FLAGS.input_file.split(\",\"): input_files.extend(tf.gfile.Glob(input_pattern)) tf.logging.info(\"*** Input Files ***\")", ":param default_value: Index to return if there was no match", "flags.DEFINE_string( \"input_file\", None, \"Input TF example files (can be a", "the License. import os from lm.modeling import model_fn_builder, GroverConfig import", "this will be truncated, and sequences shorter \" \"than this", "1) bucket = storage.Client().get_bucket(bucket_name) self.blob = bucket.blob(blob_name) def __enter__(self): self.tempfile", "GCE zone where the Cloud TPU is located in. If", "None, \"The Cloud TPU to use for training. This should", "# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express", "if return_first_match: return int(matching_inds[0]) else: return int(matching_inds[-1]) return default_value def", "train_batch_size=FLAGS.batch_size, eval_batch_size=FLAGS.batch_size, predict_batch_size=FLAGS.batch_size, params={'model_dir': FLAGS.output_dir} ) eval_input_fn = input_fn_builder( input_files=input_files,", "if there was no match :return: index of the first", "input_fn_builder import numpy as np import tempfile import h5py from", "2019 <NAME> # # Licensed under the Apache License, Version", "to replicate the results of the paper you # might", "import tempfile import h5py from google.cloud import storage flags =", "None, \"[Optional] GCE zone where the Cloud TPU is located", "save the perplexities to TPU class gcloudwriter(): def __init__(self, gcloud_name):", "want to replicate the results of the paper you #", "\" \"than this will be padded. Must match data generation.\")", "= tf.contrib.tpu.RunConfig( cluster=tpu_cluster_resolver, master=FLAGS.master, model_dir=FLAGS.output_dir, save_checkpoints_steps=FLAGS.iterations_per_loop, keep_checkpoint_max=None, tpu_config=tf.contrib.tpu.TPUConfig( iterations_per_loop=FLAGS.iterations_per_loop, num_shards=FLAGS.num_tpu_cores,", "sorted(result[0].keys()) result_stack = {cat: np.stack([x[cat] for x in result]) for", "target, return_first_match=True, default_value=-1): \"\"\" :param array: Single dimension array :param", "language governing permissions and # limitations under the License. 
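# Minimal usage sketch (hypothetical bucket/object names; assumes GCS
# credentials are already configured in the environment). Everything written
# to the local temp path is uploaded to the blob when the context exits:
#
#   with gcloudwriter('gs://my-bucket/preds.h5') as tmp_path:
#       with h5py.File(tmp_path, 'w') as h5:
#           h5.create_dataset('dummy', data=np.zeros(4, dtype=np.float16))
#   # -> uploads the file to gs://my-bucket/preds.h5 on exit
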
def ind_where(array: np.ndarray, target, return_first_match=True, default_value=-1):
    """
    :param array: Single dimension array
    :param target: target to search for
    :param return_first_match: If True, return the first index that matches,
        otherwise return the last one
    :param default_value: Index to return if there was no match
    :return: index of the first (or last) match, or ``default_value`` if
        nothing matches
    """
    assert array.ndim == 1
    matching_inds = np.where(array == target)[0]
    if len(matching_inds) > 0:
        if return_first_match:
            return int(matching_inds[0])
        else:
            return int(matching_inds[-1])
    return default_value

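# A couple of quick sanity checks (illustrative only, not run by the script):
#
#   >>> ind_where(np.array([7, 3, 7]), target=7)
#   0
#   >>> ind_where(np.array([7, 3, 7]), target=7, return_first_match=False)
#   2
#   >>> ind_where(np.array([7, 3, 7]), target=9)
#   -1
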
\" \"This specifies the model architecture.\") flags.DEFINE_string( \"input_file\",", "model_fn = model_fn_builder(news_config, init_checkpoint=FLAGS.init_checkpoint, learning_rate=1e-4, num_train_steps=0, num_warmup_steps=0, use_tpu=FLAGS.use_tpu, ) #", "flags.DEFINE_bool(\"use_tpu\", False, \"Whether to use TPU or GPU/CPU.\") flags.DEFINE_string( \"tpu_name\",", "the GCE project from \" \"metadata.\") flags.DEFINE_string( \"gcp_project\", None, \"[Optional]", "'configs/base.json', \"The config json file corresponding to the pre-trained news", ":param array: Single dimension array :param target: target to search", "2018 The Google AI Language Team Authors. # Modified work", "x in estimator.predict(input_fn=eval_input_fn, yield_single_examples=True)] cats = sorted(result[0].keys()) result_stack = {cat:", "= tf.flags FLAGS = flags.FLAGS ## Required parameters flags.DEFINE_string( \"config_file\",", "a pre-trained model).\") flags.DEFINE_integer( \"max_seq_length\", 1024, \"The maximum total input", "\"url.\") flags.DEFINE_string( \"tpu_zone\", None, \"[Optional] GCE zone where the Cloud", "match data generation.\") flags.DEFINE_integer(\"iterations_per_loop\", 1000, \"How many steps to make", "= ind_where(ids_i, target=50266, default_value=ids_i.shape[0] - 1) ppl_ex.append(logprobs_i[start_ind:end_ind]) ppl_ex = np.concatenate(ppl_ex,", "file corresponding to the pre-trained news model. \" \"This specifies", "an \"AS IS\" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF", "match :return: index of the first match, or -1 if", "assert gcloud_name.startswith('gs://') self.gcloud_name = gcloud_name bucket_name, blob_name = gcloud_name.split('gs://')[1].split('/', 1)", "Unless required by applicable law or agreed to in writing,", "with gcloudwriter(os.path.join(FLAGS.output_dir, FLAGS.validation_name)) as tempfile_name: with h5py.File(tempfile_name, 'w') as h5:", "result_stack['labels']): # Omit the first token. Keep in mind input_ids", "the Cloud TPU, or a grpc://ip.address.of.tpu:8470 \" \"url.\") flags.DEFINE_string( \"tpu_zone\",", "Project name for the Cloud TPU-enabled project. If not \"", "This gives the perplexity of the entire article. if you", "do something different to extract the ppl of just the", "use\") ## Other parameters flags.DEFINE_string( \"init_checkpoint\", None, \"Initial checkpoint (usually", "used for eval\") flags.DEFINE_bool(\"use_tpu\", False, \"Whether to use TPU or", "the specific language governing permissions and # limitations under the", "\"tpu_name\", None, \"The Cloud TPU to use for training. 
This", "in FLAGS.input_file.split(\",\"): input_files.extend(tf.gfile.Glob(input_pattern)) tf.logging.info(\"*** Input Files ***\") for input_file in", "this will fall back to normal Estimator on CPU #", "from \" \"metadata.\") flags.DEFINE_string( \"gcp_project\", None, \"[Optional] Project name for", "result = [x for x in estimator.predict(input_fn=eval_input_fn, yield_single_examples=True)] cats =", "to automatically detect the GCE project from \" \"metadata.\") flags.DEFINE_string(", "there was no match :return: index of the first match,", "flags.DEFINE_string( \"init_checkpoint\", None, \"Initial checkpoint (usually from a pre-trained model).\")", "available, this will fall back to normal Estimator on CPU", "applicable law or agreed to in writing, software # distributed", "None if FLAGS.use_tpu and FLAGS.tpu_name: tpu_cluster_resolver = tf.contrib.cluster_resolver.TPUClusterResolver( FLAGS.tpu_name, zone=FLAGS.tpu_zone,", "FLAGS.output_dir} ) eval_input_fn = input_fn_builder( input_files=input_files, seq_length=FLAGS.max_seq_length, evaluate_for_fixed_number_of_steps=False, num_cpu_threads=1, is_training=False)", "Team Authors. # Modified work Copyright 2019 <NAME> # #", "self.gcloud_name = gcloud_name bucket_name, blob_name = gcloud_name.split('gs://')[1].split('/', 1) bucket =", "<reponame>ericlin8545/grover # Original work Copyright 2018 The Google AI Language", ") eval_input_fn = input_fn_builder( input_files=input_files, seq_length=FLAGS.max_seq_length, evaluate_for_fixed_number_of_steps=False, num_cpu_threads=1, is_training=False) result", "print(\"Article perplexity is {:.3f}\".format(np.exp(-np.mean(ppl_ex))), flush=True) if __name__ == \"__main__\": flags.mark_flag_as_required(\"input_file\")", "np.float16 if cat.endswith(('logprobs', 'top_p_required')) else np.uint16 h5.create_dataset(cat, data=data.astype(dtype2use)) h5.create_dataset('model', data=FLAGS.config_file)", "the Cloud TPU-enabled project. If not \" \"specified, we will", "in writing, software # distributed under the License is distributed", "specifies the model architecture.\") flags.DEFINE_string( \"input_file\", None, \"Input TF example", "model checkpoints will be written.\") flags.DEFINE_string( \"validation_name\", 'preds.h5', \"Name to", "TPU cores to use.\") # This is a handy little", "model_fn=model_fn, config=run_config, train_batch_size=FLAGS.batch_size, eval_batch_size=FLAGS.batch_size, predict_batch_size=FLAGS.batch_size, params={'model_dir': FLAGS.output_dir} ) eval_input_fn =", "WordPiece tokenization. \" \"Sequences longer than this will be truncated,", "different to extract the ppl of just the body in", "blob_name = gcloud_name.split('gs://')[1].split('/', 1) bucket = storage.Client().get_bucket(bucket_name) self.blob = bucket.blob(blob_name)", "is {:.3f}\".format(np.exp(-np.mean(ppl_ex))), flush=True) if __name__ == \"__main__\": flags.mark_flag_as_required(\"input_file\") flags.mark_flag_as_required(\"output_dir\") tf.app.run()", "'w') as h5: for cat, data in result_stack.items(): dtype2use =", "the perplexity of the entire article. 
if you want to", "or a grpc://ip.address.of.tpu:8470 \" \"url.\") flags.DEFINE_string( \"tpu_zone\", None, \"[Optional] GCE", "\"max_seq_length\", 1024, \"The maximum total input sequence length after WordPiece", "start_ind = ind_where(ids_i, target=50265, default_value=0) end_ind = ind_where(ids_i, target=50266, default_value=ids_i.shape[0]", "bucket = storage.Client().get_bucket(bucket_name) self.blob = bucket.blob(blob_name) def __enter__(self): self.tempfile =", "matches, otherwise, return the last one :param default_value: Index to", "None, \"The output directory where the model checkpoints will be", "estimator call.\") flags.DEFINE_integer(\"batch_size\", 32, \"Batch size used for eval\") flags.DEFINE_bool(\"use_tpu\",", "where the Cloud TPU is located in. If not \"", "to search for :param return_first_match: If true, return the first", "Cloud TPU-enabled project. If not \" \"specified, we will attempt", "keep_checkpoint_max=None, tpu_config=tf.contrib.tpu.TPUConfig( iterations_per_loop=FLAGS.iterations_per_loop, num_shards=FLAGS.num_tpu_cores, per_host_input_for_training=is_per_host)) model_fn = model_fn_builder(news_config, init_checkpoint=FLAGS.init_checkpoint, learning_rate=1e-4,", "the paper you # might need to do something different", "length after WordPiece tokenization. \" \"Sequences longer than this will", "License is distributed on an \"AS IS\" BASIS, # WITHOUT", "matching_inds = np.where(array == target)[0] if len(matching_inds) > 0: if", "License, Version 2.0 (the \"License\"); # you may not use", "is shifted by 1 start_ind = ind_where(ids_i, target=50265, default_value=0) end_ind", "# You may obtain a copy of the License at", "use.\") # This is a handy little utility so that", "target to search for :param return_first_match: If true, return the", "tf.contrib.tpu.RunConfig( cluster=tpu_cluster_resolver, master=FLAGS.master, model_dir=FLAGS.output_dir, save_checkpoints_steps=FLAGS.iterations_per_loop, keep_checkpoint_max=None, tpu_config=tf.contrib.tpu.TPUConfig( iterations_per_loop=FLAGS.iterations_per_loop, num_shards=FLAGS.num_tpu_cores, per_host_input_for_training=is_per_host))", "will be written.\") flags.DEFINE_string( \"validation_name\", 'preds.h5', \"Name to use\") ##", "automatically detect the GCE project from \" \"metadata.\") flags.DEFINE_string( \"gcp_project\",", "we will attempt to automatically detect the GCE project from", "Language Team Authors. # Modified work Copyright 2019 <NAME> #", "will attempt to automatically detect the GCE project from \"", "mind input_ids is shifted by 1 start_ind = ind_where(ids_i, target=50265,", "copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # #", "tensorflow as tf from lm.dataloader import input_fn_builder import numpy as", "h5py from google.cloud import storage flags = tf.flags FLAGS =", "\"Name to use\") ## Other parameters flags.DEFINE_string( \"init_checkpoint\", None, \"Initial", "the entire article. if you want to replicate the results", "will fall back to normal Estimator on CPU # or", "search for :param return_first_match: If true, return the first index", "located in. If not \" \"specified, we will attempt to", "input_fn_builder( input_files=input_files, seq_length=FLAGS.max_seq_length, evaluate_for_fixed_number_of_steps=False, num_cpu_threads=1, is_training=False) result = [x for", "in zip(result_stack['gt_logprobs'], result_stack['labels']): # Omit the first token. 
Keep in", "might need to do something different to extract the ppl", "default_value=0) end_ind = ind_where(ids_i, target=50266, default_value=ids_i.shape[0] - 1) ppl_ex.append(logprobs_i[start_ind:end_ind]) ppl_ex", "can save the perplexities to TPU class gcloudwriter(): def __init__(self,", "the License for the specific language governing permissions and #", "or comma separated).\") flags.DEFINE_string( \"output_dir\", None, \"The output directory where", "to TPU class gcloudwriter(): def __init__(self, gcloud_name): assert gcloud_name.startswith('gs://') self.gcloud_name", "gcloud_name.split('gs://')[1].split('/', 1) bucket = storage.Client().get_bucket(bucket_name) self.blob = bucket.blob(blob_name) def __enter__(self):", "ppl_ex.append(logprobs_i[start_ind:end_ind]) ppl_ex = np.concatenate(ppl_ex, 0) print(\"Article perplexity is {:.3f}\".format(np.exp(-np.mean(ppl_ex))), flush=True)", "TPU class gcloudwriter(): def __init__(self, gcloud_name): assert gcloud_name.startswith('gs://') self.gcloud_name =", "np.uint16 h5.create_dataset(cat, data=data.astype(dtype2use)) h5.create_dataset('model', data=FLAGS.config_file) h5.create_dataset('ckpt', data=FLAGS.init_checkpoint) h5.create_dataset('input_file', data=FLAGS.input_file) #", "Apache License, Version 2.0 (the \"License\"); # you may not", "detect the GCE project from \" \"metadata.\") flags.DEFINE_string(\"master\", None, \"[Optional]", "example files (can be a glob or comma separated).\") flags.DEFINE_string(", "either express or implied. # See the License for the", "the ppl of just the body in particular. ppl_ex =", "init_checkpoint=FLAGS.init_checkpoint, learning_rate=1e-4, num_train_steps=0, num_warmup_steps=0, use_tpu=FLAGS.use_tpu, ) # If TPU is", "# http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or", "tf from lm.dataloader import input_fn_builder import numpy as np import", "1) ppl_ex.append(logprobs_i[start_ind:end_ind]) ppl_ex = np.concatenate(ppl_ex, 0) print(\"Article perplexity is {:.3f}\".format(np.exp(-np.mean(ppl_ex))),", "# This gives the perplexity of the entire article. if", "flags.FLAGS ## Required parameters flags.DEFINE_string( \"config_file\", 'configs/base.json', \"The config json", "in estimator.predict(input_fn=eval_input_fn, yield_single_examples=True)] cats = sorted(result[0].keys()) result_stack = {cat: np.stack([x[cat]", "GroverConfig.from_json_file(FLAGS.config_file) tf.gfile.MakeDirs(FLAGS.output_dir) input_files = [] for input_pattern in FLAGS.input_file.split(\",\"): input_files.extend(tf.gfile.Glob(input_pattern))", "\"[Optional] GCE zone where the Cloud TPU is located in.", "eval_input_fn = input_fn_builder( input_files=input_files, seq_length=FLAGS.max_seq_length, evaluate_for_fixed_number_of_steps=False, num_cpu_threads=1, is_training=False) result =", "flags.DEFINE_integer( \"num_tpu_cores\", 8, \"Only used if `use_tpu` is True. 
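    # To inspect the uploaded predictions later, one might read them back like
    # this (a sketch; 'preds.h5' stands in for whatever FLAGS.validation_name
    # was, and the keys are whatever model_fn predicted):
    #
    #   with h5py.File('preds.h5', 'r') as h5:
    #       gt_logprobs = h5['gt_logprobs'][:]
    #       labels = h5['labels'][:]
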
    # This gives the perplexity of the entire article. If you want to
    # replicate the results of the paper, you might need to do something
    # different to extract the perplexity of just the body in particular.
    ppl_ex = []
    for logprobs_i, ids_i in zip(result_stack['gt_logprobs'], result_stack['labels']):
        # Omit the first token. Keep in mind input_ids is shifted by 1.
        # Token ids 50265 / 50266 delimit the start / end of the article span.
        start_ind = ind_where(ids_i, target=50265, default_value=0)
        end_ind = ind_where(ids_i, target=50266, default_value=ids_i.shape[0] - 1)
        ppl_ex.append(logprobs_i[start_ind:end_ind])
    ppl_ex = np.concatenate(ppl_ex, 0)
    print("Article perplexity is {:.3f}".format(np.exp(-np.mean(ppl_ex))), flush=True)


if __name__ == "__main__":
    flags.mark_flag_as_required("input_file")
    flags.mark_flag_as_required("output_dir")
    tf.app.run()
[ "= bo.time_overhead results[\"X\"] = [x.tolist() for x in bo.X] results[\"y\"]", "cov_amp = 2 n_dims = lower.shape[0] initial_ls = np.ones([n_dims]) exp_kernel", "space num_iterations: int The number of iterations (initial design +", "from robo.maximizers.random_sampling import RandomSampling from robo.maximizers.scipy_optimizer import SciPyOptimizer from robo.maximizers.differential_evolution", "of iterations (initial design + BO) maximizer: {\"random\", \"scipy\", \"differential_evolution\"}", "return bo = BayesianOptimization(objective_function, lower, upper, acquisition_func, gp, max_func, initial_design=init_latin_hypercube_sampling,", "box optimization problems. This is a reimplemenation of the entropy", "robo.maximizers.random_sampling import RandomSampling from robo.maximizers.scipy_optimizer import SciPyOptimizer from robo.maximizers.differential_evolution import", "lower, upper, num_iterations=30, maximizer=\"random\", model=\"gp_mcmc\", n_init=3, output_path=None, rng=None): \"\"\" Entropy", "prior=prior, n_hypers=n_hypers, chain_length=200, burnin_steps=100, normalize_input=True, normalize_output=False, rng=rng, lower=lower, upper=upper) else:", "bound of the search space upper: np.ndarray (D,) The upper", "1: n_hypers += 1 if model == \"gp\": gp =", "\"gp_mcmc\"} The model for the objective function. n_init: int Number", "function to maximize the acquisition function!\" % maximizer) return bo", "= GaussianProcess(kernel, prior=prior, rng=rng, normalize_output=False, normalize_input=True, lower=lower, upper=upper) elif model", "lower bound of the search space upper: np.ndarray (D,) The", "black box optimization problems. This is a reimplemenation of the", "function!\" % maximizer) return bo = BayesianOptimization(objective_function, lower, upper, acquisition_func,", "n_init=3, output_path=None, rng=None): \"\"\" Entropy search for global black box", "the objective function. n_init: int Number of points for the", "acquisition_func = MarginalizationGPMCMC(a) if maximizer == \"random\": max_func = RandomSampling(acquisition_func,", "------- dict with all results \"\"\" assert upper.shape[0] == lower.shape[0],", "== \"gp\": acquisition_func = a elif model == \"gp_mcmc\": acquisition_func", "model: {\"gp\", \"gp_mcmc\"} The model for the objective function. n_init:", "path where the intermediate output after each iteration will be", "george import numpy as np from robo.priors.default_priors import DefaultPrior from", "from robo.acquisition_functions.ei import EI from robo.acquisition_functions.marginalization import MarginalizationGPMCMC from robo.initial_design", "george.kernels.Matern52Kernel(initial_ls, ndim=n_dims) kernel = cov_amp * exp_kernel prior = DefaultPrior(len(kernel)", "point has to be <= than the number of iterations\"", "problems. This is a reimplemenation of the entropy search algorithm", "GaussianProcess from robo.models.gaussian_process_mcmc import GaussianProcessMCMC from robo.maximizers.random_sampling import RandomSampling from", "Entropy search for global black box optimization problems. 
This is", "initial_points=n_init, rng=rng, output_path=output_path) x_best, f_min = bo.run(num_iterations) results = dict()", "== \"gp_mcmc\": gp = GaussianProcessMCMC(kernel, prior=prior, n_hypers=n_hypers, chain_length=200, burnin_steps=100, normalize_input=True,", "10000)) cov_amp = 2 n_dims = lower.shape[0] initial_ls = np.ones([n_dims])", "is a reimplemenation of the entropy search algorithm by Henning", "<= than the number of iterations\" if rng is None:", "each iteration will be saved. If None no output will", "1 if model == \"gp\": gp = GaussianProcess(kernel, prior=prior, rng=rng,", "else: print(\"ERROR: %s is not a valid model!\" % model)", "from robo.models.gaussian_process_mcmc import GaussianProcessMCMC from robo.maximizers.random_sampling import RandomSampling from robo.maximizers.scipy_optimizer", "objective function that is minimized. This function gets a numpy", "gp = GaussianProcess(kernel, prior=prior, rng=rng, normalize_output=False, normalize_input=True, lower=lower, upper=upper) elif", "for val in bo.incumbents_values] results[\"runtime\"] = bo.runtime results[\"overhead\"] = bo.time_overhead", "(D,) The lower bound of the search space upper: np.ndarray", "= InformationGain(gp, lower=lower, upper=upper, sampling_acquisition=EI) if model == \"gp\": acquisition_func", "global optimization. <NAME> and <NAME>. JMLR, (1), 2012. Parameters ----------", "= DifferentialEvolution(acquisition_func, lower, upper, rng=rng) else: print(\"ERROR: %s is not", "\"scipy\", \"differential_evolution\"} Defines how the acquisition function is maximized. model:", "== \"random\": max_func = RandomSampling(acquisition_func, lower, upper, rng=rng) elif maximizer", "upper, rng=rng) elif maximizer == \"scipy\": max_func = SciPyOptimizer(acquisition_func, lower,", "bo.X] results[\"y\"] = [y for y in bo.y] return results", "from robo.maximizers.scipy_optimizer import SciPyOptimizer from robo.maximizers.differential_evolution import DifferentialEvolution from robo.solver.bayesian_optimization", "(D,) The upper bound of the search space num_iterations: int", "= GaussianProcessMCMC(kernel, prior=prior, n_hypers=n_hypers, chain_length=200, burnin_steps=100, normalize_input=True, normalize_output=False, rng=rng, lower=lower,", "2 n_dims = lower.shape[0] initial_ls = np.ones([n_dims]) exp_kernel = george.kernels.Matern52Kernel(initial_ls,", "None: rng = np.random.RandomState(np.random.randint(0, 10000)) cov_amp = 2 n_dims =", "(scalar) lower: np.ndarray (D,) The lower bound of the search", "rng=None): \"\"\" Entropy search for global black box optimization problems.", "global black box optimization problems. This is a reimplemenation of", "and returns the function value (scalar) lower: np.ndarray (D,) The", "int Number of points for the initial design. Make sure", "be <= than the number of iterations\" if rng is", "maximized. model: {\"gp\", \"gp_mcmc\"} The model for the objective function.", "int The number of iterations (initial design + BO) maximizer:", "the intermediate output after each iteration will be saved. If", "prior=prior, rng=rng, normalize_output=False, normalize_input=True, lower=lower, upper=upper) elif model == \"gp_mcmc\":", "results[\"runtime\"] = bo.runtime results[\"overhead\"] = bo.time_overhead results[\"X\"] = [x.tolist() for", "Specifies the path where the intermediate output after each iteration", "= dict() results[\"x_opt\"] = x_best results[\"f_opt\"] = f_min results[\"incumbents\"] =", "3 * len(kernel) if n_hypers % 2 == 1: n_hypers", "disk. 
rng: numpy.random.RandomState Random number generator Returns ------- dict with", "results[\"x_opt\"] = x_best results[\"f_opt\"] = f_min results[\"incumbents\"] = [inc for", "The upper bound of the search space num_iterations: int The", "the search space upper: np.ndarray (D,) The upper bound of", "results[\"incumbents\"] = [inc for inc in bo.incumbents] results[\"incumbent_values\"] = [val", "after each iteration will be saved. If None no output", "= DefaultPrior(len(kernel) + 1) n_hypers = 3 * len(kernel) if", "a numpy array (D,) as input and returns the function", "rng: numpy.random.RandomState Random number generator Returns ------- dict with all", "<NAME>. JMLR, (1), 2012. Parameters ---------- objective_function: function The objective", "minimized. This function gets a numpy array (D,) as input", "returns the function value (scalar) lower: np.ndarray (D,) The lower", "num_iterations. output_path: string Specifies the path where the intermediate output", "maximizer == \"random\": max_func = RandomSampling(acquisition_func, lower, upper, rng=rng) elif", "a elif model == \"gp_mcmc\": acquisition_func = MarginalizationGPMCMC(a) if maximizer", "== \"differential_evolution\": max_func = DifferentialEvolution(acquisition_func, lower, upper, rng=rng) else: print(\"ERROR:", "\"differential_evolution\": max_func = DifferentialEvolution(acquisition_func, lower, upper, rng=rng) else: print(\"ERROR: %s", "BayesianOptimization from robo.acquisition_functions.information_gain import InformationGain from robo.acquisition_functions.ei import EI from", "upper=upper) else: print(\"ERROR: %s is not a valid model!\" %", "ndim=n_dims) kernel = cov_amp * exp_kernel prior = DefaultPrior(len(kernel) +", "robo.solver.bayesian_optimization import BayesianOptimization from robo.acquisition_functions.information_gain import InformationGain from robo.acquisition_functions.ei import", "of the search space upper: np.ndarray (D,) The upper bound", "maximizer=\"random\", model=\"gp_mcmc\", n_init=3, output_path=None, rng=None): \"\"\" Entropy search for global", "= lower.shape[0] initial_ls = np.ones([n_dims]) exp_kernel = george.kernels.Matern52Kernel(initial_ls, ndim=n_dims) kernel", "import BayesianOptimization from robo.acquisition_functions.information_gain import InformationGain from robo.acquisition_functions.ei import EI", "robo.initial_design import init_latin_hypercube_sampling logger = logging.getLogger(__name__) def entropy_search(objective_function, lower, upper,", "is not a valid function to maximize the acquisition function!\"", "lower: np.ndarray (D,) The lower bound of the search space", "The lower bound of the search space upper: np.ndarray (D,)", "results = dict() results[\"x_opt\"] = x_best results[\"f_opt\"] = f_min results[\"incumbents\"]", "gets a numpy array (D,) as input and returns the", "upper bound of the search space num_iterations: int The number", "maximizer == \"differential_evolution\": max_func = DifferentialEvolution(acquisition_func, lower, upper, rng=rng) else:", "robo.acquisition_functions.ei import EI from robo.acquisition_functions.marginalization import MarginalizationGPMCMC from robo.initial_design import", "Returns ------- dict with all results \"\"\" assert upper.shape[0] ==", "% 2 == 1: n_hypers += 1 if model ==", "miss match\" assert np.all(lower < upper), \"Lower bound >= upper", "from robo.priors.default_priors import DefaultPrior from robo.models.gaussian_process import GaussianProcess from robo.models.gaussian_process_mcmc", "search space upper: np.ndarray (D,) The 
upper bound of the", "== \"scipy\": max_func = SciPyOptimizer(acquisition_func, lower, upper, rng=rng) elif maximizer", "The number of iterations (initial design + BO) maximizer: {\"random\",", "(initial design + BO) maximizer: {\"random\", \"scipy\", \"differential_evolution\"} Defines how", "\"Number of initial design point has to be <= than", "np from robo.priors.default_priors import DefaultPrior from robo.models.gaussian_process import GaussianProcess from", "initial design point has to be <= than the number", "bound of the search space num_iterations: int The number of", "robo.maximizers.differential_evolution import DifferentialEvolution from robo.solver.bayesian_optimization import BayesianOptimization from robo.acquisition_functions.information_gain import", "The model for the objective function. n_init: int Number of", "lower, upper, rng=rng) elif maximizer == \"differential_evolution\": max_func = DifferentialEvolution(acquisition_func,", "iterations (initial design + BO) maximizer: {\"random\", \"scipy\", \"differential_evolution\"} Defines", "from robo.solver.bayesian_optimization import BayesianOptimization from robo.acquisition_functions.information_gain import InformationGain from robo.acquisition_functions.ei", "output after each iteration will be saved. If None no", "GaussianProcessMCMC(kernel, prior=prior, n_hypers=n_hypers, chain_length=200, burnin_steps=100, normalize_input=True, normalize_output=False, rng=rng, lower=lower, upper=upper)", "has to be <= than the number of iterations\" if", "upper=upper) elif model == \"gp_mcmc\": gp = GaussianProcessMCMC(kernel, prior=prior, n_hypers=n_hypers,", "information-efficient global optimization. <NAME> and <NAME>. JMLR, (1), 2012. Parameters", "where the intermediate output after each iteration will be saved.", "will be saved to disk. rng: numpy.random.RandomState Random number generator", "results[\"f_opt\"] = f_min results[\"incumbents\"] = [inc for inc in bo.incumbents]", "is not a valid model!\" % model) return a =", "from robo.models.gaussian_process import GaussianProcess from robo.models.gaussian_process_mcmc import GaussianProcessMCMC from robo.maximizers.random_sampling", "maximizer == \"scipy\": max_func = SciPyOptimizer(acquisition_func, lower, upper, rng=rng) elif", "dict with all results \"\"\" assert upper.shape[0] == lower.shape[0], \"Dimension", "= RandomSampling(acquisition_func, lower, upper, rng=rng) elif maximizer == \"scipy\": max_func", "match\" assert np.all(lower < upper), \"Lower bound >= upper bound\"", "lower.shape[0] initial_ls = np.ones([n_dims]) exp_kernel = george.kernels.Matern52Kernel(initial_ls, ndim=n_dims) kernel =", "lower=lower, upper=upper) elif model == \"gp_mcmc\": gp = GaussianProcessMCMC(kernel, prior=prior,", "{\"random\", \"scipy\", \"differential_evolution\"} Defines how the acquisition function is maximized.", "if model == \"gp\": acquisition_func = a elif model ==", "\"\"\" Entropy search for global black box optimization problems. This", "bound >= upper bound\" assert n_init <= num_iterations, \"Number of", "Make sure that it is <= num_iterations. 
output_path: string Specifies", "numpy.random.RandomState Random number generator Returns ------- dict with all results", "numpy array (D,) as input and returns the function value", "output_path: string Specifies the path where the intermediate output after", "maximizer) return bo = BayesianOptimization(objective_function, lower, upper, acquisition_func, gp, max_func,", "logging import george import numpy as np from robo.priors.default_priors import", "* len(kernel) if n_hypers % 2 == 1: n_hypers +=", "model) return a = InformationGain(gp, lower=lower, upper=upper, sampling_acquisition=EI) if model", "import numpy as np from robo.priors.default_priors import DefaultPrior from robo.models.gaussian_process", "lower, upper, acquisition_func, gp, max_func, initial_design=init_latin_hypercube_sampling, initial_points=n_init, rng=rng, output_path=output_path) x_best,", "array (D,) as input and returns the function value (scalar)", "f_min = bo.run(num_iterations) results = dict() results[\"x_opt\"] = x_best results[\"f_opt\"]", "elif maximizer == \"differential_evolution\": max_func = DifferentialEvolution(acquisition_func, lower, upper, rng=rng)", "saved. If None no output will be saved to disk.", "function gets a numpy array (D,) as input and returns", "entropy_search(objective_function, lower, upper, num_iterations=30, maximizer=\"random\", model=\"gp_mcmc\", n_init=3, output_path=None, rng=None): \"\"\"", "f_min results[\"incumbents\"] = [inc for inc in bo.incumbents] results[\"incumbent_values\"] =", "in bo.incumbents_values] results[\"runtime\"] = bo.runtime results[\"overhead\"] = bo.time_overhead results[\"X\"] =", "\"Dimension miss match\" assert np.all(lower < upper), \"Lower bound >=", "[val for val in bo.incumbents_values] results[\"runtime\"] = bo.runtime results[\"overhead\"] =", "rng=rng) elif maximizer == \"differential_evolution\": max_func = DifferentialEvolution(acquisition_func, lower, upper,", "algorithm by Henning and Schuler[1]. [1] Entropy search for information-efficient", "RandomSampling(acquisition_func, lower, upper, rng=rng) elif maximizer == \"scipy\": max_func =", "search for global black box optimization problems. This is a", "[x.tolist() for x in bo.X] results[\"y\"] = [y for y", "not a valid function to maximize the acquisition function!\" %", "model!\" % model) return a = InformationGain(gp, lower=lower, upper=upper, sampling_acquisition=EI)", "for x in bo.X] results[\"y\"] = [y for y in", "num_iterations=30, maximizer=\"random\", model=\"gp_mcmc\", n_init=3, output_path=None, rng=None): \"\"\" Entropy search for", "not a valid model!\" % model) return a = InformationGain(gp,", "than the number of iterations\" if rng is None: rng", "= [x.tolist() for x in bo.X] results[\"y\"] = [y for", "a reimplemenation of the entropy search algorithm by Henning and", "<= num_iterations, \"Number of initial design point has to be", "elif maximizer == \"scipy\": max_func = SciPyOptimizer(acquisition_func, lower, upper, rng=rng)", "be saved to disk. rng: numpy.random.RandomState Random number generator Returns", "of points for the initial design. Make sure that it", "import george import numpy as np from robo.priors.default_priors import DefaultPrior", "model == \"gp_mcmc\": gp = GaussianProcessMCMC(kernel, prior=prior, n_hypers=n_hypers, chain_length=200, burnin_steps=100,", "= SciPyOptimizer(acquisition_func, lower, upper, rng=rng) elif maximizer == \"differential_evolution\": max_func", "search algorithm by Henning and Schuler[1]. 
[1] Entropy search for", "for the objective function. n_init: int Number of points for", "results[\"incumbent_values\"] = [val for val in bo.incumbents_values] results[\"runtime\"] = bo.runtime", "be saved. If None no output will be saved to", "upper, rng=rng) elif maximizer == \"differential_evolution\": max_func = DifferentialEvolution(acquisition_func, lower,", "iterations\" if rng is None: rng = np.random.RandomState(np.random.randint(0, 10000)) cov_amp", "<= num_iterations. output_path: string Specifies the path where the intermediate", "= 2 n_dims = lower.shape[0] initial_ls = np.ones([n_dims]) exp_kernel =", "initial design. Make sure that it is <= num_iterations. output_path:", "= np.random.RandomState(np.random.randint(0, 10000)) cov_amp = 2 n_dims = lower.shape[0] initial_ls", "\"differential_evolution\"} Defines how the acquisition function is maximized. model: {\"gp\",", "print(\"ERROR: %s is not a valid model!\" % model) return", "robo.acquisition_functions.marginalization import MarginalizationGPMCMC from robo.initial_design import init_latin_hypercube_sampling logger = logging.getLogger(__name__)", "number of iterations (initial design + BO) maximizer: {\"random\", \"scipy\",", "< upper), \"Lower bound >= upper bound\" assert n_init <=", "to maximize the acquisition function!\" % maximizer) return bo =", "the entropy search algorithm by Henning and Schuler[1]. [1] Entropy", "= george.kernels.Matern52Kernel(initial_ls, ndim=n_dims) kernel = cov_amp * exp_kernel prior =", "the acquisition function!\" % maximizer) return bo = BayesianOptimization(objective_function, lower,", "The objective function that is minimized. This function gets a", "lower.shape[0], \"Dimension miss match\" assert np.all(lower < upper), \"Lower bound", "if maximizer == \"random\": max_func = RandomSampling(acquisition_func, lower, upper, rng=rng)", "import RandomSampling from robo.maximizers.scipy_optimizer import SciPyOptimizer from robo.maximizers.differential_evolution import DifferentialEvolution", "+ BO) maximizer: {\"random\", \"scipy\", \"differential_evolution\"} Defines how the acquisition", "DefaultPrior from robo.models.gaussian_process import GaussianProcess from robo.models.gaussian_process_mcmc import GaussianProcessMCMC from", "optimization problems. This is a reimplemenation of the entropy search", "rng is None: rng = np.random.RandomState(np.random.randint(0, 10000)) cov_amp = 2", "Defines how the acquisition function is maximized. model: {\"gp\", \"gp_mcmc\"}", "objective function. n_init: int Number of points for the initial", "search for information-efficient global optimization. <NAME> and <NAME>. JMLR, (1),", "EI from robo.acquisition_functions.marginalization import MarginalizationGPMCMC from robo.initial_design import init_latin_hypercube_sampling logger", "= MarginalizationGPMCMC(a) if maximizer == \"random\": max_func = RandomSampling(acquisition_func, lower,", "valid model!\" % model) return a = InformationGain(gp, lower=lower, upper=upper,", "for global black box optimization problems. 
This is a reimplemenation", "= bo.runtime results[\"overhead\"] = bo.time_overhead results[\"X\"] = [x.tolist() for x", "%s is not a valid model!\" % model) return a", "gp, max_func, initial_design=init_latin_hypercube_sampling, initial_points=n_init, rng=rng, output_path=output_path) x_best, f_min = bo.run(num_iterations)", "initial_design=init_latin_hypercube_sampling, initial_points=n_init, rng=rng, output_path=output_path) x_best, f_min = bo.run(num_iterations) results =", "= [inc for inc in bo.incumbents] results[\"incumbent_values\"] = [val for", "---------- objective_function: function The objective function that is minimized. This", "[1] Entropy search for information-efficient global optimization. <NAME> and <NAME>.", "elif model == \"gp_mcmc\": acquisition_func = MarginalizationGPMCMC(a) if maximizer ==", "GaussianProcessMCMC from robo.maximizers.random_sampling import RandomSampling from robo.maximizers.scipy_optimizer import SciPyOptimizer from", "iteration will be saved. If None no output will be", "function. n_init: int Number of points for the initial design.", "MarginalizationGPMCMC(a) if maximizer == \"random\": max_func = RandomSampling(acquisition_func, lower, upper,", "import EI from robo.acquisition_functions.marginalization import MarginalizationGPMCMC from robo.initial_design import init_latin_hypercube_sampling", "max_func = SciPyOptimizer(acquisition_func, lower, upper, rng=rng) elif maximizer == \"differential_evolution\":", "how the acquisition function is maximized. model: {\"gp\", \"gp_mcmc\"} The", "saved to disk. rng: numpy.random.RandomState Random number generator Returns -------", "def entropy_search(objective_function, lower, upper, num_iterations=30, maximizer=\"random\", model=\"gp_mcmc\", n_init=3, output_path=None, rng=None):", "[inc for inc in bo.incumbents] results[\"incumbent_values\"] = [val for val", "design point has to be <= than the number of", "results \"\"\" assert upper.shape[0] == lower.shape[0], \"Dimension miss match\" assert", "x_best, f_min = bo.run(num_iterations) results = dict() results[\"x_opt\"] = x_best", "all results \"\"\" assert upper.shape[0] == lower.shape[0], \"Dimension miss match\"", "This function gets a numpy array (D,) as input and", "upper, rng=rng) else: print(\"ERROR: %s is not a valid function", "design + BO) maximizer: {\"random\", \"scipy\", \"differential_evolution\"} Defines how the", "x_best results[\"f_opt\"] = f_min results[\"incumbents\"] = [inc for inc in", "2 == 1: n_hypers += 1 if model == \"gp\":", "\"gp_mcmc\": gp = GaussianProcessMCMC(kernel, prior=prior, n_hypers=n_hypers, chain_length=200, burnin_steps=100, normalize_input=True, normalize_output=False,", "n_hypers=n_hypers, chain_length=200, burnin_steps=100, normalize_input=True, normalize_output=False, rng=rng, lower=lower, upper=upper) else: print(\"ERROR:", "lower, upper, rng=rng) elif maximizer == \"scipy\": max_func = SciPyOptimizer(acquisition_func,", "\"random\": max_func = RandomSampling(acquisition_func, lower, upper, rng=rng) elif maximizer ==", "upper: np.ndarray (D,) The upper bound of the search space", "= x_best results[\"f_opt\"] = f_min results[\"incumbents\"] = [inc for inc", "{\"gp\", \"gp_mcmc\"} The model for the objective function. n_init: int", "BayesianOptimization(objective_function, lower, upper, acquisition_func, gp, max_func, initial_design=init_latin_hypercube_sampling, initial_points=n_init, rng=rng, output_path=output_path)", "that it is <= num_iterations. 
output_path: string Specifies the path", "rng=rng) else: print(\"ERROR: %s is not a valid function to", "print(\"ERROR: %s is not a valid function to maximize the", "as input and returns the function value (scalar) lower: np.ndarray", "upper.shape[0] == lower.shape[0], \"Dimension miss match\" assert np.all(lower < upper),", "function that is minimized. This function gets a numpy array", "intermediate output after each iteration will be saved. If None", "prior = DefaultPrior(len(kernel) + 1) n_hypers = 3 * len(kernel)", "robo.priors.default_priors import DefaultPrior from robo.models.gaussian_process import GaussianProcess from robo.models.gaussian_process_mcmc import", "model == \"gp\": acquisition_func = a elif model == \"gp_mcmc\":", "max_func = DifferentialEvolution(acquisition_func, lower, upper, rng=rng) else: print(\"ERROR: %s is", "sure that it is <= num_iterations. output_path: string Specifies the", "= a elif model == \"gp_mcmc\": acquisition_func = MarginalizationGPMCMC(a) if", "None no output will be saved to disk. rng: numpy.random.RandomState", "\"\"\" assert upper.shape[0] == lower.shape[0], \"Dimension miss match\" assert np.all(lower", "If None no output will be saved to disk. rng:", "import DifferentialEvolution from robo.solver.bayesian_optimization import BayesianOptimization from robo.acquisition_functions.information_gain import InformationGain", "bo.run(num_iterations) results = dict() results[\"x_opt\"] = x_best results[\"f_opt\"] = f_min", "robo.models.gaussian_process import GaussianProcess from robo.models.gaussian_process_mcmc import GaussianProcessMCMC from robo.maximizers.random_sampling import", "upper, num_iterations=30, maximizer=\"random\", model=\"gp_mcmc\", n_init=3, output_path=None, rng=None): \"\"\" Entropy search", "GaussianProcess(kernel, prior=prior, rng=rng, normalize_output=False, normalize_input=True, lower=lower, upper=upper) elif model ==", "acquisition_func = a elif model == \"gp_mcmc\": acquisition_func = MarginalizationGPMCMC(a)", "if model == \"gp\": gp = GaussianProcess(kernel, prior=prior, rng=rng, normalize_output=False,", "= cov_amp * exp_kernel prior = DefaultPrior(len(kernel) + 1) n_hypers", "as np from robo.priors.default_priors import DefaultPrior from robo.models.gaussian_process import GaussianProcess", "= 3 * len(kernel) if n_hypers % 2 == 1:", "DifferentialEvolution(acquisition_func, lower, upper, rng=rng) else: print(\"ERROR: %s is not a", "np.all(lower < upper), \"Lower bound >= upper bound\" assert n_init", "\"gp\": gp = GaussianProcess(kernel, prior=prior, rng=rng, normalize_output=False, normalize_input=True, lower=lower, upper=upper)", "InformationGain from robo.acquisition_functions.ei import EI from robo.acquisition_functions.marginalization import MarginalizationGPMCMC from", "optimization. <NAME> and <NAME>. JMLR, (1), 2012. Parameters ---------- objective_function:", "the search space num_iterations: int The number of iterations (initial", "acquisition function is maximized. model: {\"gp\", \"gp_mcmc\"} The model for", "is <= num_iterations. output_path: string Specifies the path where the", "output_path=output_path) x_best, f_min = bo.run(num_iterations) results = dict() results[\"x_opt\"] =", "of initial design point has to be <= than the", "% maximizer) return bo = BayesianOptimization(objective_function, lower, upper, acquisition_func, gp,", "will be saved. If None no output will be saved", "kernel = cov_amp * exp_kernel prior = DefaultPrior(len(kernel) + 1)", "for the initial design. 
Make sure that it is <=", "max_func, initial_design=init_latin_hypercube_sampling, initial_points=n_init, rng=rng, output_path=output_path) x_best, f_min = bo.run(num_iterations) results", "Entropy search for information-efficient global optimization. <NAME> and <NAME>. JMLR,", "max_func = RandomSampling(acquisition_func, lower, upper, rng=rng) elif maximizer == \"scipy\":", "lower, upper, rng=rng) else: print(\"ERROR: %s is not a valid", "burnin_steps=100, normalize_input=True, normalize_output=False, rng=rng, lower=lower, upper=upper) else: print(\"ERROR: %s is", "= BayesianOptimization(objective_function, lower, upper, acquisition_func, gp, max_func, initial_design=init_latin_hypercube_sampling, initial_points=n_init, rng=rng,", ">= upper bound\" assert n_init <= num_iterations, \"Number of initial", "inc in bo.incumbents] results[\"incumbent_values\"] = [val for val in bo.incumbents_values]", "<NAME> and <NAME>. JMLR, (1), 2012. Parameters ---------- objective_function: function", "the acquisition function is maximized. model: {\"gp\", \"gp_mcmc\"} The model", "% model) return a = InformationGain(gp, lower=lower, upper=upper, sampling_acquisition=EI) if", "for information-efficient global optimization. <NAME> and <NAME>. JMLR, (1), 2012.", "logger = logging.getLogger(__name__) def entropy_search(objective_function, lower, upper, num_iterations=30, maximizer=\"random\", model=\"gp_mcmc\",", "n_hypers = 3 * len(kernel) if n_hypers % 2 ==", "normalize_output=False, rng=rng, lower=lower, upper=upper) else: print(\"ERROR: %s is not a", "2012. Parameters ---------- objective_function: function The objective function that is", "and <NAME>. JMLR, (1), 2012. Parameters ---------- objective_function: function The", "number generator Returns ------- dict with all results \"\"\" assert", "normalize_input=True, lower=lower, upper=upper) elif model == \"gp_mcmc\": gp = GaussianProcessMCMC(kernel,", "InformationGain(gp, lower=lower, upper=upper, sampling_acquisition=EI) if model == \"gp\": acquisition_func =", "lower=lower, upper=upper, sampling_acquisition=EI) if model == \"gp\": acquisition_func = a", "DifferentialEvolution from robo.solver.bayesian_optimization import BayesianOptimization from robo.acquisition_functions.information_gain import InformationGain from", "import logging import george import numpy as np from robo.priors.default_priors", "model=\"gp_mcmc\", n_init=3, output_path=None, rng=None): \"\"\" Entropy search for global black", "bo = BayesianOptimization(objective_function, lower, upper, acquisition_func, gp, max_func, initial_design=init_latin_hypercube_sampling, initial_points=n_init,", "entropy search algorithm by Henning and Schuler[1]. [1] Entropy search", "maximize the acquisition function!\" % maximizer) return bo = BayesianOptimization(objective_function,", "the function value (scalar) lower: np.ndarray (D,) The lower bound", "from robo.maximizers.differential_evolution import DifferentialEvolution from robo.solver.bayesian_optimization import BayesianOptimization from robo.acquisition_functions.information_gain", "valid function to maximize the acquisition function!\" % maximizer) return", "it is <= num_iterations. output_path: string Specifies the path where", "and Schuler[1]. [1] Entropy search for information-efficient global optimization. 
<NAME>", "num_iterations, \"Number of initial design point has to be <=", "to be <= than the number of iterations\" if rng", "bo.incumbents_values] results[\"runtime\"] = bo.runtime results[\"overhead\"] = bo.time_overhead results[\"X\"] = [x.tolist()", "= np.ones([n_dims]) exp_kernel = george.kernels.Matern52Kernel(initial_ls, ndim=n_dims) kernel = cov_amp *", "lower=lower, upper=upper) else: print(\"ERROR: %s is not a valid model!\"", "SciPyOptimizer(acquisition_func, lower, upper, rng=rng) elif maximizer == \"differential_evolution\": max_func =", "upper, acquisition_func, gp, max_func, initial_design=init_latin_hypercube_sampling, initial_points=n_init, rng=rng, output_path=output_path) x_best, f_min", "maximizer: {\"random\", \"scipy\", \"differential_evolution\"} Defines how the acquisition function is", "output will be saved to disk. rng: numpy.random.RandomState Random number", "a valid model!\" % model) return a = InformationGain(gp, lower=lower,", "assert upper.shape[0] == lower.shape[0], \"Dimension miss match\" assert np.all(lower <", "exp_kernel prior = DefaultPrior(len(kernel) + 1) n_hypers = 3 *", "the initial design. Make sure that it is <= num_iterations.", "value (scalar) lower: np.ndarray (D,) The lower bound of the", "results[\"X\"] = [x.tolist() for x in bo.X] results[\"y\"] = [y", "import GaussianProcessMCMC from robo.maximizers.random_sampling import RandomSampling from robo.maximizers.scipy_optimizer import SciPyOptimizer", "acquisition_func, gp, max_func, initial_design=init_latin_hypercube_sampling, initial_points=n_init, rng=rng, output_path=output_path) x_best, f_min =", "DefaultPrior(len(kernel) + 1) n_hypers = 3 * len(kernel) if n_hypers", "points for the initial design. Make sure that it is", "chain_length=200, burnin_steps=100, normalize_input=True, normalize_output=False, rng=rng, lower=lower, upper=upper) else: print(\"ERROR: %s", "robo.models.gaussian_process_mcmc import GaussianProcessMCMC from robo.maximizers.random_sampling import RandomSampling from robo.maximizers.scipy_optimizer import", "initial_ls = np.ones([n_dims]) exp_kernel = george.kernels.Matern52Kernel(initial_ls, ndim=n_dims) kernel = cov_amp", "import SciPyOptimizer from robo.maximizers.differential_evolution import DifferentialEvolution from robo.solver.bayesian_optimization import BayesianOptimization", "n_dims = lower.shape[0] initial_ls = np.ones([n_dims]) exp_kernel = george.kernels.Matern52Kernel(initial_ls, ndim=n_dims)", "objective_function: function The objective function that is minimized. This function", "function is maximized. model: {\"gp\", \"gp_mcmc\"} The model for the", "assert np.all(lower < upper), \"Lower bound >= upper bound\" assert", "logging.getLogger(__name__) def entropy_search(objective_function, lower, upper, num_iterations=30, maximizer=\"random\", model=\"gp_mcmc\", n_init=3, output_path=None,", "is maximized. 
model: {\"gp\", \"gp_mcmc\"} The model for the objective", "from robo.acquisition_functions.information_gain import InformationGain from robo.acquisition_functions.ei import EI from robo.acquisition_functions.marginalization", "in bo.incumbents] results[\"incumbent_values\"] = [val for val in bo.incumbents_values] results[\"runtime\"]", "import InformationGain from robo.acquisition_functions.ei import EI from robo.acquisition_functions.marginalization import MarginalizationGPMCMC", "else: print(\"ERROR: %s is not a valid function to maximize", "space upper: np.ndarray (D,) The upper bound of the search", "reimplemenation of the entropy search algorithm by Henning and Schuler[1].", "to disk. rng: numpy.random.RandomState Random number generator Returns ------- dict", "in bo.X] results[\"y\"] = [y for y in bo.y] return", "Number of points for the initial design. Make sure that", "with all results \"\"\" assert upper.shape[0] == lower.shape[0], \"Dimension miss", "by Henning and Schuler[1]. [1] Entropy search for information-efficient global", "rng=rng, output_path=output_path) x_best, f_min = bo.run(num_iterations) results = dict() results[\"x_opt\"]", "for inc in bo.incumbents] results[\"incumbent_values\"] = [val for val in", "exp_kernel = george.kernels.Matern52Kernel(initial_ls, ndim=n_dims) kernel = cov_amp * exp_kernel prior", "function The objective function that is minimized. This function gets", "function value (scalar) lower: np.ndarray (D,) The lower bound of", "len(kernel) if n_hypers % 2 == 1: n_hypers += 1", "bo.runtime results[\"overhead\"] = bo.time_overhead results[\"X\"] = [x.tolist() for x in", "n_init: int Number of points for the initial design. Make", "Parameters ---------- objective_function: function The objective function that is minimized.", "normalize_input=True, normalize_output=False, rng=rng, lower=lower, upper=upper) else: print(\"ERROR: %s is not", "import GaussianProcess from robo.models.gaussian_process_mcmc import GaussianProcessMCMC from robo.maximizers.random_sampling import RandomSampling", "val in bo.incumbents_values] results[\"runtime\"] = bo.runtime results[\"overhead\"] = bo.time_overhead results[\"X\"]", "robo.acquisition_functions.information_gain import InformationGain from robo.acquisition_functions.ei import EI from robo.acquisition_functions.marginalization import", "%s is not a valid function to maximize the acquisition", "numpy as np from robo.priors.default_priors import DefaultPrior from robo.models.gaussian_process import", "\"Lower bound >= upper bound\" assert n_init <= num_iterations, \"Number", "sampling_acquisition=EI) if model == \"gp\": acquisition_func = a elif model", "that is minimized. This function gets a numpy array (D,)", "robo.maximizers.scipy_optimizer import SciPyOptimizer from robo.maximizers.differential_evolution import DifferentialEvolution from robo.solver.bayesian_optimization import", "init_latin_hypercube_sampling logger = logging.getLogger(__name__) def entropy_search(objective_function, lower, upper, num_iterations=30, maximizer=\"random\",", "string Specifies the path where the intermediate output after each", "(1), 2012. 
Parameters ---------- objective_function: function The objective function that", "rng = np.random.RandomState(np.random.randint(0, 10000)) cov_amp = 2 n_dims = lower.shape[0]", "== lower.shape[0], \"Dimension miss match\" assert np.all(lower < upper), \"Lower", "= f_min results[\"incumbents\"] = [inc for inc in bo.incumbents] results[\"incumbent_values\"]", "rng=rng) elif maximizer == \"scipy\": max_func = SciPyOptimizer(acquisition_func, lower, upper,", "Random number generator Returns ------- dict with all results \"\"\"", "import MarginalizationGPMCMC from robo.initial_design import init_latin_hypercube_sampling logger = logging.getLogger(__name__) def", "generator Returns ------- dict with all results \"\"\" assert upper.shape[0]", "rng=rng, normalize_output=False, normalize_input=True, lower=lower, upper=upper) elif model == \"gp_mcmc\": gp", "np.ones([n_dims]) exp_kernel = george.kernels.Matern52Kernel(initial_ls, ndim=n_dims) kernel = cov_amp * exp_kernel", "import init_latin_hypercube_sampling logger = logging.getLogger(__name__) def entropy_search(objective_function, lower, upper, num_iterations=30,", "Henning and Schuler[1]. [1] Entropy search for information-efficient global optimization.", "\"gp\": acquisition_func = a elif model == \"gp_mcmc\": acquisition_func =", "of iterations\" if rng is None: rng = np.random.RandomState(np.random.randint(0, 10000))", "SciPyOptimizer from robo.maximizers.differential_evolution import DifferentialEvolution from robo.solver.bayesian_optimization import BayesianOptimization from", "Schuler[1]. [1] Entropy search for information-efficient global optimization. <NAME> and", "np.ndarray (D,) The upper bound of the search space num_iterations:", "== \"gp\": gp = GaussianProcess(kernel, prior=prior, rng=rng, normalize_output=False, normalize_input=True, lower=lower,", "return a = InformationGain(gp, lower=lower, upper=upper, sampling_acquisition=EI) if model ==", "This is a reimplemenation of the entropy search algorithm by", "\"scipy\": max_func = SciPyOptimizer(acquisition_func, lower, upper, rng=rng) elif maximizer ==", "JMLR, (1), 2012. Parameters ---------- objective_function: function The objective function", "input and returns the function value (scalar) lower: np.ndarray (D,)", "acquisition function!\" % maximizer) return bo = BayesianOptimization(objective_function, lower, upper,", "a = InformationGain(gp, lower=lower, upper=upper, sampling_acquisition=EI) if model == \"gp\":", "cov_amp * exp_kernel prior = DefaultPrior(len(kernel) + 1) n_hypers =", "model == \"gp_mcmc\": acquisition_func = MarginalizationGPMCMC(a) if maximizer == \"random\":", "\"gp_mcmc\": acquisition_func = MarginalizationGPMCMC(a) if maximizer == \"random\": max_func =", "= logging.getLogger(__name__) def entropy_search(objective_function, lower, upper, num_iterations=30, maximizer=\"random\", model=\"gp_mcmc\", n_init=3,", "dict() results[\"x_opt\"] = x_best results[\"f_opt\"] = f_min results[\"incumbents\"] = [inc", "normalize_output=False, normalize_input=True, lower=lower, upper=upper) elif model == \"gp_mcmc\": gp =", "n_hypers += 1 if model == \"gp\": gp = GaussianProcess(kernel,", "np.random.RandomState(np.random.randint(0, 10000)) cov_amp = 2 n_dims = lower.shape[0] initial_ls =", "n_init <= num_iterations, \"Number of initial design point has to", "model for the objective function. 
n_init: int Number of points", "import DefaultPrior from robo.models.gaussian_process import GaussianProcess from robo.models.gaussian_process_mcmc import GaussianProcessMCMC", "MarginalizationGPMCMC from robo.initial_design import init_latin_hypercube_sampling logger = logging.getLogger(__name__) def entropy_search(objective_function,", "rng=rng, lower=lower, upper=upper) else: print(\"ERROR: %s is not a valid", "num_iterations: int The number of iterations (initial design + BO)", "x in bo.X] results[\"y\"] = [y for y in bo.y]", "upper), \"Lower bound >= upper bound\" assert n_init <= num_iterations,", "1) n_hypers = 3 * len(kernel) if n_hypers % 2", "gp = GaussianProcessMCMC(kernel, prior=prior, n_hypers=n_hypers, chain_length=200, burnin_steps=100, normalize_input=True, normalize_output=False, rng=rng,", "of the entropy search algorithm by Henning and Schuler[1]. [1]", "bo.incumbents] results[\"incumbent_values\"] = [val for val in bo.incumbents_values] results[\"runtime\"] =", "+= 1 if model == \"gp\": gp = GaussianProcess(kernel, prior=prior,", "design. Make sure that it is <= num_iterations. output_path: string", "no output will be saved to disk. rng: numpy.random.RandomState Random", "search space num_iterations: int The number of iterations (initial design", "a valid function to maximize the acquisition function!\" % maximizer)", "(D,) as input and returns the function value (scalar) lower:", "the number of iterations\" if rng is None: rng =", "* exp_kernel prior = DefaultPrior(len(kernel) + 1) n_hypers = 3", "is minimized. This function gets a numpy array (D,) as", "the path where the intermediate output after each iteration will", "is None: rng = np.random.RandomState(np.random.randint(0, 10000)) cov_amp = 2 n_dims", "if n_hypers % 2 == 1: n_hypers += 1 if", "results[\"overhead\"] = bo.time_overhead results[\"X\"] = [x.tolist() for x in bo.X]", "from robo.acquisition_functions.marginalization import MarginalizationGPMCMC from robo.initial_design import init_latin_hypercube_sampling logger =", "assert n_init <= num_iterations, \"Number of initial design point has", "RandomSampling from robo.maximizers.scipy_optimizer import SciPyOptimizer from robo.maximizers.differential_evolution import DifferentialEvolution from", "+ 1) n_hypers = 3 * len(kernel) if n_hypers %", "= bo.run(num_iterations) results = dict() results[\"x_opt\"] = x_best results[\"f_opt\"] =", "n_hypers % 2 == 1: n_hypers += 1 if model", "model == \"gp\": gp = GaussianProcess(kernel, prior=prior, rng=rng, normalize_output=False, normalize_input=True,", "number of iterations\" if rng is None: rng = np.random.RandomState(np.random.randint(0,", "upper=upper, sampling_acquisition=EI) if model == \"gp\": acquisition_func = a elif", "if rng is None: rng = np.random.RandomState(np.random.randint(0, 10000)) cov_amp =", "np.ndarray (D,) The lower bound of the search space upper:", "upper bound\" assert n_init <= num_iterations, \"Number of initial design", "elif model == \"gp_mcmc\": gp = GaussianProcessMCMC(kernel, prior=prior, n_hypers=n_hypers, chain_length=200,", "BO) maximizer: {\"random\", \"scipy\", \"differential_evolution\"} Defines how the acquisition function", "bo.time_overhead results[\"X\"] = [x.tolist() for x in bo.X] results[\"y\"] =", "== 1: n_hypers += 1 if model == \"gp\": gp", "bound\" assert n_init <= num_iterations, \"Number of initial design point", "output_path=None, rng=None): \"\"\" Entropy search for global black box optimization", "= [val for val in bo.incumbents_values] results[\"runtime\"] = 
bo.runtime results[\"overhead\"]", "from robo.initial_design import init_latin_hypercube_sampling logger = logging.getLogger(__name__) def entropy_search(objective_function, lower,", "== \"gp_mcmc\": acquisition_func = MarginalizationGPMCMC(a) if maximizer == \"random\": max_func", "of the search space num_iterations: int The number of iterations" ]
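# Illustrative usage sketch, not part of the original module: minimizing a toy
# quadratic over the unit square with the wrapper above. The objective, bounds,
# and iteration budget are assumptions chosen to keep the demo fast.
if __name__ == "__main__":
    def toy_objective(x):
        return float(np.sum((x - 0.5) ** 2))

    res = entropy_search(toy_objective,
                         lower=np.zeros(2), upper=np.ones(2),
                         num_iterations=10, n_init=3,
                         model="gp", maximizer="random")
    print(res["x_opt"], res["f_opt"])
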
[ "3.1.13 on 2021-10-29 11:07 from django.db import migrations, models class", "= [ ('app', '0095_bisericapage_utitle'), ] operations = [ migrations.AddField( model_name='bisericapage',", "11:07 from django.db import migrations, models class Migration(migrations.Migration): dependencies =", "models class Migration(migrations.Migration): dependencies = [ ('app', '0095_bisericapage_utitle'), ] operations", "2021-10-29 11:07 from django.db import migrations, models class Migration(migrations.Migration): dependencies", "import migrations, models class Migration(migrations.Migration): dependencies = [ ('app', '0095_bisericapage_utitle'),", "Django 3.1.13 on 2021-10-29 11:07 from django.db import migrations, models", "# Generated by Django 3.1.13 on 2021-10-29 11:07 from django.db", "from django.db import migrations, models class Migration(migrations.Migration): dependencies = [", "on 2021-10-29 11:07 from django.db import migrations, models class Migration(migrations.Migration):", "class Migration(migrations.Migration): dependencies = [ ('app', '0095_bisericapage_utitle'), ] operations =", "dependencies = [ ('app', '0095_bisericapage_utitle'), ] operations = [ migrations.AddField(", "Generated by Django 3.1.13 on 2021-10-29 11:07 from django.db import", "('app', '0095_bisericapage_utitle'), ] operations = [ migrations.AddField( model_name='bisericapage', name='datare_an', field=models.IntegerField(blank=True,", "by Django 3.1.13 on 2021-10-29 11:07 from django.db import migrations,", "[ ('app', '0095_bisericapage_utitle'), ] operations = [ migrations.AddField( model_name='bisericapage', name='datare_an',", "operations = [ migrations.AddField( model_name='bisericapage', name='datare_an', field=models.IntegerField(blank=True, null=True), ), ]", "] operations = [ migrations.AddField( model_name='bisericapage', name='datare_an', field=models.IntegerField(blank=True, null=True), ),", "django.db import migrations, models class Migration(migrations.Migration): dependencies = [ ('app',", "Migration(migrations.Migration): dependencies = [ ('app', '0095_bisericapage_utitle'), ] operations = [", "'0095_bisericapage_utitle'), ] operations = [ migrations.AddField( model_name='bisericapage', name='datare_an', field=models.IntegerField(blank=True, null=True),", "migrations, models class Migration(migrations.Migration): dependencies = [ ('app', '0095_bisericapage_utitle'), ]" ]
[ "import Base from app.extensions import db class MMSSurfaceBulkSampleActivity(Base): __tablename__ =", "primary_key=True) messageid = db.Column(db.Integer, db.ForeignKey('mms_now_submissions.application.messageid')) mms_cid = db.Column(db.Integer) type =", "db.Column(db.Numeric(14, 2)) timbervolume = db.Column(db.Numeric(14, 2)) quantity = db.Column(db.Integer) def", "db class MMSSurfaceBulkSampleActivity(Base): __tablename__ = \"surface_bulk_sample_activity\" __table_args__ = {\"schema\": \"mms_now_submissions\"}", "id = db.Column(db.Integer, primary_key=True) messageid = db.Column(db.Integer, db.ForeignKey('mms_now_submissions.application.messageid')) mms_cid =", "db.Column(db.Integer, primary_key=True) messageid = db.Column(db.Integer, db.ForeignKey('mms_now_submissions.application.messageid')) mms_cid = db.Column(db.Integer) type", "db.Column(db.Integer, db.ForeignKey('mms_now_submissions.application.messageid')) mms_cid = db.Column(db.Integer) type = db.Column(db.String) disturbedarea =", "db.Column(db.String) disturbedarea = db.Column(db.Numeric(14, 2)) timbervolume = db.Column(db.Numeric(14, 2)) quantity", "= db.Column(db.Numeric(14, 2)) timbervolume = db.Column(db.Numeric(14, 2)) quantity = db.Column(db.Integer)", "app.extensions import db class MMSSurfaceBulkSampleActivity(Base): __tablename__ = \"surface_bulk_sample_activity\" __table_args__ =", "from app.extensions import db class MMSSurfaceBulkSampleActivity(Base): __tablename__ = \"surface_bulk_sample_activity\" __table_args__", "= {\"schema\": \"mms_now_submissions\"} id = db.Column(db.Integer, primary_key=True) messageid = db.Column(db.Integer,", "MMSSurfaceBulkSampleActivity(Base): __tablename__ = \"surface_bulk_sample_activity\" __table_args__ = {\"schema\": \"mms_now_submissions\"} id =", "timbervolume = db.Column(db.Numeric(14, 2)) quantity = db.Column(db.Integer) def __repr__(self): return", "<reponame>bcgov/mds from app.api.utils.models_mixins import Base from app.extensions import db class", "= db.Column(db.Integer) type = db.Column(db.String) disturbedarea = db.Column(db.Numeric(14, 2)) timbervolume", "messageid = db.Column(db.Integer, db.ForeignKey('mms_now_submissions.application.messageid')) mms_cid = db.Column(db.Integer) type = db.Column(db.String)", "disturbedarea = db.Column(db.Numeric(14, 2)) timbervolume = db.Column(db.Numeric(14, 2)) quantity =", "type = db.Column(db.String) disturbedarea = db.Column(db.Numeric(14, 2)) timbervolume = db.Column(db.Numeric(14,", "= db.Column(db.Numeric(14, 2)) quantity = db.Column(db.Integer) def __repr__(self): return '<MMSSurfaceBulkSampleActivity", "from app.api.utils.models_mixins import Base from app.extensions import db class MMSSurfaceBulkSampleActivity(Base):", "= db.Column(db.Integer, db.ForeignKey('mms_now_submissions.application.messageid')) mms_cid = db.Column(db.Integer) type = db.Column(db.String) disturbedarea", "db.Column(db.Numeric(14, 2)) quantity = db.Column(db.Integer) def __repr__(self): return '<MMSSurfaceBulkSampleActivity %r>'", "import db class MMSSurfaceBulkSampleActivity(Base): __tablename__ = \"surface_bulk_sample_activity\" __table_args__ = {\"schema\":", "__tablename__ = \"surface_bulk_sample_activity\" __table_args__ = {\"schema\": \"mms_now_submissions\"} id = db.Column(db.Integer,", "class MMSSurfaceBulkSampleActivity(Base): __tablename__ = \"surface_bulk_sample_activity\" __table_args__ = {\"schema\": \"mms_now_submissions\"} id", "quantity = db.Column(db.Integer) def __repr__(self): return '<MMSSurfaceBulkSampleActivity %r>' % self.id", 
"\"mms_now_submissions\"} id = db.Column(db.Integer, primary_key=True) messageid = db.Column(db.Integer, db.ForeignKey('mms_now_submissions.application.messageid')) mms_cid", "mms_cid = db.Column(db.Integer) type = db.Column(db.String) disturbedarea = db.Column(db.Numeric(14, 2))", "{\"schema\": \"mms_now_submissions\"} id = db.Column(db.Integer, primary_key=True) messageid = db.Column(db.Integer, db.ForeignKey('mms_now_submissions.application.messageid'))", "= \"surface_bulk_sample_activity\" __table_args__ = {\"schema\": \"mms_now_submissions\"} id = db.Column(db.Integer, primary_key=True)", "2)) quantity = db.Column(db.Integer) def __repr__(self): return '<MMSSurfaceBulkSampleActivity %r>' %", "Base from app.extensions import db class MMSSurfaceBulkSampleActivity(Base): __tablename__ = \"surface_bulk_sample_activity\"", "__table_args__ = {\"schema\": \"mms_now_submissions\"} id = db.Column(db.Integer, primary_key=True) messageid =", "db.ForeignKey('mms_now_submissions.application.messageid')) mms_cid = db.Column(db.Integer) type = db.Column(db.String) disturbedarea = db.Column(db.Numeric(14,", "= db.Column(db.String) disturbedarea = db.Column(db.Numeric(14, 2)) timbervolume = db.Column(db.Numeric(14, 2))", "app.api.utils.models_mixins import Base from app.extensions import db class MMSSurfaceBulkSampleActivity(Base): __tablename__", "db.Column(db.Integer) type = db.Column(db.String) disturbedarea = db.Column(db.Numeric(14, 2)) timbervolume =", "= db.Column(db.Integer, primary_key=True) messageid = db.Column(db.Integer, db.ForeignKey('mms_now_submissions.application.messageid')) mms_cid = db.Column(db.Integer)", "\"surface_bulk_sample_activity\" __table_args__ = {\"schema\": \"mms_now_submissions\"} id = db.Column(db.Integer, primary_key=True) messageid", "2)) timbervolume = db.Column(db.Numeric(14, 2)) quantity = db.Column(db.Integer) def __repr__(self):" ]
[ "get_state(self): return map_to_state(await self._send_command(self._command, 255)) async def set_state(self, state: RemoteControlLock):", "set_state(self, state: RemoteControlLock): return map_to_state(await self._send_command(self._command, state.value)) def on(self): return", "self._send_command(self._command, state.value)) def on(self): return self.set_state(RemoteControlLock.ON) def off(self): return self.set_state(RemoteControlLock.OFF)", "int): return RemoteControlLock(data) class RemoteControlLockCommands(object): _command = \"km\" def __init__(self,", "def get_state(self): return map_to_state(await self._send_command(self._command, 255)) async def set_state(self, state:", "1 def map_to_state(data: int): return RemoteControlLock(data) class RemoteControlLockCommands(object): _command =", "state: RemoteControlLock): return map_to_state(await self._send_command(self._command, state.value)) def on(self): return self.set_state(RemoteControlLock.ON)", "map_to_state(data: int): return RemoteControlLock(data) class RemoteControlLockCommands(object): _command = \"km\" def", "= 1 def map_to_state(data: int): return RemoteControlLock(data) class RemoteControlLockCommands(object): _command", "RemoteControlLock(data) class RemoteControlLockCommands(object): _command = \"km\" def __init__(self, send_command): self._send_command", "_command = \"km\" def __init__(self, send_command): self._send_command = send_command async", "RemoteControlLock(Enum): OFF = 0 ON = 1 def map_to_state(data: int):", "class RemoteControlLockCommands(object): _command = \"km\" def __init__(self, send_command): self._send_command =", "async def set_state(self, state: RemoteControlLock): return map_to_state(await self._send_command(self._command, state.value)) def", "= \"km\" def __init__(self, send_command): self._send_command = send_command async def", "from enum import Enum class RemoteControlLock(Enum): OFF = 0 ON", "= 0 ON = 1 def map_to_state(data: int): return RemoteControlLock(data)", "RemoteControlLockCommands(object): _command = \"km\" def __init__(self, send_command): self._send_command = send_command", "def set_state(self, state: RemoteControlLock): return map_to_state(await self._send_command(self._command, state.value)) def on(self):", "map_to_state(await self._send_command(self._command, state.value)) def on(self): return self.set_state(RemoteControlLock.ON) def off(self): return", "async def get_state(self): return map_to_state(await self._send_command(self._command, 255)) async def set_state(self,", "return map_to_state(await self._send_command(self._command, 255)) async def set_state(self, state: RemoteControlLock): return", "= send_command async def get_state(self): return map_to_state(await self._send_command(self._command, 255)) async", "import Enum class RemoteControlLock(Enum): OFF = 0 ON = 1", "map_to_state(await self._send_command(self._command, 255)) async def set_state(self, state: RemoteControlLock): return map_to_state(await", "return map_to_state(await self._send_command(self._command, state.value)) def on(self): return self.set_state(RemoteControlLock.ON) def off(self):", "Enum class RemoteControlLock(Enum): OFF = 0 ON = 1 def", "send_command async def get_state(self): return map_to_state(await self._send_command(self._command, 255)) async def", "self._send_command(self._command, 255)) async def set_state(self, state: RemoteControlLock): return map_to_state(await self._send_command(self._command,", "OFF = 0 ON = 1 def map_to_state(data: int): return", "RemoteControlLock): return 
map_to_state(await self._send_command(self._command, state.value)) def on(self): return self.set_state(RemoteControlLock.ON) def", "ON = 1 def map_to_state(data: int): return RemoteControlLock(data) class RemoteControlLockCommands(object):", "return RemoteControlLock(data) class RemoteControlLockCommands(object): _command = \"km\" def __init__(self, send_command):", "def map_to_state(data: int): return RemoteControlLock(data) class RemoteControlLockCommands(object): _command = \"km\"", "0 ON = 1 def map_to_state(data: int): return RemoteControlLock(data) class", "self._send_command = send_command async def get_state(self): return map_to_state(await self._send_command(self._command, 255))", "enum import Enum class RemoteControlLock(Enum): OFF = 0 ON =", "\"km\" def __init__(self, send_command): self._send_command = send_command async def get_state(self):", "class RemoteControlLock(Enum): OFF = 0 ON = 1 def map_to_state(data:", "def __init__(self, send_command): self._send_command = send_command async def get_state(self): return", "send_command): self._send_command = send_command async def get_state(self): return map_to_state(await self._send_command(self._command,", "255)) async def set_state(self, state: RemoteControlLock): return map_to_state(await self._send_command(self._command, state.value))", "__init__(self, send_command): self._send_command = send_command async def get_state(self): return map_to_state(await" ]
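A minimal usage sketch, assuming the class above is in scope and the transport is an async callable with the signature used there, send_command(command: str, data: int) -> int; the fake transport below is invented for illustration only.

import asyncio

async def fake_send_command(command: str, data: int) -> int:
    # Invented stand-in for the real device transport: report ON (1) for
    # the 255 "query" payload, otherwise echo the requested state back.
    return 1 if data == 255 else data

async def main():
    commands = RemoteControlLockCommands(fake_send_command)
    print(await commands.get_state())   # RemoteControlLock.ON
    print(await commands.off())         # RemoteControlLock.OFF

asyncio.run(main())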
[ "class PowerOf2: def start(self): number=int(sys.argv[1]) print(number) for i in Utility().powerof2(number):", "from com.bridgelabz.utility.Utility import Utility class PowerOf2: def start(self): number=int(sys.argv[1]) print(number)", "start(self): number=int(sys.argv[1]) print(number) for i in Utility().powerof2(number): print(i) return PowerOf2().start()", "import sys from com.bridgelabz.utility.Utility import Utility class PowerOf2: def start(self):", "<reponame>aashishogale/FunctionalPrograms-Python- import sys from com.bridgelabz.utility.Utility import Utility class PowerOf2: def", "sys from com.bridgelabz.utility.Utility import Utility class PowerOf2: def start(self): number=int(sys.argv[1])", "def start(self): number=int(sys.argv[1]) print(number) for i in Utility().powerof2(number): print(i) return", "PowerOf2: def start(self): number=int(sys.argv[1]) print(number) for i in Utility().powerof2(number): print(i)", "Utility class PowerOf2: def start(self): number=int(sys.argv[1]) print(number) for i in", "import Utility class PowerOf2: def start(self): number=int(sys.argv[1]) print(number) for i", "com.bridgelabz.utility.Utility import Utility class PowerOf2: def start(self): number=int(sys.argv[1]) print(number) for" ]
[ "web_scraper.search_for_jobs(soup, last_scrape_date, driver) print(\"Scraping finished. Updating and saving Excel workbook.\",", "# by checking the date of the first job in", "\"//Job_Openings.xlsx\" print(\"-\" * 75, \"-\" * 75, \"\\n\\t\\t\\t\\t\\t\\t\\t JOB WEB", "+ '//Workbooks' + \"//Job_Openings.xlsx\" print(\"-\" * 75, \"-\" * 75,", "already exists then append the jobs not already in the", "workbook and append all of the jobs posted within the", "print(\"Creating soup and opening Chrome webdriver\", \"-\"*75, sep=\"\\n\") URL =", "timedelta from selenium import webdriver from app import web_scraper from", "- timedelta(weeks=4.348) # Average amount of weeks in a month", "= excel.get_first_job_date(worksheet) last_scrape_date = datetime.strptime(last_scrape_date, \"%d-%b-%Y\") # If not, create", "webdriver from app import web_scraper from app import excel job_list,", "* 75, \"\\n\\t\\t\\t\\t\\t\\t\\t JOB WEB SCRAPER\", \"-\" * 75, \"-\"", "Loading workbook.\", \"-\" * 75, sep=\"\\n\") workbook, worksheet = excel.load_xlsx(file_path)", "and appends it to an excel worksheet. import os from", "This program scraps data from job postings on the website", "workinstartups.com and update the worksheet with the found jobs print(\"Scraping", "Open webdriver to workinstartups.com and create soup print(\"Creating soup and", "appends it to an excel worksheet. import os from datetime", "time the site was scraped. if os.path.isfile(file_path): print(\"Job_Opening excel file", "webdriver\", \"-\"*75, sep=\"\\n\") URL = \"https://workinstartups.com/job-board/jobs-in/london\" soup = web_scraper.soup_creator(URL, max_retry=1,", "* 75, sep=\"\\n\") workbook, worksheet = excel.load_xlsx(file_path) last_scrape_date = excel.get_first_job_date(worksheet)", "in a month last_scrape_date = date_month_ago.replace(hour=0, minute=0, second=0, microsecond=0) #", "and create soup print(\"Creating soup and opening Chrome webdriver\", \"-\"*75,", "program scraps data from job postings on the website workinstartups.com", "already in the worksheet # by checking the date of", "exists. Loading workbook.\", \"-\" * 75, sep=\"\\n\") workbook, worksheet =", "datetime, timedelta from selenium import webdriver from app import web_scraper", "workinstartups.com. Please wait.\", \"-\" * 75, sep=\"\\n\") job_list = web_scraper.search_for_jobs(soup,", "selenium import webdriver from app import web_scraper from app import", "site was scraped. if os.path.isfile(file_path): print(\"Job_Opening excel file already exists.", "already exists. Loading workbook.\", \"-\" * 75, sep=\"\\n\") workbook, worksheet", "opening Chrome webdriver\", \"-\"*75, sep=\"\\n\") URL = \"https://workinstartups.com/job-board/jobs-in/london\" soup =", "import os from datetime import datetime, timedelta from selenium import", "driver) print(\"Scraping finished. 
Updating and saving Excel workbook.\", \"-\" *", "# If the Job_Openings workbook already exists then append the", "last_scrape_date = datetime.strptime(last_scrape_date, \"%d-%b-%Y\") # If not, create a new", "the jobs from workinstartups.com and update the worksheet with the", "current_date - timedelta(weeks=4.348) # Average amount of weeks in a", "append all of the jobs posted within the month else:", "# Average amount of weeks in a month last_scrape_date =", "75, \"-\" * 75, sep=\"\\n\") print(\"\\n\") # If the Job_Openings", "and saving Excel workbook.\", \"-\" * 75, sep=\"\\n\") driver.close() excel.update_xlsx(worksheet,", "not already in the worksheet # by checking the date", "the jobs not already in the worksheet # by checking", "\"-\" * 75, sep=\"\\n\") current_date = datetime.today() date_month_ago = current_date", "Chrome webdriver\", \"-\"*75, sep=\"\\n\") URL = \"https://workinstartups.com/job-board/jobs-in/london\" soup = web_scraper.soup_creator(URL,", "of weeks in a month last_scrape_date = date_month_ago.replace(hour=0, minute=0, second=0,", "append the jobs not already in the worksheet # by", "within the month else: print(\"Creating new Excel workbook.\", \"-\" *", "last time the site was scraped. if os.path.isfile(file_path): print(\"Job_Opening excel", "datetime.strptime(last_scrape_date, \"%d-%b-%Y\") # If not, create a new workbook and", "the jobs posted within the month else: print(\"Creating new Excel", "workbook.\", \"-\" * 75, sep=\"\\n\") workbook, worksheet = excel.load_xlsx(file_path) last_scrape_date", "max_retry=1, sleep_time=0) driver = webdriver.Chrome('./chromedriver') driver.get(URL) driver.find_element_by_link_text('Close').click() # Scrap the", "job_list = web_scraper.search_for_jobs(soup, last_scrape_date, driver) print(\"Scraping finished. Updating and saving", "workbook.\", \"-\" * 75, sep=\"\\n\") driver.close() excel.update_xlsx(worksheet, job_list) excel.save_xlsx(workbook, file_path)", "* 75, sep=\"\\n\") job_list = web_scraper.search_for_jobs(soup, last_scrape_date, driver) print(\"Scraping finished.", "from selenium import webdriver from app import web_scraper from app", "* 75, \"-\" * 75, \"\\n\\t\\t\\t\\t\\t\\t\\t JOB WEB SCRAPER\", \"-\"", "driver.find_element_by_link_text('Close').click() # Scrap the jobs from workinstartups.com and update the", "datetime import datetime, timedelta from selenium import webdriver from app", "month last_scrape_date = date_month_ago.replace(hour=0, minute=0, second=0, microsecond=0) # default to", "was scraped. if os.path.isfile(file_path): print(\"Job_Opening excel file already exists. Loading", "job postings on the website workinstartups.com and appends it to", "print(\"-\" * 75, \"-\" * 75, \"\\n\\t\\t\\t\\t\\t\\t\\t JOB WEB SCRAPER\",", "else: print(\"Creating new Excel workbook.\", \"-\" * 75, sep=\"\\n\") current_date", "worksheet with the found jobs print(\"Scraping jobs from workinstartups.com. 
Please", "# default to midnight workbook, worksheet = excel.init_xlsx(worksheet_title=\"Job Openings\") #", "75, sep=\"\\n\") workbook, worksheet = excel.load_xlsx(file_path) last_scrape_date = excel.get_first_job_date(worksheet) last_scrape_date", "75, \"\\n\\t\\t\\t\\t\\t\\t\\t JOB WEB SCRAPER\", \"-\" * 75, \"-\" *", "new workbook and append all of the jobs posted within", "Scrap the jobs from workinstartups.com and update the worksheet with", "the worksheet with the found jobs print(\"Scraping jobs from workinstartups.com.", "[], None file_path = os.path.abspath(\"main.py\").rstrip('/app/main.py') + '//Workbooks' + \"//Job_Openings.xlsx\" print(\"-\"", "import webdriver from app import web_scraper from app import excel", "weeks in a month last_scrape_date = date_month_ago.replace(hour=0, minute=0, second=0, microsecond=0)", "soup print(\"Creating soup and opening Chrome webdriver\", \"-\"*75, sep=\"\\n\") URL", "web_scraper.soup_creator(URL, max_retry=1, sleep_time=0) driver = webdriver.Chrome('./chromedriver') driver.get(URL) driver.find_element_by_link_text('Close').click() # Scrap", "the first job in excel, since the last time the", "to workinstartups.com and create soup print(\"Creating soup and opening Chrome", "create a new workbook and append all of the jobs", "datetime.today() date_month_ago = current_date - timedelta(weeks=4.348) # Average amount of", "workbook, worksheet = excel.init_xlsx(worksheet_title=\"Job Openings\") # Open webdriver to workinstartups.com", "= web_scraper.search_for_jobs(soup, last_scrape_date, driver) print(\"Scraping finished. Updating and saving Excel", "75, sep=\"\\n\") print(\"\\n\") # If the Job_Openings workbook already exists", "= excel.init_xlsx(worksheet_title=\"Job Openings\") # Open webdriver to workinstartups.com and create", "from workinstartups.com and update the worksheet with the found jobs", "Excel workbook.\", \"-\" * 75, sep=\"\\n\") driver.close() excel.update_xlsx(worksheet, job_list) excel.save_xlsx(workbook,", "Please wait.\", \"-\" * 75, sep=\"\\n\") job_list = web_scraper.search_for_jobs(soup, last_scrape_date,", "print(\"Scraping finished. Updating and saving Excel workbook.\", \"-\" * 75,", "then append the jobs not already in the worksheet #", "jobs print(\"Scraping jobs from workinstartups.com. Please wait.\", \"-\" * 75,", "create soup print(\"Creating soup and opening Chrome webdriver\", \"-\"*75, sep=\"\\n\")", "checking the date of the first job in excel, since", "worksheet = excel.init_xlsx(worksheet_title=\"Job Openings\") # Open webdriver to workinstartups.com and", "postings on the website workinstartups.com and appends it to an", "print(\"Job_Opening excel file already exists. Loading workbook.\", \"-\" * 75,", "excel file already exists. Loading workbook.\", \"-\" * 75, sep=\"\\n\")", "= datetime.today() date_month_ago = current_date - timedelta(weeks=4.348) # Average amount", "# Scrap the jobs from workinstartups.com and update the worksheet", "on the website workinstartups.com and appends it to an excel", "soup and opening Chrome webdriver\", \"-\"*75, sep=\"\\n\") URL = \"https://workinstartups.com/job-board/jobs-in/london\"", "with the found jobs print(\"Scraping jobs from workinstartups.com. Please wait.\",", "= webdriver.Chrome('./chromedriver') driver.get(URL) driver.find_element_by_link_text('Close').click() # Scrap the jobs from workinstartups.com", "since the last time the site was scraped. 
if os.path.isfile(file_path):", "print(\"\\n\") # If the Job_Openings workbook already exists then append", "excel.init_xlsx(worksheet_title=\"Job Openings\") # Open webdriver to workinstartups.com and create soup", "saving Excel workbook.\", \"-\" * 75, sep=\"\\n\") driver.close() excel.update_xlsx(worksheet, job_list)", "workinstartups.com and create soup print(\"Creating soup and opening Chrome webdriver\",", "exists then append the jobs not already in the worksheet", "job in excel, since the last time the site was", "# If not, create a new workbook and append all", "wait.\", \"-\" * 75, sep=\"\\n\") job_list = web_scraper.search_for_jobs(soup, last_scrape_date, driver)", "a month last_scrape_date = date_month_ago.replace(hour=0, minute=0, second=0, microsecond=0) # default", "workbook, worksheet = excel.load_xlsx(file_path) last_scrape_date = excel.get_first_job_date(worksheet) last_scrape_date = datetime.strptime(last_scrape_date,", "date of the first job in excel, since the last", "\"-\" * 75, \"\\n\\t\\t\\t\\t\\t\\t\\t JOB WEB SCRAPER\", \"-\" * 75,", "WEB SCRAPER\", \"-\" * 75, \"-\" * 75, sep=\"\\n\") print(\"\\n\")", "\"-\" * 75, \"-\" * 75, sep=\"\\n\") print(\"\\n\") # If", "file already exists. Loading workbook.\", \"-\" * 75, sep=\"\\n\") workbook,", "date_month_ago = current_date - timedelta(weeks=4.348) # Average amount of weeks", "Job_Openings workbook already exists then append the jobs not already", "= [], None file_path = os.path.abspath(\"main.py\").rstrip('/app/main.py') + '//Workbooks' + \"//Job_Openings.xlsx\"", "web_scraper from app import excel job_list, last_date = [], None", "from app import excel job_list, last_date = [], None file_path", "last_date = [], None file_path = os.path.abspath(\"main.py\").rstrip('/app/main.py') + '//Workbooks' +", "from job postings on the website workinstartups.com and appends it", "worksheet = excel.load_xlsx(file_path) last_scrape_date = excel.get_first_job_date(worksheet) last_scrape_date = datetime.strptime(last_scrape_date, \"%d-%b-%Y\")", "minute=0, second=0, microsecond=0) # default to midnight workbook, worksheet =", "\"-\" * 75, sep=\"\\n\") job_list = web_scraper.search_for_jobs(soup, last_scrape_date, driver) print(\"Scraping", "in the worksheet # by checking the date of the", "= datetime.strptime(last_scrape_date, \"%d-%b-%Y\") # If not, create a new workbook", "+ \"//Job_Openings.xlsx\" print(\"-\" * 75, \"-\" * 75, \"\\n\\t\\t\\t\\t\\t\\t\\t JOB", "* 75, \"-\" * 75, sep=\"\\n\") print(\"\\n\") # If the", "sleep_time=0) driver = webdriver.Chrome('./chromedriver') driver.get(URL) driver.find_element_by_link_text('Close').click() # Scrap the jobs", "<filename>app/main.py<gh_stars>0 # This program scraps data from job postings on", "excel, since the last time the site was scraped. if", "excel.load_xlsx(file_path) last_scrape_date = excel.get_first_job_date(worksheet) last_scrape_date = datetime.strptime(last_scrape_date, \"%d-%b-%Y\") # If", "last_scrape_date = date_month_ago.replace(hour=0, minute=0, second=0, microsecond=0) # default to midnight", "last_scrape_date, driver) print(\"Scraping finished. Updating and saving Excel workbook.\", \"-\"", "* 75, sep=\"\\n\") current_date = datetime.today() date_month_ago = current_date -", "current_date = datetime.today() date_month_ago = current_date - timedelta(weeks=4.348) # Average", "and update the worksheet with the found jobs print(\"Scraping jobs", "website workinstartups.com and appends it to an excel worksheet. 
import", "timedelta(weeks=4.348) # Average amount of weeks in a month last_scrape_date", "the date of the first job in excel, since the", "sep=\"\\n\") URL = \"https://workinstartups.com/job-board/jobs-in/london\" soup = web_scraper.soup_creator(URL, max_retry=1, sleep_time=0) driver", "= web_scraper.soup_creator(URL, max_retry=1, sleep_time=0) driver = webdriver.Chrome('./chromedriver') driver.get(URL) driver.find_element_by_link_text('Close').click() #", "= \"https://workinstartups.com/job-board/jobs-in/london\" soup = web_scraper.soup_creator(URL, max_retry=1, sleep_time=0) driver = webdriver.Chrome('./chromedriver')", "\"-\" * 75, sep=\"\\n\") workbook, worksheet = excel.load_xlsx(file_path) last_scrape_date =", "= os.path.abspath(\"main.py\").rstrip('/app/main.py') + '//Workbooks' + \"//Job_Openings.xlsx\" print(\"-\" * 75, \"-\"", "If the Job_Openings workbook already exists then append the jobs", "JOB WEB SCRAPER\", \"-\" * 75, \"-\" * 75, sep=\"\\n\")", "\"-\" * 75, sep=\"\\n\") print(\"\\n\") # If the Job_Openings workbook", "from datetime import datetime, timedelta from selenium import webdriver from", "workinstartups.com and appends it to an excel worksheet. import os", "and append all of the jobs posted within the month", "import excel job_list, last_date = [], None file_path = os.path.abspath(\"main.py\").rstrip('/app/main.py')", "app import excel job_list, last_date = [], None file_path =", "it to an excel worksheet. import os from datetime import", "75, sep=\"\\n\") job_list = web_scraper.search_for_jobs(soup, last_scrape_date, driver) print(\"Scraping finished. Updating", "import datetime, timedelta from selenium import webdriver from app import", "sep=\"\\n\") current_date = datetime.today() date_month_ago = current_date - timedelta(weeks=4.348) #", "in excel, since the last time the site was scraped.", "excel.get_first_job_date(worksheet) last_scrape_date = datetime.strptime(last_scrape_date, \"%d-%b-%Y\") # If not, create a", "webdriver.Chrome('./chromedriver') driver.get(URL) driver.find_element_by_link_text('Close').click() # Scrap the jobs from workinstartups.com and", "from workinstartups.com. Please wait.\", \"-\" * 75, sep=\"\\n\") job_list =", "None file_path = os.path.abspath(\"main.py\").rstrip('/app/main.py') + '//Workbooks' + \"//Job_Openings.xlsx\" print(\"-\" *", "amount of weeks in a month last_scrape_date = date_month_ago.replace(hour=0, minute=0,", "from app import web_scraper from app import excel job_list, last_date", "* 75, sep=\"\\n\") print(\"\\n\") # If the Job_Openings workbook already", "sep=\"\\n\") print(\"\\n\") # If the Job_Openings workbook already exists then", "the last time the site was scraped. if os.path.isfile(file_path): print(\"Job_Opening", "# This program scraps data from job postings on the", "an excel worksheet. import os from datetime import datetime, timedelta", "second=0, microsecond=0) # default to midnight workbook, worksheet = excel.init_xlsx(worksheet_title=\"Job", "not, create a new workbook and append all of the", "excel job_list, last_date = [], None file_path = os.path.abspath(\"main.py\").rstrip('/app/main.py') +", "the found jobs print(\"Scraping jobs from workinstartups.com. Please wait.\", \"-\"", "jobs from workinstartups.com and update the worksheet with the found", "jobs from workinstartups.com. Please wait.\", \"-\" * 75, sep=\"\\n\") job_list", "sep=\"\\n\") job_list = web_scraper.search_for_jobs(soup, last_scrape_date, driver) print(\"Scraping finished. 
Updating and", "job_list, last_date = [], None file_path = os.path.abspath(\"main.py\").rstrip('/app/main.py') + '//Workbooks'", "first job in excel, since the last time the site", "midnight workbook, worksheet = excel.init_xlsx(worksheet_title=\"Job Openings\") # Open webdriver to", "workbook.\", \"-\" * 75, sep=\"\\n\") current_date = datetime.today() date_month_ago =", "new Excel workbook.\", \"-\" * 75, sep=\"\\n\") current_date = datetime.today()", "last_scrape_date = excel.get_first_job_date(worksheet) last_scrape_date = datetime.strptime(last_scrape_date, \"%d-%b-%Y\") # If not,", "print(\"Scraping jobs from workinstartups.com. Please wait.\", \"-\" * 75, sep=\"\\n\")", "jobs posted within the month else: print(\"Creating new Excel workbook.\",", "data from job postings on the website workinstartups.com and appends", "excel worksheet. import os from datetime import datetime, timedelta from", "microsecond=0) # default to midnight workbook, worksheet = excel.init_xlsx(worksheet_title=\"Job Openings\")", "sep=\"\\n\") workbook, worksheet = excel.load_xlsx(file_path) last_scrape_date = excel.get_first_job_date(worksheet) last_scrape_date =", "the month else: print(\"Creating new Excel workbook.\", \"-\" * 75,", "import web_scraper from app import excel job_list, last_date = [],", "jobs not already in the worksheet # by checking the", "month else: print(\"Creating new Excel workbook.\", \"-\" * 75, sep=\"\\n\")", "the site was scraped. if os.path.isfile(file_path): print(\"Job_Opening excel file already", "workbook already exists then append the jobs not already in", "and opening Chrome webdriver\", \"-\"*75, sep=\"\\n\") URL = \"https://workinstartups.com/job-board/jobs-in/london\" soup", "\"-\"*75, sep=\"\\n\") URL = \"https://workinstartups.com/job-board/jobs-in/london\" soup = web_scraper.soup_creator(URL, max_retry=1, sleep_time=0)", "scraps data from job postings on the website workinstartups.com and", "by checking the date of the first job in excel,", "finished. Updating and saving Excel workbook.\", \"-\" * 75, sep=\"\\n\")", "default to midnight workbook, worksheet = excel.init_xlsx(worksheet_title=\"Job Openings\") # Open", "date_month_ago.replace(hour=0, minute=0, second=0, microsecond=0) # default to midnight workbook, worksheet", "soup = web_scraper.soup_creator(URL, max_retry=1, sleep_time=0) driver = webdriver.Chrome('./chromedriver') driver.get(URL) driver.find_element_by_link_text('Close').click()", "a new workbook and append all of the jobs posted", "to midnight workbook, worksheet = excel.init_xlsx(worksheet_title=\"Job Openings\") # Open webdriver", "= excel.load_xlsx(file_path) last_scrape_date = excel.get_first_job_date(worksheet) last_scrape_date = datetime.strptime(last_scrape_date, \"%d-%b-%Y\") #", "Openings\") # Open webdriver to workinstartups.com and create soup print(\"Creating", "driver.get(URL) driver.find_element_by_link_text('Close').click() # Scrap the jobs from workinstartups.com and update", "If not, create a new workbook and append all of", "webdriver to workinstartups.com and create soup print(\"Creating soup and opening", "to an excel worksheet. import os from datetime import datetime,", "worksheet # by checking the date of the first job", "of the first job in excel, since the last time", "file_path = os.path.abspath(\"main.py\").rstrip('/app/main.py') + '//Workbooks' + \"//Job_Openings.xlsx\" print(\"-\" * 75,", "os.path.isfile(file_path): print(\"Job_Opening excel file already exists. 
Loading workbook.\", \"-\" *", "os.path.abspath(\"main.py\").rstrip('/app/main.py') + '//Workbooks' + \"//Job_Openings.xlsx\" print(\"-\" * 75, \"-\" *", "SCRAPER\", \"-\" * 75, \"-\" * 75, sep=\"\\n\") print(\"\\n\") #", "# Open webdriver to workinstartups.com and create soup print(\"Creating soup", "driver = webdriver.Chrome('./chromedriver') driver.get(URL) driver.find_element_by_link_text('Close').click() # Scrap the jobs from", "'//Workbooks' + \"//Job_Openings.xlsx\" print(\"-\" * 75, \"-\" * 75, \"\\n\\t\\t\\t\\t\\t\\t\\t", "\"https://workinstartups.com/job-board/jobs-in/london\" soup = web_scraper.soup_creator(URL, max_retry=1, sleep_time=0) driver = webdriver.Chrome('./chromedriver') driver.get(URL)", "Updating and saving Excel workbook.\", \"-\" * 75, sep=\"\\n\") driver.close()", "if os.path.isfile(file_path): print(\"Job_Opening excel file already exists. Loading workbook.\", \"-\"", "worksheet. import os from datetime import datetime, timedelta from selenium", "os from datetime import datetime, timedelta from selenium import webdriver", "posted within the month else: print(\"Creating new Excel workbook.\", \"-\"", "the website workinstartups.com and appends it to an excel worksheet.", "all of the jobs posted within the month else: print(\"Creating", "found jobs print(\"Scraping jobs from workinstartups.com. Please wait.\", \"-\" *", "the worksheet # by checking the date of the first", "of the jobs posted within the month else: print(\"Creating new", "Average amount of weeks in a month last_scrape_date = date_month_ago.replace(hour=0,", "the Job_Openings workbook already exists then append the jobs not", "URL = \"https://workinstartups.com/job-board/jobs-in/london\" soup = web_scraper.soup_creator(URL, max_retry=1, sleep_time=0) driver =", "75, sep=\"\\n\") current_date = datetime.today() date_month_ago = current_date - timedelta(weeks=4.348)", "* 75, sep=\"\\n\") driver.close() excel.update_xlsx(worksheet, job_list) excel.save_xlsx(workbook, file_path) print(\"Finished!\", sep=\"\\n\")", "\"-\" * 75, sep=\"\\n\") driver.close() excel.update_xlsx(worksheet, job_list) excel.save_xlsx(workbook, file_path) print(\"Finished!\",", "app import web_scraper from app import excel job_list, last_date =", "\"\\n\\t\\t\\t\\t\\t\\t\\t JOB WEB SCRAPER\", \"-\" * 75, \"-\" * 75,", "\"%d-%b-%Y\") # If not, create a new workbook and append", "75, \"-\" * 75, \"\\n\\t\\t\\t\\t\\t\\t\\t JOB WEB SCRAPER\", \"-\" *", "scraped. if os.path.isfile(file_path): print(\"Job_Opening excel file already exists. Loading workbook.\",", "print(\"Creating new Excel workbook.\", \"-\" * 75, sep=\"\\n\") current_date =", "= current_date - timedelta(weeks=4.348) # Average amount of weeks in", "Excel workbook.\", \"-\" * 75, sep=\"\\n\") current_date = datetime.today() date_month_ago", "= date_month_ago.replace(hour=0, minute=0, second=0, microsecond=0) # default to midnight workbook,", "update the worksheet with the found jobs print(\"Scraping jobs from" ]
[ "to use for the given filename\"\"\" return filename.replace(\"/\", \"_\").replace(\".\", \"_\").upper()", "index in the range [1, num_blocks].\"\"\" for block in range(1,", "blocks to break classes into.\"\"\" num_blocks = len(separators) + 1", "and n <= num_blocks return [decoder for decoder in decoders", "= GetNumberCodeBlocks(separators) assert n > 0 and n <= num_blocks", "file is not meant for use in the TCB #endif", "is governed by a BSD-style license that can be #", "prefix length of the decoder name.\"\"\" decoder_name = name_fcn(decoder) prefix_len", "for decoder in decoders if ((n == 1 or IsPrefixLeDecoder(separators[n-2],", "list of separators.\"\"\" num_blocks = GetNumberCodeBlocks(separators) assert n > 0", "NACL_TRUSTED_BUT_NOT_TCB #error This file is not meant for use in", "the corresponding prefix length of the decoder name.\"\"\" decoder_name =", "\"_\").upper() + \"_\" def GetNumberCodeBlocks(separators): \"\"\"Gets the number of code", "num_blocks): \"\"\"Returns true if the filename matches the format with", "num_blocks = GetNumberCodeBlocks(separators) assert n > 0 and n <=", "are split using the list of separators.\"\"\" num_blocks = GetNumberCodeBlocks(separators)", "The Native Client Authors. All rights reserved. * Use of", "Some common boilerplates and helper functions for source code generation", "range [1, num_blocks].\"\"\" for block in range(1, num_blocks+1): suffix =", "newlines.\"\"\" def commented_string(str, indent=''): sep = NEWLINE_STR + indent +", "(sorted) list of decoders to include in block n, assuming", "code is governed by a BSD-style license that can be", "#error This file is not meant for use in the", "\"\"\"Gets the number of code blocks to break classes into.\"\"\"", "Native Client Authors. All rights reserved. * Use of this", "suffix = format % block if filename.endswith(suffix): return block raise", "if filename.endswith(suffix): return block raise Exception(\"Can't find block index: %s\"", "governed by a BSD-style license that can be # found", "Use of this source code is governed by a BSD-style", "separators, decoders, name_fcn): \"\"\"Returns the (sorted) list of decoders to", "is not meant for use in the TCB #endif \"\"\"", "by a BSD-style license that can * be found in", "name_fcn)))] def IsPrefixLeDecoder(prefix, decoder, name_fcn): \"\"\"Returns true if the prefix", "to the corresponding prefix length of the decoder name.\"\"\" decoder_name", "Generates the ifdef name to use for the given filename\"\"\"", "meant for use in the TCB #endif \"\"\" NEWLINE_STR=\"\"\" \"\"\"", "2013 The Native Client Authors. All rights reserved. 
* Use", "the given filename\"\"\" return filename.replace(\"/\", \"_\").replace(\".\", \"_\").upper() + \"_\" def", "assert n > 0 and n <= num_blocks return [decoder", "functions for source code generation in files dgen_test_output.py and dgen_decode_output.py.", "block if filename.endswith(suffix): return block raise Exception(\"Can't find block index:", "given filename\"\"\" return filename.replace(\"/\", \"_\").replace(\".\", \"_\").upper() + \"_\" def GetNumberCodeBlocks(separators):", "\"\"\"Adds comment '// ' string after newlines.\"\"\" def commented_string(str, indent=''):", "IsPrefixLeDecoder(prefix, decoder, name_fcn): \"\"\"Returns true if the prefix is less", "* Use of this source code is governed by a", "DO NOT EDIT: GENERATED CODE \"\"\" NOT_TCB_BOILERPLATE=\"\"\"#ifndef NACL_TRUSTED_BUT_NOT_TCB #error This", "block in range(1, num_blocks+1): suffix = format % block if", "return [decoder for decoder in decoders if ((n == 1", "hack, and fix it. return str.replace('\\\\n', sep) def ifdef_name(filename): \"\"\"", "rights reserved. # Use of this source code is governed", "len(decoder_name) decoder_prefix = (decoder_name[0:prefix_len] if prefix_len < decoder_len else decoder_name)", "\"\"\" Generates the ifdef name to use for the given", "format % block if filename.endswith(suffix): return block raise Exception(\"Can't find", "sep = NEWLINE_STR + indent + '//' str = str.replace(NEWLINE_STR,", "CODE \"\"\" NOT_TCB_BOILERPLATE=\"\"\"#ifndef NACL_TRUSTED_BUT_NOT_TCB #error This file is not meant", "and dgen_decode_output.py. \"\"\" HEADER_BOILERPLATE =\"\"\"/* * Copyright 2013 The Native", "equal to the corresponding prefix length of the decoder name.\"\"\"", "that can * be found in the LICENSE file. */", "decoders are split using the list of separators.\"\"\" num_blocks =", "not IsPrefixLeDecoder(separators[n-1], decoder, name_fcn)))] def IsPrefixLeDecoder(prefix, decoder, name_fcn): \"\"\"Returns true", "license that can * be found in the LICENSE file.", "name.\"\"\" decoder_name = name_fcn(decoder) prefix_len = len(prefix) decoder_len = len(decoder_name)", "if ((n == 1 or IsPrefixLeDecoder(separators[n-2], decoder, name_fcn)) and (n", "=\"\"\"/* * Copyright 2013 The Native Client Authors. All rights", "+ '//' str = str.replace(NEWLINE_STR, sep) # This second line", "def ifdef_name(filename): \"\"\" Generates the ifdef name to use for", "in the LICENSE file. */ // DO NOT EDIT: GENERATED", "\"\"\" Some common boilerplates and helper functions for source code", "* Copyright 2013 The Native Client Authors. All rights reserved.", "and helper functions for source code generation in files dgen_test_output.py", "decoder_name = name_fcn(decoder) prefix_len = len(prefix) decoder_len = len(decoder_name) decoder_prefix", "license that can be # found in the LICENSE file.", "is less than or equal to the corresponding prefix length", "decoders if ((n == 1 or IsPrefixLeDecoder(separators[n-2], decoder, name_fcn)) and", "str.replace('\\\\n', sep) def ifdef_name(filename): \"\"\" Generates the ifdef name to", "dgen_test_output.py and dgen_decode_output.py. 
\"\"\" HEADER_BOILERPLATE =\"\"\"/* * Copyright 2013 The", "BSD-style license that can be # found in the LICENSE", "filename matches the format with an index in the range", "# Use of this source code is governed by a", "EDIT: GENERATED CODE \"\"\" NOT_TCB_BOILERPLATE=\"\"\"#ifndef NACL_TRUSTED_BUT_NOT_TCB #error This file is", "[decoder for decoder in decoders if ((n == 1 or", "true if the filename matches the format with an index", "less than or equal to the corresponding prefix length of", "in block n, assuming decoders are split using the list", "= str.replace(NEWLINE_STR, sep) # This second line is a hack", "helper functions for source code generation in files dgen_test_output.py and", "code blocks to break classes into.\"\"\" num_blocks = len(separators) +", "ifdef name to use for the given filename\"\"\" return filename.replace(\"/\",", "with an index in the range [1, num_blocks].\"\"\" for block", "((n == 1 or IsPrefixLeDecoder(separators[n-2], decoder, name_fcn)) and (n ==", "separators.\"\"\" num_blocks = GetNumberCodeBlocks(separators) assert n > 0 and n", "or IsPrefixLeDecoder(separators[n-2], decoder, name_fcn)) and (n == num_blocks or not", "a BSD-style license that can be # found in the", "#!/usr/bin/python2 # # Copyright (c) 2012 The Native Client Authors.", "in decoders if ((n == 1 or IsPrefixLeDecoder(separators[n-2], decoder, name_fcn))", "block index: %s\" % filename) def GetDecodersBlock(n, separators, decoders, name_fcn):", "'// ' string after newlines.\"\"\" def commented_string(str, indent=''): sep =", "str = str.replace(NEWLINE_STR, sep) # This second line is a", ">= 2 return num_blocks def FindBlockIndex(filename, format, num_blocks): \"\"\"Returns true", "num_blocks >= 2 return num_blocks def FindBlockIndex(filename, format, num_blocks): \"\"\"Returns", "to break classes into.\"\"\" num_blocks = len(separators) + 1 assert", "1 or IsPrefixLeDecoder(separators[n-2], decoder, name_fcn)) and (n == num_blocks or", "corresponding prefix length of the decoder name.\"\"\" decoder_name = name_fcn(decoder)", "the range [1, num_blocks].\"\"\" for block in range(1, num_blocks+1): suffix", "in files dgen_test_output.py and dgen_decode_output.py. \"\"\" HEADER_BOILERPLATE =\"\"\"/* * Copyright", "for the given filename\"\"\" return filename.replace(\"/\", \"_\").replace(\".\", \"_\").upper() + \"_\"", "FindBlockIndex(filename, format, num_blocks): \"\"\"Returns true if the filename matches the", "the ifdef name to use for the given filename\"\"\" return", "line is a hack to fix that sometimes newlines are", "\"\"\"Returns true if the prefix is less than or equal", "# Copyright (c) 2012 The Native Client Authors. All rights", "fix that sometimes newlines are # represented as '\\n'. #", "return num_blocks def FindBlockIndex(filename, format, num_blocks): \"\"\"Returns true if the", "after newlines.\"\"\" def commented_string(str, indent=''): sep = NEWLINE_STR + indent", "GENERATED CODE \"\"\" NOT_TCB_BOILERPLATE=\"\"\"#ifndef NACL_TRUSTED_BUT_NOT_TCB #error This file is not", "the TCB #endif \"\"\" NEWLINE_STR=\"\"\" \"\"\" COMMENTED_NEWLINE_STR=\"\"\" //\"\"\" \"\"\"Adds comment", "\"\"\" NEWLINE_STR=\"\"\" \"\"\" COMMENTED_NEWLINE_STR=\"\"\" //\"\"\" \"\"\"Adds comment '// ' string", "NEWLINE_STR + indent + '//' str = str.replace(NEWLINE_STR, sep) #", "as '\\n'. # TODO(karl) Find the cause of this hack,", "in the LICENSE file. 
# \"\"\" Some common boilerplates and", "name_fcn(decoder) prefix_len = len(prefix) decoder_len = len(decoder_name) decoder_prefix = (decoder_name[0:prefix_len]", "decoders to include in block n, assuming decoders are split", "return filename.replace(\"/\", \"_\").replace(\".\", \"_\").upper() + \"_\" def GetNumberCodeBlocks(separators): \"\"\"Gets the", "filename\"\"\" return filename.replace(\"/\", \"_\").replace(\".\", \"_\").upper() + \"_\" def GetNumberCodeBlocks(separators): \"\"\"Gets", "index: %s\" % filename) def GetDecodersBlock(n, separators, decoders, name_fcn): \"\"\"Returns", "All rights reserved. * Use of this source code is", "# \"\"\" Some common boilerplates and helper functions for source", "cause of this hack, and fix it. return str.replace('\\\\n', sep)", "the number of code blocks to break classes into.\"\"\" num_blocks", "decoder, name_fcn)))] def IsPrefixLeDecoder(prefix, decoder, name_fcn): \"\"\"Returns true if the", "found in the LICENSE file. # \"\"\" Some common boilerplates", "num_blocks return [decoder for decoder in decoders if ((n ==", "Exception(\"Can't find block index: %s\" % filename) def GetDecodersBlock(n, separators,", "decoder in decoders if ((n == 1 or IsPrefixLeDecoder(separators[n-2], decoder,", "true if the prefix is less than or equal to", "is governed by a BSD-style license that can * be", "into.\"\"\" num_blocks = len(separators) + 1 assert num_blocks >= 2", "= len(decoder_name) decoder_prefix = (decoder_name[0:prefix_len] if prefix_len < decoder_len else", "decoder_len = len(decoder_name) decoder_prefix = (decoder_name[0:prefix_len] if prefix_len < decoder_len", "decoder name.\"\"\" decoder_name = name_fcn(decoder) prefix_len = len(prefix) decoder_len =", "name_fcn): \"\"\"Returns the (sorted) list of decoders to include in", "second line is a hack to fix that sometimes newlines", "length of the decoder name.\"\"\" decoder_name = name_fcn(decoder) prefix_len =", "files dgen_test_output.py and dgen_decode_output.py. \"\"\" HEADER_BOILERPLATE =\"\"\"/* * Copyright 2013", "this source code is governed by a BSD-style license that", "assert num_blocks >= 2 return num_blocks def FindBlockIndex(filename, format, num_blocks):", "can * be found in the LICENSE file. */ //", "prefix is less than or equal to the corresponding prefix", "TODO(karl) Find the cause of this hack, and fix it.", "number of code blocks to break classes into.\"\"\" num_blocks =", "return block raise Exception(\"Can't find block index: %s\" % filename)", "source code is governed by a BSD-style license that can", "Authors. All rights reserved. # Use of this source code", "= format % block if filename.endswith(suffix): return block raise Exception(\"Can't", "the LICENSE file. */ // DO NOT EDIT: GENERATED CODE", "or not IsPrefixLeDecoder(separators[n-1], decoder, name_fcn)))] def IsPrefixLeDecoder(prefix, decoder, name_fcn): \"\"\"Returns", "for source code generation in files dgen_test_output.py and dgen_decode_output.py. \"\"\"", "or equal to the corresponding prefix length of the decoder", "def GetDecodersBlock(n, separators, decoders, name_fcn): \"\"\"Returns the (sorted) list of", "2012 The Native Client Authors. All rights reserved. 
# Use", "len(prefix) decoder_len = len(decoder_name) decoder_prefix = (decoder_name[0:prefix_len] if prefix_len <", "decoder_prefix = (decoder_name[0:prefix_len] if prefix_len < decoder_len else decoder_name) return", "//\"\"\" \"\"\"Adds comment '// ' string after newlines.\"\"\" def commented_string(str,", "format with an index in the range [1, num_blocks].\"\"\" for", "the format with an index in the range [1, num_blocks].\"\"\"", "an index in the range [1, num_blocks].\"\"\" for block in", "common boilerplates and helper functions for source code generation in", "use in the TCB #endif \"\"\" NEWLINE_STR=\"\"\" \"\"\" COMMENTED_NEWLINE_STR=\"\"\" //\"\"\"", "of this source code is governed by a BSD-style license", "if prefix_len < decoder_len else decoder_name) return prefix <= decoder_prefix", "a BSD-style license that can * be found in the", "commented_string(str, indent=''): sep = NEWLINE_STR + indent + '//' str", "NOT EDIT: GENERATED CODE \"\"\" NOT_TCB_BOILERPLATE=\"\"\"#ifndef NACL_TRUSTED_BUT_NOT_TCB #error This file", "reserved. # Use of this source code is governed by", "indent=''): sep = NEWLINE_STR + indent + '//' str =", "% filename) def GetDecodersBlock(n, separators, decoders, name_fcn): \"\"\"Returns the (sorted)", "> 0 and n <= num_blocks return [decoder for decoder", "num_blocks def FindBlockIndex(filename, format, num_blocks): \"\"\"Returns true if the filename", "= (decoder_name[0:prefix_len] if prefix_len < decoder_len else decoder_name) return prefix", "file. */ // DO NOT EDIT: GENERATED CODE \"\"\" NOT_TCB_BOILERPLATE=\"\"\"#ifndef", "range(1, num_blocks+1): suffix = format % block if filename.endswith(suffix): return", "classes into.\"\"\" num_blocks = len(separators) + 1 assert num_blocks >=", "it. return str.replace('\\\\n', sep) def ifdef_name(filename): \"\"\" Generates the ifdef", "the (sorted) list of decoders to include in block n,", "in the TCB #endif \"\"\" NEWLINE_STR=\"\"\" \"\"\" COMMENTED_NEWLINE_STR=\"\"\" //\"\"\" \"\"\"Adds", "sometimes newlines are # represented as '\\n'. # TODO(karl) Find", "%s\" % filename) def GetDecodersBlock(n, separators, decoders, name_fcn): \"\"\"Returns the", "include in block n, assuming decoders are split using the", "num_blocks = len(separators) + 1 assert num_blocks >= 2 return", "can be # found in the LICENSE file. # \"\"\"", "<= num_blocks return [decoder for decoder in decoders if ((n", "list of decoders to include in block n, assuming decoders", "\"\"\" COMMENTED_NEWLINE_STR=\"\"\" //\"\"\" \"\"\"Adds comment '// ' string after newlines.\"\"\"", "assuming decoders are split using the list of separators.\"\"\" num_blocks", "block n, assuming decoders are split using the list of", "This file is not meant for use in the TCB", "num_blocks or not IsPrefixLeDecoder(separators[n-1], decoder, name_fcn)))] def IsPrefixLeDecoder(prefix, decoder, name_fcn):", "in range(1, num_blocks+1): suffix = format % block if filename.endswith(suffix):", "Copyright 2013 The Native Client Authors. All rights reserved. *", "name to use for the given filename\"\"\" return filename.replace(\"/\", \"_\").replace(\".\",", "Native Client Authors. All rights reserved. 
# Use of this", "def IsPrefixLeDecoder(prefix, decoder, name_fcn): \"\"\"Returns true if the prefix is", "the decoder name.\"\"\" decoder_name = name_fcn(decoder) prefix_len = len(prefix) decoder_len", "for use in the TCB #endif \"\"\" NEWLINE_STR=\"\"\" \"\"\" COMMENTED_NEWLINE_STR=\"\"\"", "'//' str = str.replace(NEWLINE_STR, sep) # This second line is", "\"\"\"Returns true if the filename matches the format with an", "sep) def ifdef_name(filename): \"\"\" Generates the ifdef name to use", "matches the format with an index in the range [1,", "\"\"\" HEADER_BOILERPLATE =\"\"\"/* * Copyright 2013 The Native Client Authors.", "that can be # found in the LICENSE file. #", "(decoder_name[0:prefix_len] if prefix_len < decoder_len else decoder_name) return prefix <=", "COMMENTED_NEWLINE_STR=\"\"\" //\"\"\" \"\"\"Adds comment '// ' string after newlines.\"\"\" def", "a hack to fix that sometimes newlines are # represented", "this hack, and fix it. return str.replace('\\\\n', sep) def ifdef_name(filename):", "= len(separators) + 1 assert num_blocks >= 2 return num_blocks", "for block in range(1, num_blocks+1): suffix = format % block", "n <= num_blocks return [decoder for decoder in decoders if", "* be found in the LICENSE file. */ // DO", "generation in files dgen_test_output.py and dgen_decode_output.py. \"\"\" HEADER_BOILERPLATE =\"\"\"/* *", "not meant for use in the TCB #endif \"\"\" NEWLINE_STR=\"\"\"", "GetNumberCodeBlocks(separators) assert n > 0 and n <= num_blocks return", "boilerplates and helper functions for source code generation in files", "are # represented as '\\n'. # TODO(karl) Find the cause", "str.replace(NEWLINE_STR, sep) # This second line is a hack to", "ifdef_name(filename): \"\"\" Generates the ifdef name to use for the", "1 assert num_blocks >= 2 return num_blocks def FindBlockIndex(filename, format,", "filename.endswith(suffix): return block raise Exception(\"Can't find block index: %s\" %", "The Native Client Authors. All rights reserved. # Use of", "# This second line is a hack to fix that", "filename) def GetDecodersBlock(n, separators, decoders, name_fcn): \"\"\"Returns the (sorted) list", "return str.replace('\\\\n', sep) def ifdef_name(filename): \"\"\" Generates the ifdef name", "and fix it. return str.replace('\\\\n', sep) def ifdef_name(filename): \"\"\" Generates", "reserved. * Use of this source code is governed by", "LICENSE file. */ // DO NOT EDIT: GENERATED CODE \"\"\"", "of separators.\"\"\" num_blocks = GetNumberCodeBlocks(separators) assert n > 0 and", "file. # \"\"\" Some common boilerplates and helper functions for", "to fix that sometimes newlines are # represented as '\\n'.", "+ \"_\" def GetNumberCodeBlocks(separators): \"\"\"Gets the number of code blocks", "Copyright (c) 2012 The Native Client Authors. All rights reserved.", "\"\"\" NOT_TCB_BOILERPLATE=\"\"\"#ifndef NACL_TRUSTED_BUT_NOT_TCB #error This file is not meant for", "# found in the LICENSE file. 
# \"\"\" Some common", "== num_blocks or not IsPrefixLeDecoder(separators[n-1], decoder, name_fcn)))] def IsPrefixLeDecoder(prefix, decoder,", "// DO NOT EDIT: GENERATED CODE \"\"\" NOT_TCB_BOILERPLATE=\"\"\"#ifndef NACL_TRUSTED_BUT_NOT_TCB #error", "name_fcn)) and (n == num_blocks or not IsPrefixLeDecoder(separators[n-1], decoder, name_fcn)))]", "[1, num_blocks].\"\"\" for block in range(1, num_blocks+1): suffix = format", "if the filename matches the format with an index in", "TCB #endif \"\"\" NEWLINE_STR=\"\"\" \"\"\" COMMENTED_NEWLINE_STR=\"\"\" //\"\"\" \"\"\"Adds comment '//", "== 1 or IsPrefixLeDecoder(separators[n-2], decoder, name_fcn)) and (n == num_blocks", "% block if filename.endswith(suffix): return block raise Exception(\"Can't find block", "IsPrefixLeDecoder(separators[n-1], decoder, name_fcn)))] def IsPrefixLeDecoder(prefix, decoder, name_fcn): \"\"\"Returns true if", "0 and n <= num_blocks return [decoder for decoder in", "the LICENSE file. # \"\"\" Some common boilerplates and helper", "comment '// ' string after newlines.\"\"\" def commented_string(str, indent=''): sep", "(n == num_blocks or not IsPrefixLeDecoder(separators[n-1], decoder, name_fcn)))] def IsPrefixLeDecoder(prefix,", "(c) 2012 The Native Client Authors. All rights reserved. #", "NOT_TCB_BOILERPLATE=\"\"\"#ifndef NACL_TRUSTED_BUT_NOT_TCB #error This file is not meant for use", "use for the given filename\"\"\" return filename.replace(\"/\", \"_\").replace(\".\", \"_\").upper() +", "that sometimes newlines are # represented as '\\n'. # TODO(karl)", "than or equal to the corresponding prefix length of the", "is a hack to fix that sometimes newlines are #", "Authors. All rights reserved. * Use of this source code", "'\\n'. # TODO(karl) Find the cause of this hack, and", "the prefix is less than or equal to the corresponding", "by a BSD-style license that can be # found in", "indent + '//' str = str.replace(NEWLINE_STR, sep) # This second", "# # Copyright (c) 2012 The Native Client Authors. All", "using the list of separators.\"\"\" num_blocks = GetNumberCodeBlocks(separators) assert n", "n, assuming decoders are split using the list of separators.\"\"\"", "rights reserved. * Use of this source code is governed", "source code generation in files dgen_test_output.py and dgen_decode_output.py. \"\"\" HEADER_BOILERPLATE", "be found in the LICENSE file. */ // DO NOT", "Client Authors. All rights reserved. * Use of this source", "num_blocks].\"\"\" for block in range(1, num_blocks+1): suffix = format %", "def GetNumberCodeBlocks(separators): \"\"\"Gets the number of code blocks to break", "+ 1 assert num_blocks >= 2 return num_blocks def FindBlockIndex(filename,", "format, num_blocks): \"\"\"Returns true if the filename matches the format", "' string after newlines.\"\"\" def commented_string(str, indent=''): sep = NEWLINE_STR", "= name_fcn(decoder) prefix_len = len(prefix) decoder_len = len(decoder_name) decoder_prefix =", "num_blocks+1): suffix = format % block if filename.endswith(suffix): return block", "hack to fix that sometimes newlines are # represented as", "\"\"\"Returns the (sorted) list of decoders to include in block", "Client Authors. All rights reserved. # Use of this source", "GetNumberCodeBlocks(separators): \"\"\"Gets the number of code blocks to break classes", "fix it. return str.replace('\\\\n', sep) def ifdef_name(filename): \"\"\" Generates the", "\"_\").replace(\".\", \"_\").upper() + \"_\" def GetNumberCodeBlocks(separators): \"\"\"Gets the number of", "All rights reserved. 
# Use of this source code is", "break classes into.\"\"\" num_blocks = len(separators) + 1 assert num_blocks", "NEWLINE_STR=\"\"\" \"\"\" COMMENTED_NEWLINE_STR=\"\"\" //\"\"\" \"\"\"Adds comment '// ' string after", "n > 0 and n <= num_blocks return [decoder for", "def commented_string(str, indent=''): sep = NEWLINE_STR + indent + '//'", "decoder, name_fcn): \"\"\"Returns true if the prefix is less than", "# TODO(karl) Find the cause of this hack, and fix", "the list of separators.\"\"\" num_blocks = GetNumberCodeBlocks(separators) assert n >", "Find the cause of this hack, and fix it. return", "of decoders to include in block n, assuming decoders are", "and (n == num_blocks or not IsPrefixLeDecoder(separators[n-1], decoder, name_fcn)))] def", "def FindBlockIndex(filename, format, num_blocks): \"\"\"Returns true if the filename matches", "HEADER_BOILERPLATE =\"\"\"/* * Copyright 2013 The Native Client Authors. All", "+ indent + '//' str = str.replace(NEWLINE_STR, sep) # This", "# represented as '\\n'. # TODO(karl) Find the cause of", "block raise Exception(\"Can't find block index: %s\" % filename) def", "name_fcn): \"\"\"Returns true if the prefix is less than or", "if the prefix is less than or equal to the", "2 return num_blocks def FindBlockIndex(filename, format, num_blocks): \"\"\"Returns true if", "*/ // DO NOT EDIT: GENERATED CODE \"\"\" NOT_TCB_BOILERPLATE=\"\"\"#ifndef NACL_TRUSTED_BUT_NOT_TCB", "string after newlines.\"\"\" def commented_string(str, indent=''): sep = NEWLINE_STR +", "in the range [1, num_blocks].\"\"\" for block in range(1, num_blocks+1):", "GetDecodersBlock(n, separators, decoders, name_fcn): \"\"\"Returns the (sorted) list of decoders", "dgen_decode_output.py. \"\"\" HEADER_BOILERPLATE =\"\"\"/* * Copyright 2013 The Native Client", "be # found in the LICENSE file. # \"\"\" Some", "the filename matches the format with an index in the", "IsPrefixLeDecoder(separators[n-2], decoder, name_fcn)) and (n == num_blocks or not IsPrefixLeDecoder(separators[n-1],", "filename.replace(\"/\", \"_\").replace(\".\", \"_\").upper() + \"_\" def GetNumberCodeBlocks(separators): \"\"\"Gets the number", "#endif \"\"\" NEWLINE_STR=\"\"\" \"\"\" COMMENTED_NEWLINE_STR=\"\"\" //\"\"\" \"\"\"Adds comment '// '", "of this hack, and fix it. return str.replace('\\\\n', sep) def", "governed by a BSD-style license that can * be found", "len(separators) + 1 assert num_blocks >= 2 return num_blocks def", "raise Exception(\"Can't find block index: %s\" % filename) def GetDecodersBlock(n,", "to include in block n, assuming decoders are split using", "sep) # This second line is a hack to fix", "of the decoder name.\"\"\" decoder_name = name_fcn(decoder) prefix_len = len(prefix)", "found in the LICENSE file. */ // DO NOT EDIT:", "\"_\" def GetNumberCodeBlocks(separators): \"\"\"Gets the number of code blocks to", "represented as '\\n'. # TODO(karl) Find the cause of this", "prefix_len = len(prefix) decoder_len = len(decoder_name) decoder_prefix = (decoder_name[0:prefix_len] if", "find block index: %s\" % filename) def GetDecodersBlock(n, separators, decoders,", "This second line is a hack to fix that sometimes", "LICENSE file. # \"\"\" Some common boilerplates and helper functions", "code is governed by a BSD-style license that can *", "BSD-style license that can * be found in the LICENSE", "decoder, name_fcn)) and (n == num_blocks or not IsPrefixLeDecoder(separators[n-1], decoder,", "code generation in files dgen_test_output.py and dgen_decode_output.py. 
\"\"\" HEADER_BOILERPLATE =\"\"\"/*", "of code blocks to break classes into.\"\"\" num_blocks = len(separators)", "= len(prefix) decoder_len = len(decoder_name) decoder_prefix = (decoder_name[0:prefix_len] if prefix_len", "decoders, name_fcn): \"\"\"Returns the (sorted) list of decoders to include", "split using the list of separators.\"\"\" num_blocks = GetNumberCodeBlocks(separators) assert", "newlines are # represented as '\\n'. # TODO(karl) Find the", "the cause of this hack, and fix it. return str.replace('\\\\n',", "= NEWLINE_STR + indent + '//' str = str.replace(NEWLINE_STR, sep)" ]
[ "list) -> np.array: \"\"\" Loads a mnist image from the", "-> np.array: \"\"\" Loads a image in the human-readable format.", "file: for j in range(num_images): line_arr = file.readline().split(\",\") targets_array[j] =", "numpy array. \"\"\" images = np.zeros((len(img_nums), *shape), dtype=float) for idx,", "a image in the human-readable format. Args: path: The path", "return load_img(path, img_nums, (28, 28)) def load_cifar10_human_readable(path: str, img_nums: list)", "want to load. Returns: The images as a Mx3x32x32 numpy", "the human-readable format. Args: path: The path to the to", "the Cifar10 images in human readable format. Args: path: The", "open(file, \"r\") as f: data = [float(pixel) for pixel in", "Loads a mnist image from the neurify dataset. Args: path:", "f.readlines()[0].split(\",\")[:-1]] images[idx, :, :] = np.array(data).reshape(*shape) return images def load_mnist_human_readable(path:", "the images we want to load. shape: The shape of", "with open(file, \"r\") as f: data = [float(pixel) for pixel", "str(i)) with open(file, \"r\") as f: data = [float(pixel) for", "images as a Mx3x32x32 numpy array. \"\"\" return load_img(path, img_nums,", "import numpy as np def load_img(path: str, img_nums: list, shape:", "a mnist image from the neurify dataset. Args: path: The", "a single image. Returns: The images as a MxCx28x28 numpy", "image from the neurify dataset. Args: path: The path to", "import os import numpy as np def load_img(path: str, img_nums:", "\"r\") as file: for j in range(num_images): line_arr = file.readline().split(\",\")", "f: data = [float(pixel) for pixel in f.readlines()[0].split(\",\")[:-1]] images[idx, :,", "data = [float(pixel) for pixel in f.readlines()[0].split(\",\")[:-1]] images[idx, :, :]", "= \"../../resources/images/cifar10_test.csv\", num_images: int = 100, image_shape: tuple = (3,", "The images as a Mx28x28 numpy array. \"\"\" return load_img(path,", "= np.array(data).reshape(*shape) return images def load_mnist_human_readable(path: str, img_nums: list) ->", "*shape), dtype=float) for idx, i in enumerate(img_nums): file = os.path.join(path,", "\"\"\" Loads the Cifar10 images in human readable format. Args:", "shape of a single image. Returns: The images as a", "neurify dataset. Args: path: The path to the to the", "img_nums, (28, 28)) def load_cifar10_human_readable(path: str, img_nums: list) -> np.array:", "in human readable format. Args: path: The path to the", "Mx3x32x32 numpy array. \"\"\" return load_img(path, img_nums, (3, 32, 32))", "with open(img_csv, \"r\") as file: for j in range(num_images): line_arr", "with the numbers of the images we want to load.", "return images def load_mnist_human_readable(path: str, img_nums: list) -> np.array: \"\"\"", "as file: for j in range(num_images): line_arr = file.readline().split(\",\") targets_array[j]", "The images as a Mx3x32x32 numpy array. \"\"\" return load_img(path,", "= 100 images_array = np.zeros((num_images, np.prod(image_shape)), dtype=np.float32) targets_array = np.zeros(num_images,", "\"\"\" Functions for loading input data. Author: <NAME> <<EMAIL>> \"\"\"", "np.array: \"\"\" Loads the Cifar10 images in human readable format.", "np.array: \"\"\" Loads a mnist image from the neurify dataset.", "dtype=np.float32) targets_array = np.zeros(num_images, dtype=int) with open(img_csv, \"r\") as file:", "str, img_nums: list) -> np.array: \"\"\" Loads the Cifar10 images", "load. Returns: The images as a Mx3x32x32 numpy array. 
\"\"\"", "load_img(path: str, img_nums: list, shape: tuple) -> np.array: \"\"\" Loads", "a MxCx28x28 numpy array. \"\"\" images = np.zeros((len(img_nums), *shape), dtype=float)", "= [float(pixel) for pixel in line_arr[1:]] return images_array.reshape((num_images, *image_shape)), targets_array", "to load. shape: The shape of a single image. Returns:", "for idx, i in enumerate(img_nums): file = os.path.join(path, \"image\" +", "load_images_eran(img_csv: str = \"../../resources/images/cifar10_test.csv\", num_images: int = 100, image_shape: tuple", "images def load_mnist_human_readable(path: str, img_nums: list) -> np.array: \"\"\" Loads", "+ str(i)) with open(file, \"r\") as f: data = [float(pixel)", "of the images we want to load. shape: The shape", "the images we want to load. Returns: The images as", "Loads the Cifar10 images in human readable format. Args: path:", ":] = np.array(data).reshape(*shape) return images def load_mnist_human_readable(path: str, img_nums: list)", "images[idx, :, :] = np.array(data).reshape(*shape) return images def load_mnist_human_readable(path: str,", "csv path Returns: images, targets \"\"\" num_images = 100 images_array", "images_array = np.zeros((num_images, np.prod(image_shape)), dtype=np.float32) targets_array = np.zeros(num_images, dtype=int) with", "\"\"\" Loads a image in the human-readable format. Args: path:", "the to the folder with mnist images. img_nums: A list", "to the to the folder with mnist images. img_nums: A", "image. Returns: The images as a MxCx28x28 numpy array. \"\"\"", "np def load_img(path: str, img_nums: list, shape: tuple) -> np.array:", "\"../../resources/images/cifar10_test.csv\", num_images: int = 100, image_shape: tuple = (3, 32,", "100, image_shape: tuple = (3, 32, 32)) -> tuple: \"\"\"", "tuple) -> np.array: \"\"\" Loads a image in the human-readable", "the numbers of the images we want to load. shape:", "Returns: images, targets \"\"\" num_images = 100 images_array = np.zeros((num_images,", "file = os.path.join(path, \"image\" + str(i)) with open(file, \"r\") as", "= np.zeros(num_images, dtype=int) with open(img_csv, \"r\") as file: for j", "pixel in f.readlines()[0].split(\",\")[:-1]] images[idx, :, :] = np.array(data).reshape(*shape) return images", "human-readable format. Args: path: The path to the to the", "\"\"\" images = np.zeros((len(img_nums), *shape), dtype=float) for idx, i in", "The csv path Returns: images, targets \"\"\" num_images = 100", "as a MxCx28x28 numpy array. \"\"\" images = np.zeros((len(img_nums), *shape),", "to the folder with mnist images. img_nums: A list with", "tuple = (3, 32, 32)) -> tuple: \"\"\" Loads the", "the eran csv. Args: The csv path Returns: images, targets", "data. Author: <NAME> <<EMAIL>> \"\"\" import os import numpy as", "in the human-readable format. Args: path: The path to the", "for loading input data. Author: <NAME> <<EMAIL>> \"\"\" import os", "input data. Author: <NAME> <<EMAIL>> \"\"\" import os import numpy", "images as a Mx28x28 numpy array. \"\"\" return load_img(path, img_nums,", "int = 100, image_shape: tuple = (3, 32, 32)) ->", "tuple: \"\"\" Loads the images from the eran csv. Args:", "file.readline().split(\",\") targets_array[j] = int(line_arr[0]) images_array[j] = [float(pixel) for pixel in", "= int(line_arr[0]) images_array[j] = [float(pixel) for pixel in line_arr[1:]] return", "Author: <NAME> <<EMAIL>> \"\"\" import os import numpy as np", "of a single image. 
Returns: The images as a MxCx28x28", "= file.readline().split(\",\") targets_array[j] = int(line_arr[0]) images_array[j] = [float(pixel) for pixel", "= os.path.join(path, \"image\" + str(i)) with open(file, \"r\") as f:", "we want to load. shape: The shape of a single", "array. \"\"\" return load_img(path, img_nums, (3, 32, 32)) def load_images_eran(img_csv:", "The images as a MxCx28x28 numpy array. \"\"\" images =", ":, :] = np.array(data).reshape(*shape) return images def load_mnist_human_readable(path: str, img_nums:", "a Mx28x28 numpy array. \"\"\" return load_img(path, img_nums, (28, 28))", "images in human readable format. Args: path: The path to", "str, img_nums: list) -> np.array: \"\"\" Loads a mnist image", "mnist image from the neurify dataset. Args: path: The path", "\"\"\" import os import numpy as np def load_img(path: str,", "return load_img(path, img_nums, (3, 32, 32)) def load_images_eran(img_csv: str =", "Returns: The images as a Mx28x28 numpy array. \"\"\" return", "shape: The shape of a single image. Returns: The images", "format. Args: path: The path to the to the folder", "numbers of the images we want to load. Returns: The", "\"r\") as f: data = [float(pixel) for pixel in f.readlines()[0].split(\",\")[:-1]]", "path: The path to the to the folder with mnist", "load. shape: The shape of a single image. Returns: The", "str = \"../../resources/images/cifar10_test.csv\", num_images: int = 100, image_shape: tuple =", "array. \"\"\" images = np.zeros((len(img_nums), *shape), dtype=float) for idx, i", "the numbers of the images we want to load. Returns:", "def load_images_eran(img_csv: str = \"../../resources/images/cifar10_test.csv\", num_images: int = 100, image_shape:", "a Mx3x32x32 numpy array. \"\"\" return load_img(path, img_nums, (3, 32,", "as f: data = [float(pixel) for pixel in f.readlines()[0].split(\",\")[:-1]] images[idx,", "= np.zeros((len(img_nums), *shape), dtype=float) for idx, i in enumerate(img_nums): file", "in enumerate(img_nums): file = os.path.join(path, \"image\" + str(i)) with open(file,", "= [float(pixel) for pixel in f.readlines()[0].split(\",\")[:-1]] images[idx, :, :] =", "images. img_nums: A list with the numbers of the images", "from the eran csv. Args: The csv path Returns: images,", "-> tuple: \"\"\" Loads the images from the eran csv.", "we want to load. Returns: The images as a Mx3x32x32", "img_nums, (3, 32, 32)) def load_images_eran(img_csv: str = \"../../resources/images/cifar10_test.csv\", num_images:", "as a Mx3x32x32 numpy array. \"\"\" return load_img(path, img_nums, (3,", "loading input data. Author: <NAME> <<EMAIL>> \"\"\" import os import", "from the neurify dataset. Args: path: The path to the", "\"\"\" return load_img(path, img_nums, (3, 32, 32)) def load_images_eran(img_csv: str", "os.path.join(path, \"image\" + str(i)) with open(file, \"r\") as f: data", "100 images_array = np.zeros((num_images, np.prod(image_shape)), dtype=np.float32) targets_array = np.zeros(num_images, dtype=int)", "32)) def load_images_eran(img_csv: str = \"../../resources/images/cifar10_test.csv\", num_images: int = 100,", "load_mnist_human_readable(path: str, img_nums: list) -> np.array: \"\"\" Loads a mnist", "Loads the images from the eran csv. Args: The csv", "Returns: The images as a MxCx28x28 numpy array. \"\"\" images", "to load. Returns: The images as a Mx28x28 numpy array.", "human readable format. 
Args: path: The path to the to", "for j in range(num_images): line_arr = file.readline().split(\",\") targets_array[j] = int(line_arr[0])", "images_array[j] = [float(pixel) for pixel in line_arr[1:]] return images_array.reshape((num_images, *image_shape)),", "img_nums: list) -> np.array: \"\"\" Loads a mnist image from", "-> np.array: \"\"\" Loads the Cifar10 images in human readable", "Returns: The images as a Mx3x32x32 numpy array. \"\"\" return", "folder with mnist images. img_nums: A list with the numbers", "idx, i in enumerate(img_nums): file = os.path.join(path, \"image\" + str(i))", "line_arr = file.readline().split(\",\") targets_array[j] = int(line_arr[0]) images_array[j] = [float(pixel) for", "(28, 28)) def load_cifar10_human_readable(path: str, img_nums: list) -> np.array: \"\"\"", "img_nums: A list with the numbers of the images we", "Loads a image in the human-readable format. Args: path: The", "as a Mx28x28 numpy array. \"\"\" return load_img(path, img_nums, (28,", "load_cifar10_human_readable(path: str, img_nums: list) -> np.array: \"\"\" Loads the Cifar10", "csv. Args: The csv path Returns: images, targets \"\"\" num_images", "path Returns: images, targets \"\"\" num_images = 100 images_array =", "str, img_nums: list, shape: tuple) -> np.array: \"\"\" Loads a", "<NAME> <<EMAIL>> \"\"\" import os import numpy as np def", "def load_img(path: str, img_nums: list, shape: tuple) -> np.array: \"\"\"", "of the images we want to load. Returns: The images", "load. Returns: The images as a Mx28x28 numpy array. \"\"\"", "num_images: int = 100, image_shape: tuple = (3, 32, 32))", "range(num_images): line_arr = file.readline().split(\",\") targets_array[j] = int(line_arr[0]) images_array[j] = [float(pixel)", "images from the eran csv. Args: The csv path Returns:", "in f.readlines()[0].split(\",\")[:-1]] images[idx, :, :] = np.array(data).reshape(*shape) return images def", "images we want to load. Returns: The images as a", "list) -> np.array: \"\"\" Loads the Cifar10 images in human", "image_shape: tuple = (3, 32, 32)) -> tuple: \"\"\" Loads", "int(line_arr[0]) images_array[j] = [float(pixel) for pixel in line_arr[1:]] return images_array.reshape((num_images,", "as np def load_img(path: str, img_nums: list, shape: tuple) ->", "load_img(path, img_nums, (28, 28)) def load_cifar10_human_readable(path: str, img_nums: list) ->", "the images from the eran csv. Args: The csv path", "array. \"\"\" return load_img(path, img_nums, (28, 28)) def load_cifar10_human_readable(path: str,", "eran csv. Args: The csv path Returns: images, targets \"\"\"", "list, shape: tuple) -> np.array: \"\"\" Loads a image in", "np.array: \"\"\" Loads a image in the human-readable format. Args:", "images as a MxCx28x28 numpy array. \"\"\" images = np.zeros((len(img_nums),", "numpy as np def load_img(path: str, img_nums: list, shape: tuple)", "(3, 32, 32)) -> tuple: \"\"\" Loads the images from", "np.zeros((num_images, np.prod(image_shape)), dtype=np.float32) targets_array = np.zeros(num_images, dtype=int) with open(img_csv, \"r\")", "The shape of a single image. Returns: The images as", "\"\"\" Loads a mnist image from the neurify dataset. Args:", "np.array(data).reshape(*shape) return images def load_mnist_human_readable(path: str, img_nums: list) -> np.array:", "image in the human-readable format. Args: path: The path to", "images we want to load. shape: The shape of a", "path to the to the folder with mnist images. img_nums:", "want to load. 
shape: The shape of a single image.", "dtype=float) for idx, i in enumerate(img_nums): file = os.path.join(path, \"image\"", "enumerate(img_nums): file = os.path.join(path, \"image\" + str(i)) with open(file, \"r\")", "with mnist images. img_nums: A list with the numbers of", "\"\"\" Loads the images from the eran csv. Args: The", "32)) -> tuple: \"\"\" Loads the images from the eran", "28)) def load_cifar10_human_readable(path: str, img_nums: list) -> np.array: \"\"\" Loads", "dtype=int) with open(img_csv, \"r\") as file: for j in range(num_images):", "j in range(num_images): line_arr = file.readline().split(\",\") targets_array[j] = int(line_arr[0]) images_array[j]", "= np.zeros((num_images, np.prod(image_shape)), dtype=np.float32) targets_array = np.zeros(num_images, dtype=int) with open(img_csv,", "np.zeros((len(img_nums), *shape), dtype=float) for idx, i in enumerate(img_nums): file =", "load_img(path, img_nums, (3, 32, 32)) def load_images_eran(img_csv: str = \"../../resources/images/cifar10_test.csv\",", "= (3, 32, 32)) -> tuple: \"\"\" Loads the images", "single image. Returns: The images as a MxCx28x28 numpy array.", "Cifar10 images in human readable format. Args: path: The path", "numpy array. \"\"\" return load_img(path, img_nums, (3, 32, 32)) def", "images = np.zeros((len(img_nums), *shape), dtype=float) for idx, i in enumerate(img_nums):", "img_nums: list, shape: tuple) -> np.array: \"\"\" Loads a image", "numbers of the images we want to load. shape: The", "targets_array[j] = int(line_arr[0]) images_array[j] = [float(pixel) for pixel in line_arr[1:]]", "os import numpy as np def load_img(path: str, img_nums: list,", "32, 32)) -> tuple: \"\"\" Loads the images from the", "want to load. Returns: The images as a Mx28x28 numpy", "targets_array = np.zeros(num_images, dtype=int) with open(img_csv, \"r\") as file: for", "Mx28x28 numpy array. \"\"\" return load_img(path, img_nums, (28, 28)) def", "readable format. Args: path: The path to the to the", "the folder with mnist images. img_nums: A list with the", "def load_mnist_human_readable(path: str, img_nums: list) -> np.array: \"\"\" Loads a", "[float(pixel) for pixel in f.readlines()[0].split(\",\")[:-1]] images[idx, :, :] = np.array(data).reshape(*shape)", "in range(num_images): line_arr = file.readline().split(\",\") targets_array[j] = int(line_arr[0]) images_array[j] =", "for pixel in f.readlines()[0].split(\",\")[:-1]] images[idx, :, :] = np.array(data).reshape(*shape) return", "Functions for loading input data. Author: <NAME> <<EMAIL>> \"\"\" import", "img_nums: list) -> np.array: \"\"\" Loads the Cifar10 images in", "targets \"\"\" num_images = 100 images_array = np.zeros((num_images, np.prod(image_shape)), dtype=np.float32)", "num_images = 100 images_array = np.zeros((num_images, np.prod(image_shape)), dtype=np.float32) targets_array =", "<<EMAIL>> \"\"\" import os import numpy as np def load_img(path:", "np.prod(image_shape)), dtype=np.float32) targets_array = np.zeros(num_images, dtype=int) with open(img_csv, \"r\") as", "-> np.array: \"\"\" Loads a mnist image from the neurify", "the neurify dataset. Args: path: The path to the to", "\"image\" + str(i)) with open(file, \"r\") as f: data =", "numpy array. 
\"\"\" return load_img(path, img_nums, (28, 28)) def load_cifar10_human_readable(path:", "list with the numbers of the images we want to", "= 100, image_shape: tuple = (3, 32, 32)) -> tuple:", "(3, 32, 32)) def load_images_eran(img_csv: str = \"../../resources/images/cifar10_test.csv\", num_images: int", "\"\"\" num_images = 100 images_array = np.zeros((num_images, np.prod(image_shape)), dtype=np.float32) targets_array", "to load. Returns: The images as a Mx3x32x32 numpy array.", "mnist images. img_nums: A list with the numbers of the", "np.zeros(num_images, dtype=int) with open(img_csv, \"r\") as file: for j in", "open(img_csv, \"r\") as file: for j in range(num_images): line_arr =", "MxCx28x28 numpy array. \"\"\" images = np.zeros((len(img_nums), *shape), dtype=float) for", "\"\"\" return load_img(path, img_nums, (28, 28)) def load_cifar10_human_readable(path: str, img_nums:", "def load_cifar10_human_readable(path: str, img_nums: list) -> np.array: \"\"\" Loads the", "shape: tuple) -> np.array: \"\"\" Loads a image in the", "The path to the to the folder with mnist images.", "we want to load. Returns: The images as a Mx28x28", "dataset. Args: path: The path to the to the folder", "Args: The csv path Returns: images, targets \"\"\" num_images =", "Args: path: The path to the to the folder with", "32, 32)) def load_images_eran(img_csv: str = \"../../resources/images/cifar10_test.csv\", num_images: int =", "A list with the numbers of the images we want", "images, targets \"\"\" num_images = 100 images_array = np.zeros((num_images, np.prod(image_shape)),", "i in enumerate(img_nums): file = os.path.join(path, \"image\" + str(i)) with" ]
[ "= QLabel(self.frame) self.label.setObjectName(u\"label\") self.label.setGeometry(QRect(0, 0, 720, 425)) self.label.setLineWidth(0) self.label.setPixmap(QPixmap(u\"img/SS_logo.jpg\")) self.label.setIndent(0)", "self.label.setGeometry(QRect(0, 0, 720, 425)) self.label.setLineWidth(0) self.label.setPixmap(QPixmap(u\"img/SS_logo.jpg\")) self.label.setIndent(0) self.progressBar = QProgressBar(self.frame)", "Form generated from reading UI file 'splash_screen.ui' ## ## Created", "330, 591, 41)) self.progressBar.setStyleSheet(u\"QProgressBar {\\n\" \" background-color:rgb(149, 165, 166);\\n\" \"", "QVBoxLayout(self.centralwidget) self.verticalLayout.setSpacing(0) self.verticalLayout.setObjectName(u\"verticalLayout\") self.verticalLayout.setContentsMargins(0, 0, 0, 0) self.frame = QFrame(self.centralwidget)", "center;\\n\" \"}\\n\" \"QProgressBar::chunk {\\n\" \" border-radius: 10px;\\n\" \" background-color: qlineargradient(spread:pad,", "self.progressBar.setStyleSheet(u\"QProgressBar {\\n\" \" background-color:rgb(149, 165, 166);\\n\" \" border-style: none;\\n\" \"", "this file will be lost when recompiling UI file! ################################################################################", "## Created by: Qt User Interface Compiler version 5.15.1 ##", "0, 0) self.frame = QFrame(self.centralwidget) self.frame.setObjectName(u\"frame\") self.frame.setFrameShape(QFrame.StyledPanel) self.frame.setFrameShadow(QFrame.Raised) self.frame.setLineWidth(0) self.label", "<reponame>hirokiyaginuma/scriptspinner-software # -*- coding: utf-8 -*- ################################################################################ ## Form generated", "self.frame.setFrameShape(QFrame.StyledPanel) self.frame.setFrameShadow(QFrame.Raised) self.frame.setLineWidth(0) self.label = QLabel(self.frame) self.label.setObjectName(u\"label\") self.label.setGeometry(QRect(0, 0, 720,", "self.centralwidget.setObjectName(u\"centralwidget\") self.verticalLayout = QVBoxLayout(self.centralwidget) self.verticalLayout.setSpacing(0) self.verticalLayout.setObjectName(u\"verticalLayout\") self.verticalLayout.setContentsMargins(0, 0, 0, 0)", "file 'splash_screen.ui' ## ## Created by: Qt User Interface Compiler", "= QVBoxLayout(self.centralwidget) self.verticalLayout.setSpacing(0) self.verticalLayout.setObjectName(u\"verticalLayout\") self.verticalLayout.setContentsMargins(0, 0, 0, 0) self.frame =", "{\\n\" \" border-radius: 10px;\\n\" \" background-color: qlineargradient(spread:pad, x1:0, y1:0, x2:1,", "rgba(156, 69, 255, 255));\\n\" \"}\") self.progressBar.setValue(24) self.verticalLayout.addWidget(self.frame) Splash_Screen.setCentralWidget(self.centralwidget) self.retranslateUi(Splash_Screen) QMetaObject.connectSlotsByName(Splash_Screen)", "self.verticalLayout = QVBoxLayout(self.centralwidget) self.verticalLayout.setSpacing(0) self.verticalLayout.setObjectName(u\"verticalLayout\") self.verticalLayout.setContentsMargins(0, 0, 0, 0) self.frame", "self.progressBar.setObjectName(u\"progressBar\") self.progressBar.setGeometry(QRect(70, 330, 591, 41)) self.progressBar.setStyleSheet(u\"QProgressBar {\\n\" \" background-color:rgb(149, 165,", "border-radius: 10px;\\n\" \" background-color: qlineargradient(spread:pad, x1:0, y1:0, x2:1, y2:0, stop:0", "-*- ################################################################################ ## Form generated from reading UI file 'splash_screen.ui'", "Splash_Screen.resize(720, 425) self.centralwidget = QWidget(Splash_Screen) self.centralwidget.setObjectName(u\"centralwidget\") self.verticalLayout = 
QVBoxLayout(self.centralwidget) self.verticalLayout.setSpacing(0)", "10px;\\n\" \" background-color: qlineargradient(spread:pad, x1:0, y1:0, x2:1, y2:0, stop:0 rgba(210,", "255), stop:1 rgba(156, 69, 255, 255));\\n\" \"}\") self.progressBar.setValue(24) self.verticalLayout.addWidget(self.frame) Splash_Screen.setCentralWidget(self.centralwidget)", "QWidget(Splash_Screen) self.centralwidget.setObjectName(u\"centralwidget\") self.verticalLayout = QVBoxLayout(self.centralwidget) self.verticalLayout.setSpacing(0) self.verticalLayout.setObjectName(u\"verticalLayout\") self.verticalLayout.setContentsMargins(0, 0, 0,", "self.progressBar = QProgressBar(self.frame) self.progressBar.setObjectName(u\"progressBar\") self.progressBar.setGeometry(QRect(70, 330, 591, 41)) self.progressBar.setStyleSheet(u\"QProgressBar {\\n\"", "file will be lost when recompiling UI file! ################################################################################ from", "self.frame = QFrame(self.centralwidget) self.frame.setObjectName(u\"frame\") self.frame.setFrameShape(QFrame.StyledPanel) self.frame.setFrameShadow(QFrame.Raised) self.frame.setLineWidth(0) self.label = QLabel(self.frame)", "\" border-style: none;\\n\" \" border-radius: 10px;\\n\" \" text-align: center;\\n\" \"}\\n\"", "## ## Created by: Qt User Interface Compiler version 5.15.1", "################################################################################ ## Form generated from reading UI file 'splash_screen.ui' ##", "background-color:rgb(149, 165, 166);\\n\" \" border-style: none;\\n\" \" border-radius: 10px;\\n\" \"", "\"QProgressBar::chunk {\\n\" \" border-radius: 10px;\\n\" \" background-color: qlineargradient(spread:pad, x1:0, y1:0,", "none;\\n\" \" border-radius: 10px;\\n\" \" text-align: center;\\n\" \"}\\n\" \"QProgressBar::chunk {\\n\"", "setupUi(self, Splash_Screen): if not Splash_Screen.objectName(): Splash_Screen.setObjectName(u\"Splash_Screen\") Splash_Screen.resize(720, 425) self.centralwidget =", "coding: utf-8 -*- ################################################################################ ## Form generated from reading UI", "## WARNING! All changes made in this file will be", "will be lost when recompiling UI file! ################################################################################ from PySide2.QtCore", "Splash_Screen.objectName(): Splash_Screen.setObjectName(u\"Splash_Screen\") Splash_Screen.resize(720, 425) self.centralwidget = QWidget(Splash_Screen) self.centralwidget.setObjectName(u\"centralwidget\") self.verticalLayout =", "version 5.15.1 ## ## WARNING! All changes made in this", "165, 166);\\n\" \" border-style: none;\\n\" \" border-radius: 10px;\\n\" \" text-align:", "text-align: center;\\n\" \"}\\n\" \"QProgressBar::chunk {\\n\" \" border-radius: 10px;\\n\" \" background-color:", "PySide2.QtWidgets import * class Ui_Splash_Screen(object): def setupUi(self, Splash_Screen): if not", "Qt User Interface Compiler version 5.15.1 ## ## WARNING! 
All", "import * from PySide2.QtWidgets import * class Ui_Splash_Screen(object): def setupUi(self,", "self.frame.setLineWidth(0) self.label = QLabel(self.frame) self.label.setObjectName(u\"label\") self.label.setGeometry(QRect(0, 0, 720, 425)) self.label.setLineWidth(0)", "self.label.setPixmap(QPixmap(u\"img/SS_logo.jpg\")) self.label.setIndent(0) self.progressBar = QProgressBar(self.frame) self.progressBar.setObjectName(u\"progressBar\") self.progressBar.setGeometry(QRect(70, 330, 591, 41))", "166);\\n\" \" border-style: none;\\n\" \" border-radius: 10px;\\n\" \" text-align: center;\\n\"", "0, 0, 0) self.frame = QFrame(self.centralwidget) self.frame.setObjectName(u\"frame\") self.frame.setFrameShape(QFrame.StyledPanel) self.frame.setFrameShadow(QFrame.Raised) self.frame.setLineWidth(0)", "\" border-radius: 10px;\\n\" \" text-align: center;\\n\" \"}\\n\" \"QProgressBar::chunk {\\n\" \"", "x1:0, y1:0, x2:1, y2:0, stop:0 rgba(210, 157, 255, 255), stop:1", "0, 720, 425)) self.label.setLineWidth(0) self.label.setPixmap(QPixmap(u\"img/SS_logo.jpg\")) self.label.setIndent(0) self.progressBar = QProgressBar(self.frame) self.progressBar.setObjectName(u\"progressBar\")", "'splash_screen.ui' ## ## Created by: Qt User Interface Compiler version", "import * from PySide2.QtGui import * from PySide2.QtWidgets import *", "background-color: qlineargradient(spread:pad, x1:0, y1:0, x2:1, y2:0, stop:0 rgba(210, 157, 255,", "border-radius: 10px;\\n\" \" text-align: center;\\n\" \"}\\n\" \"QProgressBar::chunk {\\n\" \" border-radius:", "Compiler version 5.15.1 ## ## WARNING! All changes made in", "157, 255, 255), stop:1 rgba(156, 69, 255, 255));\\n\" \"}\") self.progressBar.setValue(24)", "self.verticalLayout.addWidget(self.frame) Splash_Screen.setCentralWidget(self.centralwidget) self.retranslateUi(Splash_Screen) QMetaObject.connectSlotsByName(Splash_Screen) # setupUi def retranslateUi(self, Splash_Screen): Splash_Screen.setWindowTitle(QCoreApplication.translate(\"Splash_Screen\",", "qlineargradient(spread:pad, x1:0, y1:0, x2:1, y2:0, stop:0 rgba(210, 157, 255, 255),", "255, 255));\\n\" \"}\") self.progressBar.setValue(24) self.verticalLayout.addWidget(self.frame) Splash_Screen.setCentralWidget(self.centralwidget) self.retranslateUi(Splash_Screen) QMetaObject.connectSlotsByName(Splash_Screen) # setupUi", "\" border-radius: 10px;\\n\" \" background-color: qlineargradient(spread:pad, x1:0, y1:0, x2:1, y2:0,", "setupUi def retranslateUi(self, Splash_Screen): Splash_Screen.setWindowTitle(QCoreApplication.translate(\"Splash_Screen\", u\"MainWindow\", None)) self.label.setText(\"\") # retranslateUi", "* class Ui_Splash_Screen(object): def setupUi(self, Splash_Screen): if not Splash_Screen.objectName(): Splash_Screen.setObjectName(u\"Splash_Screen\")", "self.verticalLayout.setSpacing(0) self.verticalLayout.setObjectName(u\"verticalLayout\") self.verticalLayout.setContentsMargins(0, 0, 0, 0) self.frame = QFrame(self.centralwidget) self.frame.setObjectName(u\"frame\")", "= QFrame(self.centralwidget) self.frame.setObjectName(u\"frame\") self.frame.setFrameShape(QFrame.StyledPanel) self.frame.setFrameShadow(QFrame.Raised) self.frame.setLineWidth(0) self.label = QLabel(self.frame) self.label.setObjectName(u\"label\")", "self.retranslateUi(Splash_Screen) QMetaObject.connectSlotsByName(Splash_Screen) # setupUi def retranslateUi(self, Splash_Screen): Splash_Screen.setWindowTitle(QCoreApplication.translate(\"Splash_Screen\", u\"MainWindow\", None))", "QProgressBar(self.frame) self.progressBar.setObjectName(u\"progressBar\") 
self.progressBar.setGeometry(QRect(70, 330, 591, 41)) self.progressBar.setStyleSheet(u\"QProgressBar {\\n\" \" background-color:rgb(149,", "= QWidget(Splash_Screen) self.centralwidget.setObjectName(u\"centralwidget\") self.verticalLayout = QVBoxLayout(self.centralwidget) self.verticalLayout.setSpacing(0) self.verticalLayout.setObjectName(u\"verticalLayout\") self.verticalLayout.setContentsMargins(0, 0,", "################################################################################ from PySide2.QtCore import * from PySide2.QtGui import * from", "425) self.centralwidget = QWidget(Splash_Screen) self.centralwidget.setObjectName(u\"centralwidget\") self.verticalLayout = QVBoxLayout(self.centralwidget) self.verticalLayout.setSpacing(0) self.verticalLayout.setObjectName(u\"verticalLayout\")", "stop:0 rgba(210, 157, 255, 255), stop:1 rgba(156, 69, 255, 255));\\n\"", "Splash_Screen.setCentralWidget(self.centralwidget) self.retranslateUi(Splash_Screen) QMetaObject.connectSlotsByName(Splash_Screen) # setupUi def retranslateUi(self, Splash_Screen): Splash_Screen.setWindowTitle(QCoreApplication.translate(\"Splash_Screen\", u\"MainWindow\",", "y1:0, x2:1, y2:0, stop:0 rgba(210, 157, 255, 255), stop:1 rgba(156,", "\" text-align: center;\\n\" \"}\\n\" \"QProgressBar::chunk {\\n\" \" border-radius: 10px;\\n\" \"", "# setupUi def retranslateUi(self, Splash_Screen): Splash_Screen.setWindowTitle(QCoreApplication.translate(\"Splash_Screen\", u\"MainWindow\", None)) self.label.setText(\"\") #", "UI file! ################################################################################ from PySide2.QtCore import * from PySide2.QtGui import", "from PySide2.QtGui import * from PySide2.QtWidgets import * class Ui_Splash_Screen(object):", "= QProgressBar(self.frame) self.progressBar.setObjectName(u\"progressBar\") self.progressBar.setGeometry(QRect(70, 330, 591, 41)) self.progressBar.setStyleSheet(u\"QProgressBar {\\n\" \"", "self.label.setIndent(0) self.progressBar = QProgressBar(self.frame) self.progressBar.setObjectName(u\"progressBar\") self.progressBar.setGeometry(QRect(70, 330, 591, 41)) self.progressBar.setStyleSheet(u\"QProgressBar", "Splash_Screen.setObjectName(u\"Splash_Screen\") Splash_Screen.resize(720, 425) self.centralwidget = QWidget(Splash_Screen) self.centralwidget.setObjectName(u\"centralwidget\") self.verticalLayout = QVBoxLayout(self.centralwidget)", "made in this file will be lost when recompiling UI", "from PySide2.QtWidgets import * class Ui_Splash_Screen(object): def setupUi(self, Splash_Screen): if", "by: Qt User Interface Compiler version 5.15.1 ## ## WARNING!", "720, 425)) self.label.setLineWidth(0) self.label.setPixmap(QPixmap(u\"img/SS_logo.jpg\")) self.label.setIndent(0) self.progressBar = QProgressBar(self.frame) self.progressBar.setObjectName(u\"progressBar\") self.progressBar.setGeometry(QRect(70,", "{\\n\" \" background-color:rgb(149, 165, 166);\\n\" \" border-style: none;\\n\" \" border-radius:", "69, 255, 255));\\n\" \"}\") self.progressBar.setValue(24) self.verticalLayout.addWidget(self.frame) Splash_Screen.setCentralWidget(self.centralwidget) self.retranslateUi(Splash_Screen) QMetaObject.connectSlotsByName(Splash_Screen) #", "591, 41)) self.progressBar.setStyleSheet(u\"QProgressBar {\\n\" \" background-color:rgb(149, 165, 166);\\n\" \" border-style:", "recompiling UI file! 
################################################################################ from PySide2.QtCore import * from PySide2.QtGui", "not Splash_Screen.objectName(): Splash_Screen.setObjectName(u\"Splash_Screen\") Splash_Screen.resize(720, 425) self.centralwidget = QWidget(Splash_Screen) self.centralwidget.setObjectName(u\"centralwidget\") self.verticalLayout", "x2:1, y2:0, stop:0 rgba(210, 157, 255, 255), stop:1 rgba(156, 69,", "PySide2.QtGui import * from PySide2.QtWidgets import * class Ui_Splash_Screen(object): def", "class Ui_Splash_Screen(object): def setupUi(self, Splash_Screen): if not Splash_Screen.objectName(): Splash_Screen.setObjectName(u\"Splash_Screen\") Splash_Screen.resize(720,", "reading UI file 'splash_screen.ui' ## ## Created by: Qt User", "self.centralwidget = QWidget(Splash_Screen) self.centralwidget.setObjectName(u\"centralwidget\") self.verticalLayout = QVBoxLayout(self.centralwidget) self.verticalLayout.setSpacing(0) self.verticalLayout.setObjectName(u\"verticalLayout\") self.verticalLayout.setContentsMargins(0,", "Created by: Qt User Interface Compiler version 5.15.1 ## ##", "self.frame.setObjectName(u\"frame\") self.frame.setFrameShape(QFrame.StyledPanel) self.frame.setFrameShadow(QFrame.Raised) self.frame.setLineWidth(0) self.label = QLabel(self.frame) self.label.setObjectName(u\"label\") self.label.setGeometry(QRect(0, 0,", "self.label = QLabel(self.frame) self.label.setObjectName(u\"label\") self.label.setGeometry(QRect(0, 0, 720, 425)) self.label.setLineWidth(0) self.label.setPixmap(QPixmap(u\"img/SS_logo.jpg\"))", "generated from reading UI file 'splash_screen.ui' ## ## Created by:", "Ui_Splash_Screen(object): def setupUi(self, Splash_Screen): if not Splash_Screen.objectName(): Splash_Screen.setObjectName(u\"Splash_Screen\") Splash_Screen.resize(720, 425)", "self.progressBar.setValue(24) self.verticalLayout.addWidget(self.frame) Splash_Screen.setCentralWidget(self.centralwidget) self.retranslateUi(Splash_Screen) QMetaObject.connectSlotsByName(Splash_Screen) # setupUi def retranslateUi(self, Splash_Screen):", "UI file 'splash_screen.ui' ## ## Created by: Qt User Interface", "when recompiling UI file! 
################################################################################ from PySide2.QtCore import * from", "\"}\") self.progressBar.setValue(24) self.verticalLayout.addWidget(self.frame) Splash_Screen.setCentralWidget(self.centralwidget) self.retranslateUi(Splash_Screen) QMetaObject.connectSlotsByName(Splash_Screen) # setupUi def retranslateUi(self,", "y2:0, stop:0 rgba(210, 157, 255, 255), stop:1 rgba(156, 69, 255,", "# -*- coding: utf-8 -*- ################################################################################ ## Form generated from", "PySide2.QtCore import * from PySide2.QtGui import * from PySide2.QtWidgets import", "from reading UI file 'splash_screen.ui' ## ## Created by: Qt", "\"}\\n\" \"QProgressBar::chunk {\\n\" \" border-radius: 10px;\\n\" \" background-color: qlineargradient(spread:pad, x1:0,", "All changes made in this file will be lost when", "QLabel(self.frame) self.label.setObjectName(u\"label\") self.label.setGeometry(QRect(0, 0, 720, 425)) self.label.setLineWidth(0) self.label.setPixmap(QPixmap(u\"img/SS_logo.jpg\")) self.label.setIndent(0) self.progressBar", "import * class Ui_Splash_Screen(object): def setupUi(self, Splash_Screen): if not Splash_Screen.objectName():", "255, 255), stop:1 rgba(156, 69, 255, 255));\\n\" \"}\") self.progressBar.setValue(24) self.verticalLayout.addWidget(self.frame)", "changes made in this file will be lost when recompiling", "-*- coding: utf-8 -*- ################################################################################ ## Form generated from reading", "self.frame.setFrameShadow(QFrame.Raised) self.frame.setLineWidth(0) self.label = QLabel(self.frame) self.label.setObjectName(u\"label\") self.label.setGeometry(QRect(0, 0, 720, 425))", "self.label.setLineWidth(0) self.label.setPixmap(QPixmap(u\"img/SS_logo.jpg\")) self.label.setIndent(0) self.progressBar = QProgressBar(self.frame) self.progressBar.setObjectName(u\"progressBar\") self.progressBar.setGeometry(QRect(70, 330, 591,", "\" background-color:rgb(149, 165, 166);\\n\" \" border-style: none;\\n\" \" border-radius: 10px;\\n\"", "border-style: none;\\n\" \" border-radius: 10px;\\n\" \" text-align: center;\\n\" \"}\\n\" \"QProgressBar::chunk", "Interface Compiler version 5.15.1 ## ## WARNING! All changes made", "self.label.setObjectName(u\"label\") self.label.setGeometry(QRect(0, 0, 720, 425)) self.label.setLineWidth(0) self.label.setPixmap(QPixmap(u\"img/SS_logo.jpg\")) self.label.setIndent(0) self.progressBar =", "* from PySide2.QtGui import * from PySide2.QtWidgets import * class", "425)) self.label.setLineWidth(0) self.label.setPixmap(QPixmap(u\"img/SS_logo.jpg\")) self.label.setIndent(0) self.progressBar = QProgressBar(self.frame) self.progressBar.setObjectName(u\"progressBar\") self.progressBar.setGeometry(QRect(70, 330,", "utf-8 -*- ################################################################################ ## Form generated from reading UI file", "from PySide2.QtCore import * from PySide2.QtGui import * from PySide2.QtWidgets", "WARNING! All changes made in this file will be lost", "5.15.1 ## ## WARNING! All changes made in this file", "if not Splash_Screen.objectName(): Splash_Screen.setObjectName(u\"Splash_Screen\") Splash_Screen.resize(720, 425) self.centralwidget = QWidget(Splash_Screen) self.centralwidget.setObjectName(u\"centralwidget\")", "file! 
################################################################################ from PySide2.QtCore import * from PySide2.QtGui import *", "\" background-color: qlineargradient(spread:pad, x1:0, y1:0, x2:1, y2:0, stop:0 rgba(210, 157,", "be lost when recompiling UI file! ################################################################################ from PySide2.QtCore import", "QFrame(self.centralwidget) self.frame.setObjectName(u\"frame\") self.frame.setFrameShape(QFrame.StyledPanel) self.frame.setFrameShadow(QFrame.Raised) self.frame.setLineWidth(0) self.label = QLabel(self.frame) self.label.setObjectName(u\"label\") self.label.setGeometry(QRect(0,", "10px;\\n\" \" text-align: center;\\n\" \"}\\n\" \"QProgressBar::chunk {\\n\" \" border-radius: 10px;\\n\"", "self.verticalLayout.setObjectName(u\"verticalLayout\") self.verticalLayout.setContentsMargins(0, 0, 0, 0) self.frame = QFrame(self.centralwidget) self.frame.setObjectName(u\"frame\") self.frame.setFrameShape(QFrame.StyledPanel)", "## ## WARNING! All changes made in this file will", "Splash_Screen): if not Splash_Screen.objectName(): Splash_Screen.setObjectName(u\"Splash_Screen\") Splash_Screen.resize(720, 425) self.centralwidget = QWidget(Splash_Screen)", "self.verticalLayout.setContentsMargins(0, 0, 0, 0) self.frame = QFrame(self.centralwidget) self.frame.setObjectName(u\"frame\") self.frame.setFrameShape(QFrame.StyledPanel) self.frame.setFrameShadow(QFrame.Raised)", "lost when recompiling UI file! ################################################################################ from PySide2.QtCore import *", "41)) self.progressBar.setStyleSheet(u\"QProgressBar {\\n\" \" background-color:rgb(149, 165, 166);\\n\" \" border-style: none;\\n\"", "in this file will be lost when recompiling UI file!", "0) self.frame = QFrame(self.centralwidget) self.frame.setObjectName(u\"frame\") self.frame.setFrameShape(QFrame.StyledPanel) self.frame.setFrameShadow(QFrame.Raised) self.frame.setLineWidth(0) self.label =", "stop:1 rgba(156, 69, 255, 255));\\n\" \"}\") self.progressBar.setValue(24) self.verticalLayout.addWidget(self.frame) Splash_Screen.setCentralWidget(self.centralwidget) self.retranslateUi(Splash_Screen)", "QMetaObject.connectSlotsByName(Splash_Screen) # setupUi def retranslateUi(self, Splash_Screen): Splash_Screen.setWindowTitle(QCoreApplication.translate(\"Splash_Screen\", u\"MainWindow\", None)) self.label.setText(\"\")", "self.progressBar.setGeometry(QRect(70, 330, 591, 41)) self.progressBar.setStyleSheet(u\"QProgressBar {\\n\" \" background-color:rgb(149, 165, 166);\\n\"", "rgba(210, 157, 255, 255), stop:1 rgba(156, 69, 255, 255));\\n\" \"}\")", "255));\\n\" \"}\") self.progressBar.setValue(24) self.verticalLayout.addWidget(self.frame) Splash_Screen.setCentralWidget(self.centralwidget) self.retranslateUi(Splash_Screen) QMetaObject.connectSlotsByName(Splash_Screen) # setupUi def", "## Form generated from reading UI file 'splash_screen.ui' ## ##", "User Interface Compiler version 5.15.1 ## ## WARNING! All changes", "def setupUi(self, Splash_Screen): if not Splash_Screen.objectName(): Splash_Screen.setObjectName(u\"Splash_Screen\") Splash_Screen.resize(720, 425) self.centralwidget", "* from PySide2.QtWidgets import * class Ui_Splash_Screen(object): def setupUi(self, Splash_Screen):" ]
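# Usage sketch (an assumption, not part of the generated file): the conventional
# way a pyside2-uic generated Ui_* class is attached to a window. It assumes the
# Ui_Splash_Screen class above is importable or defined in the same module, and
# that img/SS_logo.jpg exists relative to the working directory.
import sys

from PySide2.QtWidgets import QApplication, QMainWindow

if __name__ == "__main__":
    app = QApplication(sys.argv)
    window = QMainWindow()
    ui = Ui_Splash_Screen()
    ui.setupUi(window)          # builds the frame, logo label and progress bar
    ui.progressBar.setValue(0)  # progress is normally driven by a QTimer elsewhere
    window.show()
    sys.exit(app.exec_())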
[ "as a column Notes ----- This function is deprecated in", "return True else: return False def get_table(self, table_name): return self.meta.tables.get(table_name)", "'mysql': warnings.warn(_MYSQL_WARNING, FutureWarning) return PandasSQLLegacy(con, flavor, is_cursor=is_cursor) except ImportError: if", "np.datetime64 is also a subclass of np.number. pytype_name = \"datetime\"", "} } _SAFE_NAMES_WARNING = (\"The spaces in these column names", "= _convert_params(sql, params) return pandas_sql.execute(*args) #------------------------------------------------------------------------------ #--- Deprecated tquery and", "Helper functions def _convert_params(sql, params): \"\"\"convert sql and params args", "_create_table_statement(self): \"Return a CREATE TABLE statement to suit the contents", "SQL table in database con : SQLAlchemy engine Sqlite DBAPI", "default 'fail' - fail: If table exists, do nothing. -", "args = _convert_params(sql, params) return pandas_sql.execute(*args) #------------------------------------------------------------------------------ #--- Deprecated tquery", "\"'{0}' is not valid for if_exists\".format(if_exists)) else: self.table = self._create_table_statement()", "== 1: # python 3 compat result = list(lzip(*result)[0]) elif", "because converting bool column with None replaces all Nones with", "is not valid for if_exists\".format(if_exists)) else: self.table = self._create_table_statement() self.create()", "date parsing if col_name in parse_dates: try: fmt = parse_dates[col_name]", "self._fetchall_as_list(cursor) cursor.close() data_frame = DataFrame.from_records( data, columns=columns, coerce_float=coerce_float) _parse_date_columns(data_frame, parse_dates)", ": list or dict - List of column names to", "function from 0.13.1. To keep backwards compatibility. When mysql legacy", "self, frame=frame) return str(table.sql_schema()) def get_schema(frame, name, flavor='sqlite', keys=None, con=None):", "plain list is returned. To obtain the same result in", "con, flavor='sqlite', if_exists='fail', index=True, index_label=None): \"\"\" Write records stored in", "tquery(sql, con=con, retry=False) if result and len(result[0]) == 1: #", "DataFrame to a SQL database. Parameters ---------- frame : DataFrame", "return PandasSQLAlchemy(con, meta=meta) else: if flavor == 'mysql': warnings.warn(_MYSQL_WARNING, FutureWarning)", "None: if not isinstance(index_label, list): index_label = [index_label] if len(index_label)", "converted to \" \"underscores.\") class PandasSQLTableLegacy(PandasSQLTable): \"\"\"Patch the PandasSQLTable for", "name): flavor_map = { 'sqlite': (\"SELECT name FROM sqlite_master \"", "a column Notes ----- This function is deprecated in favor", "= [index_label] if len(index_label) != nlevels: raise ValueError( \"Length of", "input (database table name or sql query). See also --------", "supported. index_col : string, optional column name to use as", "parsing string times or is one of (D, s, ns,", "self.con.cursor() try: if kwargs: cur.execute(*args, **kwargs) else: cur.execute(*args) return cur", "labels for the index columns if nlevels == 1 and", "to select from sql table Returns ------- DataFrame See also", "connection object or an SQLAlchemy engine Using SQLAlchemy makes it", "integer size. 
return int if isinstance(sqltype, DateTime): # Caution: np.datetime64", "'date': { 'mysql': 'DATE', 'sqlite': 'TIMESTAMP', }, 'bool': { 'mysql':", "= [col_desc[0] for col_desc in cursor.description] data = self._fetchall_as_list(cursor) cursor.close()", "read_sql_table(table_name, con, index_col=None, coerce_float=True, parse_dates=None, columns=None): \"\"\"Read SQL database table", "'mysql': 'BIGINT', 'sqlite': 'INTEGER', }, 'datetime': { 'mysql': 'DATETIME', 'sqlite':", "col_name = sql_col.name try: df_col = self.frame[col_name] # the type", "has named table. Parameters ---------- table_name: string Name of SQL", "return float if isinstance(sqltype, Integer): # TODO: Refine integer size.", "compatibility, set index=False when not specified index = kwargs.pop('index', False)", "cursor is obtained from connection params: list or tuple, optional", "meta = MetaData(self.engine) meta.reflect(self.engine) self.meta = meta def execute(self, *args,", "raise ValueError( \"'{0}' is not valid for if_exists\".format(if_exists)) else: self.table", "to set as index elif isinstance(index, string_types): return [index] elif", "table_name, index_col=None, coerce_float=True, parse_dates=None, columns=None): table = PandasSQLTable(table_name, self, index=index_col)", "in temp.itertuples(): data = tuple((self.maybe_asscalar(v) for v in t[1:])) data_list.append(data)", "result except Exception as e: # pragma: no cover excName", "Exception as e: # pragma: no cover excName = e.__class__.__name__", "_create_sql_schema(self, frame, table_name): table = PandasSQLTableLegacy(table_name, self, frame=frame) return str(table.sql_schema())", "\"database.\", UserWarning) return Integer elif com.is_float_dtype(arr_or_dtype): return Float elif com.is_integer_dtype(arr_or_dtype):", "is supported. \"\"\" if con is None: if flavor ==", "0.14, spaces were converted to \" \"underscores.\") class PandasSQLTableLegacy(PandasSQLTable): \"\"\"Patch", "return Integer elif com.is_float_dtype(arr_or_dtype): return Float elif com.is_integer_dtype(arr_or_dtype): # TODO:", "list): result = list(result) return result except Exception as e:", "def _create_sql_schema(self, frame, table_name): table = PandasSQLTable(table_name, self, frame=frame) return", "to_datetime(col, coerce=True, unit=format) else: return to_datetime(col, coerce=True, format=format) def _parse_date_columns(data_frame,", "if any(map(pat.search, columns)): warnings.warn(_SAFE_NAMES_WARNING) column_types = [self._sql_type_name(typ) for typ in", "fail: If table exists, do nothing. 
replace: If table exists,", "SQLAlchemy engines.\") def pandasSQL_builder(con, flavor=None, meta=None, is_cursor=False): \"\"\" Convenience function", "+ br_r for column in names] col_names = ','.join(bracketed_names) wildcards", "to set as index coerce_float : boolean, default True Attempt", "from sqlalchemy.types import Integer, Float, Text, Boolean, DateTime, Date, Interval", "\"\"\"CREATE TABLE %(name)s ( %(columns)s %(keystr)s );\"\"\" create_statement = template", "'fail': raise ValueError(\"Table '%s' already exists.\" % name) elif if_exists", "is datetime or col_type is date: if not issubclass(df_col.dtype.type, np.datetime64):", "= ', PRIMARY KEY (%s)' % ','.join(keys) template = \"\"\"CREATE", "\"\"\" # handle non-list entries for parse_dates gracefully if parse_dates", "else: self.flavor = flavor def execute(self, *args, **kwargs): if self.is_cursor:", "False Write DataFrame index as a column Notes ----- This", "= meta def execute(self, *args, **kwargs): \"\"\"Simple passthrough to SQLAlchemy", "columns to be read as such. Supports both string formatted", "on DB-specific API. \"\"\" from __future__ import print_function, division from", "return len(self.execute(query).fetchall()) > 0 def get_table(self, table_name): return None #", "index=True to include index in sql table if index is", "or parse_dates is False: parse_dates = [] if not hasattr(parse_dates,", "'mysql': 'VARCHAR (63)', 'sqlite': 'TEXT', }, 'float': { 'mysql': 'FLOAT',", "Table statement\"\"\" def sql_schema(self): return str(self.table) def create(self): self.pd_sql.execute(self.table) def", "pandas_sql = pandasSQL_builder(con) if isinstance(pandas_sql, PandasSQLLegacy): return pandas_sql.read_sql( sql, index_col=index_col,", "= tuple((self.maybe_asscalar(v) for v in t[1:])) data_list.append(data) cur = self.pd_sql.con.cursor()", "a DataFrame. This function does not support DBAPI connections. Parameters", "i, idx_label in enumerate(self.index[::-1]): columns.insert(0, idx_label) column_types.insert(0, self._sql_type_name(self.frame.index.get_level_values(i).dtype)) flv =", "that table is reflected by SQLAlchemy to do better type", "- fail: If table exists, do nothing. - replace: If", ": SQLAlchemy engine or DBAPI2 connection (legacy mode) Using SQLAlchemy", "None: self.table = self._create_table_statement() else: raise ValueError( \"'{0}' is not", "for x in column_types) keystr = '' if keys is", "Text def _numpy_type(self, sqltype): from sqlalchemy.types import Integer, Float, Boolean,", "``index=False``. - The new ``to_sql`` function supports sqlalchemy engines to", "is None: return ['index'] else: return [l if l is", "name): if self.meta.tables.get(name) is not None: return True else: return", "used column labels for the index columns if nlevels ==", "table exists, drop it, recreate it, and insert data. -", "necessary. 
try: import sqlalchemy if isinstance(con, sqlalchemy.engine.Engine): return PandasSQLAlchemy(con, meta=meta)", "name} query = flavor_map.get(self.flavor) return len(self.execute(query).fetchall()) > 0 def get_table(self,", "return to_datetime(col, coerce=True, format=format) def _parse_date_columns(data_frame, parse_dates): \"\"\" Force non-datetime", "or dict, optional List of parameters to pass to execute", "flavor=flavor, if_exists=if_exists, index=index, **kwargs) # Append wrapped function docstrings read_frame.__doc__", "get_schema(frame, name, flavor='sqlite', keys=None, con=None): \"\"\" Get the SQL db", "x for x in column_types) else: columns = ',\\n '.join('`%s`", "cur = self.pd_sql.con.cursor() cur.executemany(ins, data_list) cur.close() self.pd_sql.con.commit() def _create_table_statement(self): \"Return", "fail: If table exists, do nothing. - replace: If table", "Create if does not exist. \"\"\" table = PandasSQLTableLegacy( name,", "depreciated, use to_sql\", FutureWarning) # for backwards compatibility, set index=False", "KeyError: pass # this column not in results def _sqlalchemy_type(self,", "cur: depreciated, cursor is obtained from connection params: list or", "FutureWarning) return PandasSQLLegacy(con, flavor, is_cursor=is_cursor) except ImportError: if flavor ==", "in cursor.description] data = self._fetchall_as_list(cursor) cursor.close() data_frame = DataFrame.from_records( data,", "exist. index : boolean, default True Write DataFrame index as", "columns.insert(0, Column(idx_label, idx_type, index=True)) return Table(self.name, self.pd_sql.meta, *columns) def _harmonize_columns(self,", "dict((k, self.maybe_asscalar(v)) for k, v in zip(keys, t[1:])) data_list.append(data) self.pd_sql.execute(ins,", "passthrough to SQLAlchemy engine\"\"\" return self.engine.execute(*args, **kwargs) def read_table(self, table_name,", "if retry: return tquery(sql, con=con, retry=False) if result and len(result[0])", "= engine if not meta: from sqlalchemy.schema import MetaData meta", "= self._sqlalchemy_type( self.frame.index.get_level_values(i)) columns.insert(0, Column(idx_label, idx_type, index=True)) return Table(self.name, self.pd_sql.meta,", "tuple or dict, optional List of parameters to pass to", "db table schema for the given frame. Parameters ---------- frame", "new ``to_sql`` function supports sqlalchemy engines to work with different", "such. Supports both string formatted and integer timestamp columns \"\"\"", "col_name in parse_dates: try: fmt = parse_dates[col_name] except TypeError: fmt", "if columns is not None and len(columns) > 0: from", "include index in sql table if index is True: nlevels", "data = tuple((self.maybe_asscalar(v) for v in t[1:])) data_list.append(data) cur =", "else: cur = self.con.cursor() try: if kwargs: cur.execute(*args, **kwargs) else:", "DataFrame, Series from pandas.core.base import PandasObject from pandas.tseries.tools import to_datetime", "return table.read(coerce_float=coerce_float, parse_dates=parse_dates, columns=columns) def read_sql(self, sql, index_col=None, coerce_float=True, parse_dates=None,", "#--- Read and write to DataFrames def read_sql_table(table_name, con, index_col=None,", "as such. Supports both string formatted and integer timestamp columns", "pytype_name = \"text\" if issubclass(pytype, np.floating): pytype_name = \"float\" elif", "---------- frame : DataFrame name : string name of SQL", "init table '%s'\" % name) def exists(self): return self.pd_sql.has_table(self.name) def", "if does not exist. 
\"\"\" table = PandasSQLTableLegacy( name, self,", "None: data_frame.set_index(index_col, inplace=True) return data_frame def _fetchall_as_list(self, cur): result =", "= \"text\" if issubclass(pytype, np.floating): pytype_name = \"float\" elif com.is_timedelta64_dtype(pytype):", "if not isinstance(result, list): result = list(result) return result def", "if_exists : {'fail', 'replace', 'append'}, default 'fail' - fail: If", "is depreciated, and will be removed in future versions. \"", "corresponds to the keyword arguments of :func:`pandas.to_datetime` Especially useful with", ": string, optional column name to use as index for", "created with an SQLAlchemy engine or connection+sql flavor\") def to_sql(self,", "spaces were converted to \" \"underscores.\") class PandasSQLTableLegacy(PandasSQLTable): \"\"\"Patch the", "of 'index_label' should match number of \" \"levels, which is", "as err: raise ValueError( \"duplicate name in index/columns: {0}\".format(err)) else:", "'us', 'ns']: return to_datetime(col, coerce=True, unit=format) elif issubclass(col.dtype.type, np.floating) or", "columns=None): \"\"\"Read SQL database table into a DataFrame. Given a", "con: an open SQL database connection object or an SQLAlchemy", "parameters to pass to execute method. Returns ------- Results Iterable", "self.pd_sql.execute(self.table) def insert_statement(self): names = list(map(str, self.frame.columns)) flv = self.pd_sql.flavor", "data_list) cur.close() self.pd_sql.con.commit() def _create_table_statement(self): \"Return a CREATE TABLE statement", "table = pandas_sql.read_table( table_name, index_col=index_col, coerce_float=coerce_float, parse_dates=parse_dates, columns=columns) if table", "if isinstance(keys, string_types): keys = (keys,) keystr = ', PRIMARY", "params=params, coerce_float=coerce_float, parse_dates=parse_dates) if pandas_sql.has_table(sql): return pandas_sql.read_table( sql, index_col=index_col, coerce_float=coerce_float,", "return to_datetime(col, coerce=True, unit=format) elif issubclass(col.dtype.type, np.floating) or issubclass(col.dtype.type, np.integer):", "\"PandasSQL must be created with an SQLAlchemy engine or connection+sql", "and will be \" \"written as integer values (ns frequency)", "sql flavors. See also -------- pandas.DataFrame.to_sql \"\"\" warnings.warn(\"write_frame is depreciated,", "coerce=True, unit=format) elif issubclass(col.dtype.type, np.floating) or issubclass(col.dtype.type, np.integer): # parse", "def __init__(self, engine, meta=None): self.engine = engine if not meta:", "return pandas_sql._create_sql_schema(frame, name) def _get_schema_legacy(frame, name, flavor, keys=None): \"\"\"Old function", "# Caution: np.datetime64 is also a subclass of np.number. return", "com.is_float_dtype(arr_or_dtype): return Float elif com.is_integer_dtype(arr_or_dtype): # TODO: Refine integer size.", "self.index is not None: self.frame.set_index(self.index, inplace=True) return self.frame def _index_name(self,", "For mapping Pandas tables to SQL tables. 
Uses fact that", "use the following: >>> execute(sql, con).rowcount Parameters ---------- sql: string", "dtype): pytype = dtype.type pytype_name = \"text\" if issubclass(pytype, np.floating):", "is None: # pragma: no cover result = [] return", "pandas_sql = pandasSQL_builder(cur, is_cursor=True) args = _convert_params(sql, params) return pandas_sql.execute(*args)", "in Legacy mode def drop_table(self, name): drop_sql = \"DROP TABLE", "engine, meta=None): self.engine = engine if not meta: from sqlalchemy.schema", "the specific function depending on the provided input (database table", "= execute(sql, con, cur=cur) result = _safe_fetch(cur) if con is", "use as index for the returned DataFrame object. coerce_float :", "DataFrame index as a column index_label : string or sequence,", "= _SQL_SYMB[flv]['br_r'] # right val quote char col_template = br_l", "= pandasSQL_builder(con, flavor=flavor) if isinstance(frame, Series): frame = frame.to_frame() elif", "database. Parameters ---------- frame : DataFrame name : string Name", "%(name)s ( %(columns)s )\"\"\" create_statement = template % {'name': self.name,", "= [] return result def uquery(sql, con=None, cur=None, retry=True, params=None):", "frame.to_frame() elif not isinstance(frame, DataFrame): raise NotImplementedError pandas_sql.to_sql(frame, name, if_exists=if_exists,", "type is not supported, and will be \" \"written as", "an sql_table column types Need to work around limited NA", "object. Parameters ---------- sql : string Query to be executed", "---------- table_name: string Name of SQL table con: SQLAlchemy engine", "SQL database connection object or an SQLAlchemy engine Using SQLAlchemy", "Exception: # pragma: no cover ex = DatabaseError( \"Execution failed", "do better type convertions. Also holds various flags needed to", "result and len(result[0]) == 1: # python 3 compat result", "table (only used when reading a table). Returns ------- DataFrame", "not found\" % table_name, con) def read_sql_query(sql, con, index_col=None, coerce_float=True,", "'columns': columns, 'keystr': keystr} return create_statement # legacy names, with", "elif if_exists == 'replace': self.pd_sql.drop_table(self.name) self.table = self._create_table_statement() self.create() elif", "for backwards compatibility, set index=False when not specified index =", "will be removed in future versions. \" \"You can use", "should not be necessary. try: import sqlalchemy if isinstance(con, sqlalchemy.engine.Engine):", "DatabaseError(IOError): pass #------------------------------------------------------------------------------ # Helper functions def _convert_params(sql, params): \"\"\"convert", "if self.is_cursor: cur = self.con else: cur = self.con.cursor() try:", ": DataFrame name : string con : DBAPI2 connection flavor", "con=None): \"\"\" Get the SQL db table schema for the", "is not None: [names.insert(0, idx) for idx in self.index[::-1]] bracketed_names", "} _SAFE_NAMES_WARNING = (\"The spaces in these column names will", "column selected, then plain list is returned. To obtain the", "integer values (ns frequency) to the \" \"database.\", UserWarning) pytype_name", "con : SQLAlchemy engine or sqlite3 DBAPI2 connection Using SQLAlchemy", "to_sql\", FutureWarning) # for backwards compatibility, set index=False when not", "no cover print('Failed to commit, may need to restart interpreter')", "the DataFrame uses MultiIndex. \"\"\" if if_exists not in ('fail',", "x in column_types) else: columns = ',\\n '.join('`%s` %s' %", "List of parameters to pass to execute method. 
Returns -------", "is not None: [cols.insert(0, self.table.c[idx]) for idx in self.index[::-1]] sql_select", "= ','.join(bracketed_names) wildcards = ','.join([wld] * len(names)) insert_statement = 'INSERT", "database by default. To keep the behaviour this function you", "insert data. Create if does not exist. index : boolean,", "is one of (D, s, ns, ms, us) in case", "sqlite3 is supported. \"\"\" if con is None: if flavor", "excName = e.__class__.__name__ if excName == 'OperationalError': # pragma: no", "data_frame.set_index(index_col, inplace=True) return data_frame def _fetchall_as_list(self, cur): result = cur.fetchall()", "index_col : string, optional Column to set as index coerce_float", "sqlalchemy import Table, Column columns = list(map(str, self.frame.columns)) column_types =", "np.asscalar(i) except AttributeError: return i def insert_data(self): if self.index is", "columns=columns) def read_sql(self, sql, index_col=None, coerce_float=True, parse_dates=None, params=None): args =", "a primary key con: an open SQL database connection object", "executed or database table name. con : SQLAlchemy engine or", "have col_type = self._numpy_type(sql_col.type) if col_type is datetime or col_type", "(%s)' % ( self.name, col_names, wildcards) return insert_statement def insert(self):", "return Integer elif com.is_bool(arr_or_dtype): return Boolean return Text def _numpy_type(self,", "frame : DataFrame name : string name of SQL table", "is not None: temp = self.frame.copy() temp.index.names = self.index try:", "entries for parse_dates gracefully if parse_dates is True or parse_dates", "= arr_or_dtype.tzinfo return DateTime(timezone=True) except: return DateTime if com.is_timedelta64_dtype(arr_or_dtype): warnings.warn(\"the", "meta=None, is_cursor=False): \"\"\" Convenience function to return the correct PandasSQL", "% (args[0], e)) raise_with_traceback(ex) ex = DatabaseError(\"Execution failed on sql:", "index as a column Notes ----- This function is deprecated", "elif com.is_timedelta64_dtype(pytype): warnings.warn(\"the 'timedelta' type is not supported, and will", "DataFrame read_sql \"\"\" pandas_sql = pandasSQL_builder(con) return pandas_sql.read_sql( sql, index_col=index_col,", "= _SQL_SYMB[flv]['wld'] # wildcard char if self.index is not None:", "in zip(keys, t[1:])) data_list.append(data) self.pd_sql.execute(ins, data_list) def read(self, coerce_float=True, parse_dates=None,", "table exists, drop it, recreate it, and insert data. append:", "to use one of the columns as the index, otherwise", "one column selected, then plain list is returned. To obtain", "= [] if not hasattr(parse_dates, '__iter__'): parse_dates = [parse_dates] for", "None: if isinstance(keys, string_types): keys = (keys,) keystr = ',", "keystr = '' if keys is not None: if isinstance(keys,", "dependency on DB-specific API. \"\"\" from __future__ import print_function, division", "not issubclass(df_col.dtype.type, np.datetime64): self.frame[col_name] = _handle_date_column(df_col) elif col_type is float:", "installed # SQL type convertions for each DB _SQL_TYPES =", "isinstance(frame, Series): frame = frame.to_frame() elif not isinstance(frame, DataFrame): raise", "future versions. \" \"You can use ``execute(...).rowcount`` instead.\", FutureWarning) cur", "replaces all Nones with false. Therefore only convert bool if", "if isinstance(sqltype, Float): return float if isinstance(sqltype, Integer): # TODO:", "use. if_exists : {'fail', 'replace', 'append'}, default 'fail' - fail:", "removed in future versions. 
\" \"You can use ``execute(...).fetchall()`` instead.\",", "None: return table else: raise ValueError(\"Table %s not found\" %", "favor of ``to_sql``. There are however two differences: - With", "function depending on the provided input (database table name or", "params=params, coerce_float=coerce_float, parse_dates=parse_dates) def read_sql(sql, con, index_col=None, coerce_float=True, params=None, parse_dates=None,", "parse_dates: try: fmt = parse_dates[col_name] except TypeError: fmt = None", "This class enables convertion between DataFrame and SQL databases using", "raise NotImplementedError else: self.flavor = flavor def execute(self, *args, **kwargs):", "Name of SQL table in database con : SQLAlchemy engine", "else format return to_datetime(col, coerce=True, unit=format) else: return to_datetime(col, coerce=True,", "name : string Name of SQL table con : SQLAlchemy", "= self.pd_sql.con.cursor() cur.executemany(ins, data_list) cur.close() self.pd_sql.con.commit() def _create_table_statement(self): \"Return a", "pass class DatabaseError(IOError): pass #------------------------------------------------------------------------------ # Helper functions def _convert_params(sql,", "parse_dates=parse_dates) def to_sql(frame, name, con, flavor='sqlite', if_exists='fail', index=True, index_label=None): \"\"\"", "list): result = list(result) return result def to_sql(self, frame, name,", "= [parse_dates] for col_name in parse_dates: df_col = data_frame[col_name] try:", "(only used when reading a table). Returns ------- DataFrame Notes", "None Column label for index column(s). If None is given", "supported index_col : string, optional Column to set as index", "PandasSQLLegacy(con, flavor, is_cursor=is_cursor) except ImportError: if flavor == 'mysql': warnings.warn(_MYSQL_WARNING,", "insert_statement def insert(self): ins = self.insert_statement() temp = self.insert_data() data_list", "Exception as e: try: self.con.rollback() except Exception: # pragma: no", "FutureWarning) return PandasSQLLegacy(con, flavor, is_cursor=is_cursor) class PandasSQLTable(PandasObject): \"\"\" For mapping", "with databases without native Datetime support, such as SQLite columns", "\"int\" elif issubclass(pytype, np.datetime64) or pytype is datetime: # Caution:", "table = PandasSQLTable(table_name, self, index=index_col) return table.read(coerce_float=coerce_float, parse_dates=parse_dates, columns=columns) def", "writing: index=True to include index in sql table if index", "read_sql\", FutureWarning) return read_sql(*args, **kwargs) def frame_query(*args, **kwargs): \"\"\"DEPRECIATED -", "like your connection failed, reconnecting...') return uquery(sql, con, retry=False) return", "a DataFrame. read_sql \"\"\" pandas_sql = PandasSQLAlchemy(con) table = pandas_sql.read_table(", "{ 'text': { 'mysql': 'VARCHAR (63)', 'sqlite': 'TEXT', }, 'float':", "size. return int if isinstance(sqltype, DateTime): # Caution: np.datetime64 is", "cover excName = e.__class__.__name__ if excName == 'OperationalError': return []", "Does the same thing as tquery, but instead of returning", "also -------- read_sql_query : Read SQL query into a DataFrame.", "index_col=index_col, params=params, coerce_float=coerce_float, parse_dates=parse_dates) def to_sql(frame, name, con, flavor='sqlite', if_exists='fail',", "if isinstance(con, sqlalchemy.engine.Engine): return PandasSQLAlchemy(con, meta=meta) else: if flavor ==", "left val quote char br_r = _SQL_SYMB[flv]['br_r'] # right val", "from 0.13.1. To keep backwards compatibility. 

#------------------------------------------------------------------------------
#--- Read and write to DataFrames

def read_sql_table(table_name, con, index_col=None, coerce_float=True,
                   parse_dates=None, columns=None):
    """Read SQL database table into a DataFrame.

    Given a table name and an SQLAlchemy engine, returns a DataFrame.
    This function does not support DBAPI connections.

    Parameters
    ----------
    table_name : string
        Name of SQL table in database
    con : SQLAlchemy engine
        Legacy mode not supported
    index_col : string, optional
        Column to set as index
    coerce_float : boolean, default True
        Attempt to convert values to non-string, non-numeric objects (like
        decimal.Decimal) to floating point. Can result in loss of precision.
    parse_dates : list or dict
        - List of column names to parse as dates
        - Dict of ``{column_name: format string}`` where format string is
          strftime compatible in case of parsing string times or is one of
          (D, s, ns, ms, us) in case of parsing integer timestamps
        - Dict of ``{column_name: arg dict}``, where the arg dict corresponds
          to the keyword arguments of :func:`pandas.to_datetime`
          Especially useful with databases without native Datetime support,
          such as SQLite
    columns : list
        List of column names to select from sql table

    Returns
    -------
    DataFrame

    See also
    --------
    read_sql_query : Read SQL query into a DataFrame.
    read_sql
    """
    pandas_sql = PandasSQLAlchemy(con)
    table = pandas_sql.read_table(
        table_name, index_col=index_col, coerce_float=coerce_float,
        parse_dates=parse_dates, columns=columns)

    if table is not None:
        return table
    else:
        raise ValueError("Table %s not found" % table_name, con)


def read_sql_query(sql, con, index_col=None, coerce_float=True, params=None,
                   parse_dates=None):
    """Read SQL query into a DataFrame.

    Returns a DataFrame corresponding to the result set of the query string.
    Optionally provide an `index_col` parameter to use one of the columns as
    the index, otherwise default integer index will be used.

    Parameters
    ----------
    sql : string
        SQL query to be executed
    con : SQLAlchemy engine or sqlite3 DBAPI2 connection
        Using SQLAlchemy makes it possible to use any DB supported by that
        library.
        If a DBAPI2 object, only sqlite3 is supported.
    index_col : string, optional
        column name to use as index for the returned DataFrame object.
    coerce_float : boolean, default True
        Attempt to convert values to non-string, non-numeric objects (like
        decimal.Decimal) to floating point, useful for SQL result sets
    params : list, tuple or dict, optional
        List of parameters to pass to execute method.
    parse_dates : list or dict
        - List of column names to parse as dates
        - Dict of ``{column_name: format string}`` where format string is
          strftime compatible in case of parsing string times or is one of
          (D, s, ns, ms, us) in case of parsing integer timestamps
        - Dict of ``{column_name: arg dict}``, where the arg dict corresponds
          to the keyword arguments of :func:`pandas.to_datetime`
          Especially useful with databases without native Datetime support,
          such as SQLite

    Returns
    -------
    DataFrame

    See also
    --------
    read_sql_table : Read SQL database table into a DataFrame
    read_sql
    """
    pandas_sql = pandasSQL_builder(con)
    return pandas_sql.read_sql(
        sql, index_col=index_col, params=params, coerce_float=coerce_float,
        parse_dates=parse_dates)


def read_sql(sql, con, index_col=None, coerce_float=True, params=None,
             parse_dates=None, columns=None):
    """
    Read SQL query or database table into a DataFrame.

    Parameters
    ----------
    sql : string
        SQL query to be executed or database table name.
    con : SQLAlchemy engine or DBAPI2 connection (legacy mode)
        Using SQLAlchemy makes it possible to use any DB supported by that
        library.
        If a DBAPI2 object, only sqlite3 is supported.
    index_col : string, optional
        column name to use as index for the returned DataFrame object.
    coerce_float : boolean, default True
        Attempt to convert values to non-string, non-numeric objects (like
        decimal.Decimal) to floating point, useful for SQL result sets
    params : list, tuple or dict, optional
        List of parameters to pass to execute method.
    parse_dates : list or dict
        - List of column names to parse as dates
        - Dict of ``{column_name: format string}`` where format string is
          strftime compatible in case of parsing string times or is one of
          (D, s, ns, ms, us) in case of parsing integer timestamps
        - Dict of ``{column_name: arg dict}``, where the arg dict corresponds
          to the keyword arguments of :func:`pandas.to_datetime`
          Especially useful with databases without native Datetime support,
          such as SQLite
    columns : list
        List of column names to select from sql table (only used when reading
        a table).

    Returns
    -------
    DataFrame

    Notes
    -----
    This function is a convenience wrapper around ``read_sql_table`` and
    ``read_sql_query`` (and for backward compatibility) and will delegate
    to the specific function depending on the provided input (database
    table name or sql query).

    See also
    --------
    read_sql_table : Read SQL database table into a DataFrame
    read_sql_query : Read SQL query into a DataFrame
    """
    pandas_sql = pandasSQL_builder(con)

    if isinstance(pandas_sql, PandasSQLLegacy):
        return pandas_sql.read_sql(
            sql, index_col=index_col, params=params,
            coerce_float=coerce_float, parse_dates=parse_dates)

    if pandas_sql.has_table(sql):
        return pandas_sql.read_table(
            sql, index_col=index_col, coerce_float=coerce_float,
            parse_dates=parse_dates, columns=columns)
    else:
        return pandas_sql.read_sql(
            sql, index_col=index_col, params=params,
            coerce_float=coerce_float, parse_dates=parse_dates)

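
# --- illustrative sketch, not part of the original module ------------------
# With a plain DBAPI2 connection ``read_sql`` always takes the legacy query
# path; with an SQLAlchemy engine a bare table name would be routed through
# ``read_table`` instead. Table and column names here are hypothetical.
def _example_read_sql():  # pragma: no cover
    import sqlite3
    con = sqlite3.connect(':memory:')
    con.execute('CREATE TABLE demo (a INTEGER, b TEXT)')
    con.execute("INSERT INTO demo VALUES (1, 'x')")
    return read_sql('SELECT a, b FROM demo', con, index_col='a')
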

def to_sql(frame, name, con, flavor='sqlite', if_exists='fail', index=True,
           index_label=None):
    """
    Write records stored in a DataFrame to a SQL database.

    Parameters
    ----------
    frame : DataFrame
    name : string
        Name of SQL table
    con : SQLAlchemy engine or sqlite3 DBAPI2 connection
        Using SQLAlchemy makes it possible to use any DB supported by that
        library.
        If a DBAPI2 object, only sqlite3 is supported.
    flavor : {'sqlite', 'mysql'}, default 'sqlite'
        The flavor of SQL to use. Ignored when using SQLAlchemy engine.
        'mysql' is deprecated and will be removed in future versions, but it
        will be further supported through SQLAlchemy engines.
    if_exists : {'fail', 'replace', 'append'}, default 'fail'
        - fail: If table exists, do nothing.
        - replace: If table exists, drop it, recreate it, and insert data.
        - append: If table exists, insert data. Create if does not exist.
    index : boolean, default True
        Write DataFrame index as a column
    index_label : string or sequence, default None
        Column label for index column(s). If None is given (default) and
        `index` is True, then the index names are used.
        A sequence should be given if the DataFrame uses MultiIndex.
    """
    if if_exists not in ('fail', 'replace', 'append'):
        raise ValueError("'{0}' is not valid for if_exists".format(if_exists))

    pandas_sql = pandasSQL_builder(con, flavor=flavor)

    if isinstance(frame, Series):
        frame = frame.to_frame()
    elif not isinstance(frame, DataFrame):
        raise NotImplementedError

    pandas_sql.to_sql(frame, name, if_exists=if_exists, index=index,
                      index_label=index_label)

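
# --- illustrative sketch, not part of the original module ------------------
# Round trip through ``to_sql``/``read_sql`` in legacy (sqlite3) mode; the
# frame content is hypothetical. Note the index is written by default.
def _example_to_sql():  # pragma: no cover
    import sqlite3
    con = sqlite3.connect(':memory:')
    df = DataFrame({'a': [1, 2], 'b': [0.5, 0.75]})
    to_sql(df, 'demo', con, flavor='sqlite', if_exists='replace')
    return read_sql('SELECT * FROM demo', con)
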
\"\"\" if if_exists not in", "frame=frame) return str(table.sql_schema()) # ---- SQL without SQLAlchemy --- #", "on the provided parameters \"\"\" # When support for DBAPI", "index_col is not None: data_frame.set_index(index_col, inplace=True) return data_frame def to_sql(self,", "return self.frame def _index_name(self, index, index_label): # for writing: index=True", "parameter to use one of the columns as the index,", "handler class for access to DBs without # SQLAlchemy installed", "flv = self.pd_sql.flavor br_l = _SQL_SYMB[flv]['br_l'] # left val quote", "True or parse_dates is None or parse_dates is False: parse_dates", "query to be executed con : SQLAlchemy engine or sqlite3", "by that library. If a DBAPI2 object, only sqlite3 is", "'sqlite': 'TIMESTAMP', }, 'bool': { 'mysql': 'BOOLEAN', 'sqlite': 'INTEGER', }", "if not meta: from sqlalchemy.schema import MetaData meta = MetaData(self.engine)", "columns=columns) if table is not None: return table else: raise", "insert(self): ins = self.insert_statement() data_list = [] temp = self.insert_data()", "non-string, non-numeric objects (like decimal.Decimal) to floating point, useful for", "warnings import traceback import itertools import re import numpy as", "index_col : string, optional Column name to use as index", "a DBAPI2 object, only sqlite3 is supported. \"\"\" if con", "return Float elif com.is_integer_dtype(arr_or_dtype): # TODO: Refine integer size. return", "Deprecated tquery and uquery def _safe_fetch(cur): try: result = cur.fetchall()", "issubclass(df_col.dtype.type, np.datetime64): self.frame[col_name] = _handle_date_column(df_col) elif col_type is float: #", "FutureWarning) return read_sql(*args, **kwargs) def write_frame(frame, name, con, flavor='sqlite', if_exists='fail',", "columns : list List of column names to select from", "con is not None: try: cur.close() con.commit() except Exception as", "read(self, coerce_float=True, parse_dates=None, columns=None): if columns is not None and", "reflected by SQLAlchemy to do better type convertions. Also holds", "pandas_sql.read_sql( sql, index_col=index_col, params=params, coerce_float=coerce_float, parse_dates=parse_dates) def to_sql(frame, name, con,", "Table(self.name, self.pd_sql.meta, *columns) def _harmonize_columns(self, parse_dates=None): \"\"\" Make a data_frame's", "= result.fetchall() column_names = result.keys() self.frame = DataFrame.from_records( data, columns=column_names,", "can use the following: >>> execute(sql, con).rowcount Parameters ---------- sql:", "numpy as np import pandas.core.common as com from pandas.compat import", "\"\"\" DEPRECATED. Does the same thing as tquery, but instead", "the index names are used. A sequence should be given", "= self.frame.copy() temp.index.names = self.index try: temp.reset_index(inplace=True) except ValueError as", "for idx in self.index[::-1]] bracketed_names = [br_l + column +", "excName == 'OperationalError': return [] def tquery(sql, con=None, cur=None, retry=True):", "idx_type = self._sqlalchemy_type( self.frame.index.get_level_values(i)) columns.insert(0, Column(idx_label, idx_type, index=True)) return Table(self.name,", "\" \"underscores.\") class PandasSQLTableLegacy(PandasSQLTable): \"\"\"Patch the PandasSQLTable for legacy support.", "of returning results, it returns the number of rows affected.", "to rollback\" % (args[0], e)) raise_with_traceback(ex) ex = DatabaseError(\"Execution failed", "written to the sql database by default. To keep the", "given sql query. 

class PandasSQLTable(PandasObject):
    """
    For mapping Pandas tables to SQL tables.
    Uses fact that table is reflected by SQLAlchemy to
    do better type conversions.
    Also holds various flags needed to avoid having to
    pass them between functions all the time.
    """
    # TODO: support for multiIndex
    def __init__(self, name, pandas_sql_engine, frame=None, index=True,
                 if_exists='fail', prefix='pandas', index_label=None):
        self.name = name
        self.pd_sql = pandas_sql_engine
        self.prefix = prefix
        self.frame = frame
        self.index = self._index_name(index, index_label)

        if frame is not None:
            # We want to write a frame
            if self.pd_sql.has_table(self.name):
                if if_exists == 'fail':
                    raise ValueError("Table '%s' already exists." % name)
                elif if_exists == 'replace':
                    self.pd_sql.drop_table(self.name)
                    self.table = self._create_table_statement()
                    self.create()
                elif if_exists == 'append':
                    self.table = self.pd_sql.get_table(self.name)
                    if self.table is None:
                        self.table = self._create_table_statement()
                else:
                    raise ValueError(
                        "'{0}' is not valid for if_exists".format(if_exists))
            else:
                self.table = self._create_table_statement()
                self.create()
        else:
            # no data provided, read-only mode
            self.table = self.pd_sql.get_table(self.name)
            if self.table is None:
                raise ValueError("Could not init table '%s'" % name)

    def exists(self):
        return self.pd_sql.has_table(self.name)

    def sql_schema(self):
        from sqlalchemy.schema import CreateTable
        return str(CreateTable(self.table))

    def create(self):
        self.table.create()

    def insert_statement(self):
        return self.table.insert()

    def maybe_asscalar(self, i):
        try:
            return np.asscalar(i)
        except AttributeError:
            return i

    def insert_data(self):
        if self.index is not None:
            temp = self.frame.copy()
            temp.index.names = self.index
            try:
                temp.reset_index(inplace=True)
            except ValueError as err:
                raise ValueError(
                    "duplicate name in index/columns: {0}".format(err))
        else:
            temp = self.frame

        return temp

    def insert(self):
        ins = self.insert_statement()
        data_list = []
        temp = self.insert_data()
        keys = temp.columns

        for t in temp.itertuples():
            data = dict((k, self.maybe_asscalar(v))
                        for k, v in zip(keys, t[1:]))
            data_list.append(data)

        self.pd_sql.execute(ins, data_list)
    def read(self, coerce_float=True, parse_dates=None, columns=None):

        if columns is not None and len(columns) > 0:
            from sqlalchemy import select
            cols = [self.table.c[n] for n in columns]
            if self.index is not None:
                [cols.insert(0, self.table.c[idx])
                 for idx in self.index[::-1]]
            sql_select = select(cols)
        else:
            sql_select = self.table.select()

        result = self.pd_sql.execute(sql_select)
        data = result.fetchall()
        column_names = result.keys()

        self.frame = DataFrame.from_records(
            data, columns=column_names, coerce_float=coerce_float)

        self._harmonize_columns(parse_dates=parse_dates)

        if self.index is not None:
            self.frame.set_index(self.index, inplace=True)

        return self.frame

    def _index_name(self, index, index_label):
        # for writing: index=True to include index in sql table
        if index is True:
            nlevels = self.frame.index.nlevels
            # if index_label is specified, set this as index name(s)
            if index_label is not None:
                if not isinstance(index_label, list):
                    index_label = [index_label]
                if len(index_label) != nlevels:
                    raise ValueError(
                        "Length of 'index_label' should match number of "
                        "levels, which is {0}".format(nlevels))
                else:
                    return index_label
            # return the used column labels for the index columns
            if nlevels == 1 and 'index' not in self.frame.columns \
                    and self.frame.index.name is None:
                return ['index']
            else:
                return [l if l is not None else "level_{0}".format(i)
                        for i, l in enumerate(self.frame.index.names)]
        # for reading: index=(list of) string to specify column to set
        # as index
        elif isinstance(index, string_types):
            return [index]
        elif isinstance(index, list):
            return index
        else:
            return None
    def _create_table_statement(self):
        from sqlalchemy import Table, Column

        columns = list(map(str, self.frame.columns))
        column_types = map(self._sqlalchemy_type, self.frame.dtypes)

        columns = [Column(name, typ)
                   for name, typ in zip(columns, column_types)]

        if self.index is not None:
            for i, idx_label in enumerate(self.index[::-1]):
                idx_type = self._sqlalchemy_type(
                    self.frame.index.get_level_values(i))
                columns.insert(0, Column(idx_label, idx_type, index=True))

        return Table(self.name, self.pd_sql.meta, *columns)

    def _harmonize_columns(self, parse_dates=None):
        """ Make a data_frame's column type align with an sql_table
            column types.
            Need to work around limited NA value support.
            Floats are always fine, ints must always
            be floats if there are Null values.
            Booleans are hard because converting bool column with None
            replaces all Nones with false. Therefore only convert bool
            if there are no NA values.
            Datetimes should already be converted
            to np.datetime if supported, but here we also force conversion
            if required
        """
        # handle non-list entries for parse_dates gracefully
        if parse_dates is True or parse_dates is None or parse_dates is False:
            parse_dates = []

        if not hasattr(parse_dates, '__iter__'):
            parse_dates = [parse_dates]

        for sql_col in self.table.columns:
            col_name = sql_col.name
            try:
                df_col = self.frame[col_name]
                # the type the dataframe column should have
                col_type = self._numpy_type(sql_col.type)

                if col_type is datetime or col_type is date:
                    if not issubclass(df_col.dtype.type, np.datetime64):
                        self.frame[col_name] = _handle_date_column(df_col)

                elif col_type is float:
                    # floats support NA, can always convert!
                    self.frame[col_name].astype(col_type, copy=False)

                elif len(df_col) == df_col.count():
                    # No NA values, can convert ints and bools
                    if col_type is int or col_type is bool:
                        self.frame[col_name].astype(col_type, copy=False)

                # Handle date parsing
                if col_name in parse_dates:
                    try:
                        fmt = parse_dates[col_name]
                    except TypeError:
                        fmt = None
                    self.frame[col_name] = _handle_date_column(
                        df_col, format=fmt)

            except KeyError:
                pass  # this column not in results
    def _sqlalchemy_type(self, arr_or_dtype):
        from sqlalchemy.types import (Integer, Float, Text, Boolean,
                                      DateTime, Date, Interval)

        if arr_or_dtype is date:
            return Date
        if com.is_datetime64_dtype(arr_or_dtype):
            try:
                tz = arr_or_dtype.tzinfo
                return DateTime(timezone=True)
            except:
                return DateTime
        if com.is_timedelta64_dtype(arr_or_dtype):
            warnings.warn("the 'timedelta' type is not supported, and will be "
                          "written as integer values (ns frequency) to the "
                          "database.", UserWarning)
            return Integer
        elif com.is_float_dtype(arr_or_dtype):
            return Float
        elif com.is_integer_dtype(arr_or_dtype):
            # TODO: Refine integer size.
            return Integer
        elif com.is_bool(arr_or_dtype):
            return Boolean
        return Text

    def _numpy_type(self, sqltype):
        from sqlalchemy.types import Integer, Float, Boolean, DateTime, Date

        if isinstance(sqltype, Float):
            return float
        if isinstance(sqltype, Integer):
            # TODO: Refine integer size.
            return int
        if isinstance(sqltype, DateTime):
            # Caution: np.datetime64 is also a subclass of np.number.
            return datetime
        if isinstance(sqltype, Date):
            return date
        if isinstance(sqltype, Boolean):
            return bool
        return object


class PandasSQL(PandasObject):
    """
    Subclasses should define read_sql and to_sql
    """

    def read_sql(self, *args, **kwargs):
        raise ValueError(
            "PandasSQL must be created with an SQLAlchemy engine "
            "or connection+sql flavor")

    def to_sql(self, *args, **kwargs):
        raise ValueError(
            "PandasSQL must be created with an SQLAlchemy engine "
            "or connection+sql flavor")


class PandasSQLAlchemy(PandasSQL):
    """
    This class enables conversion between DataFrame and SQL databases
    using SQLAlchemy to handle database abstraction
    """

    def __init__(self, engine, meta=None):
        self.engine = engine
        if not meta:
            from sqlalchemy.schema import MetaData
            meta = MetaData(self.engine)
            meta.reflect(self.engine)

        self.meta = meta

    def execute(self, *args, **kwargs):
        """Simple passthrough to SQLAlchemy engine"""
        return self.engine.execute(*args, **kwargs)

    def read_table(self, table_name, index_col=None, coerce_float=True,
                   parse_dates=None, columns=None):

        table = PandasSQLTable(table_name, self, index=index_col)
        return table.read(coerce_float=coerce_float,
                          parse_dates=parse_dates, columns=columns)

    def read_sql(self, sql, index_col=None, coerce_float=True,
                 parse_dates=None, params=None):
        args = _convert_params(sql, params)

        result = self.execute(*args)
        data = result.fetchall()
        columns = result.keys()

        data_frame = DataFrame.from_records(
            data, columns=columns, coerce_float=coerce_float)

        _parse_date_columns(data_frame, parse_dates)

        if index_col is not None:
            data_frame.set_index(index_col, inplace=True)

        return data_frame
    def to_sql(self, frame, name, if_exists='fail', index=True,
               index_label=None):
        table = PandasSQLTable(
            name, self, frame=frame, index=index, if_exists=if_exists,
            index_label=index_label)
        table.insert()

    @property
    def tables(self):
        return self.meta.tables

    def has_table(self, name):
        if self.meta.tables.get(name) is not None:
            return True
        else:
            return False

    def get_table(self, table_name):
        return self.meta.tables.get(table_name)

    def drop_table(self, table_name):
        if self.engine.has_table(table_name):
            self.get_table(table_name).drop()
            self.meta.clear()
            self.meta.reflect()

    def _create_sql_schema(self, frame, table_name):
        table = PandasSQLTable(table_name, self, frame=frame)
        return str(table.sql_schema())

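
# --- illustrative sketch, not part of the original module ------------------
# Using PandasSQLAlchemy directly with an in-memory engine: ``to_sql``
# creates and populates the table, ``read_table`` reads it back. Assumes a
# bound MetaData so ``table.create()`` knows the engine; names hypothetical.
def _example_alchemy_roundtrip():  # pragma: no cover
    from sqlalchemy import create_engine
    pd_sql = PandasSQLAlchemy(create_engine('sqlite:///:memory:'))
    pd_sql.to_sql(DataFrame({'a': [1, 2]}), 'demo')
    return pd_sql.read_table('demo')
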

# ---- SQL without SQLAlchemy ---
# Flavour specific sql strings and handler class for access to DBs without
# SQLAlchemy installed
# SQL type conversions for each DB
_SQL_TYPES = {
    'text': {
        'mysql': 'VARCHAR (63)',
        'sqlite': 'TEXT',
    },
    'float': {
        'mysql': 'FLOAT',
        'sqlite': 'REAL',
    },
    'int': {
        'mysql': 'BIGINT',
        'sqlite': 'INTEGER',
    },
    'datetime': {
        'mysql': 'DATETIME',
        'sqlite': 'TIMESTAMP',
    },
    'date': {
        'mysql': 'DATE',
        'sqlite': 'TIMESTAMP',
    },
    'bool': {
        'mysql': 'BOOLEAN',
        'sqlite': 'INTEGER',
    }
}
# SQL enquote and wildcard symbols
_SQL_SYMB = {
    'mysql': {
        'br_l': '`',
        'br_r': '`',
        'wld': '%s'
    },
    'sqlite': {
        'br_l': '[',
        'br_r': ']',
        'wld': '?'
    }
}


_SAFE_NAMES_WARNING = ("The spaces in these column names will not be "
                       "changed. In pandas versions < 0.14, spaces were "
                       "converted to underscores.")

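
# --- illustrative sketch, not part of the original module ------------------
# How the quoting/wildcard table above combines into a parameterized INSERT
# for each flavor (the table and column names here are hypothetical):
def _example_legacy_symbols(flv='sqlite'):  # pragma: no cover
    names = ['a', 'b']
    br_l, br_r = _SQL_SYMB[flv]['br_l'], _SQL_SYMB[flv]['br_r']
    cols = ','.join(br_l + n + br_r for n in names)
    wildcards = ','.join([_SQL_SYMB[flv]['wld']] * len(names))
    # 'sqlite' -> INSERT INTO demo ([a],[b]) VALUES (?,?)
    # 'mysql'  -> INSERT INTO demo (`a`,`b`) VALUES (%s,%s)
    return 'INSERT INTO demo (%s) VALUES (%s)' % (cols, wildcards)
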

class PandasSQLTableLegacy(PandasSQLTable):
    """Patch the PandasSQLTable for legacy support.
        Instead of a table variable just use the Create Table
        statement"""
    def sql_schema(self):
        return str(self.table)

    def create(self):
        self.pd_sql.execute(self.table)

    def insert_statement(self):
        names = list(map(str, self.frame.columns))
        flv = self.pd_sql.flavor

        br_l = _SQL_SYMB[flv]['br_l']  # left val quote char
        br_r = _SQL_SYMB[flv]['br_r']  # right val quote char
        wld = _SQL_SYMB[flv]['wld']  # wildcard char

        if self.index is not None:
            [names.insert(0, idx) for idx in self.index[::-1]]

        bracketed_names = [br_l + column + br_r for column in names]
        col_names = ','.join(bracketed_names)
        wildcards = ','.join([wld] * len(names))
        insert_statement = 'INSERT INTO %s (%s) VALUES (%s)' % (
            self.name, col_names, wildcards)
        return insert_statement

    def insert(self):
        ins = self.insert_statement()
        temp = self.insert_data()
        data_list = []

        for t in temp.itertuples():
            data = tuple((self.maybe_asscalar(v) for v in t[1:]))
            data_list.append(data)

        cur = self.pd_sql.con.cursor()
        cur.executemany(ins, data_list)
        cur.close()
        self.pd_sql.con.commit()
    def _create_table_statement(self):
        "Return a CREATE TABLE statement to suit the contents of a DataFrame."

        columns = list(map(str, self.frame.columns))
        pat = re.compile('\s+')
        if any(map(pat.search, columns)):
            warnings.warn(_SAFE_NAMES_WARNING)

        column_types = [self._sql_type_name(typ) for typ in self.frame.dtypes]

        if self.index is not None:
            for i, idx_label in enumerate(self.index[::-1]):
                columns.insert(0, idx_label)
                column_types.insert(0, self._sql_type_name(
                    self.frame.index.get_level_values(i).dtype))

        flv = self.pd_sql.flavor

        br_l = _SQL_SYMB[flv]['br_l']  # left val quote char
        br_r = _SQL_SYMB[flv]['br_r']  # right val quote char

        col_template = br_l + '%s' + br_r + ' %s'

        columns = ',\n  '.join(col_template % x
                               for x in zip(columns, column_types))
        template = """CREATE TABLE %(name)s (
                      %(columns)s
                      )"""
        create_statement = template % {'name': self.name, 'columns': columns}
        return create_statement

    def _sql_type_name(self, dtype):
        pytype = dtype.type
        pytype_name = "text"
        if issubclass(pytype, np.floating):
            pytype_name = "float"
        elif com.is_timedelta64_dtype(pytype):
            warnings.warn("the 'timedelta' type is not supported, and will be "
                          "written as integer values (ns frequency) to the "
                          "database.", UserWarning)
            pytype_name = "int"
        elif issubclass(pytype, np.integer):
            pytype_name = "int"
        elif issubclass(pytype, np.datetime64) or pytype is datetime:
            # Caution: np.datetime64 is also a subclass of np.number.
            pytype_name = "datetime"
        elif pytype is datetime.date:
            pytype_name = "date"
        elif issubclass(pytype, np.bool_):
            pytype_name = "bool"

        return _SQL_TYPES[pytype_name][self.pd_sql.flavor]


class PandasSQLLegacy(PandasSQL):

    def __init__(self, con, flavor, is_cursor=False):
        self.is_cursor = is_cursor
        self.con = con
        if flavor is None:
            flavor = 'sqlite'
        if flavor not in ['sqlite', 'mysql']:
            raise NotImplementedError
        else:
            self.flavor = flavor

    def execute(self, *args, **kwargs):
        if self.is_cursor:
            cur = self.con
        else:
            cur = self.con.cursor()
        try:
            if kwargs:
                cur.execute(*args, **kwargs)
            else:
                cur.execute(*args)
            return cur
        except Exception as e:
            try:
                self.con.rollback()
            except Exception:  # pragma: no cover
                ex = DatabaseError(
                    "Execution failed on sql: %s\n%s\nunable to rollback"
                    % (args[0], e))
                raise_with_traceback(ex)

            ex = DatabaseError("Execution failed on sql: %s" % args[0])
            raise_with_traceback(ex)

    def read_sql(self, sql, index_col=None, coerce_float=True, params=None,
                 parse_dates=None):
        args = _convert_params(sql, params)
        cursor = self.execute(*args)
        columns = [col_desc[0] for col_desc in cursor.description]
        data = self._fetchall_as_list(cursor)
        cursor.close()

        data_frame = DataFrame.from_records(
            data, columns=columns, coerce_float=coerce_float)

        _parse_date_columns(data_frame, parse_dates)

        if index_col is not None:
            data_frame.set_index(index_col, inplace=True)
        return data_frame

    def _fetchall_as_list(self, cur):
        result = cur.fetchall()
        if not isinstance(result, list):
            result = list(result)
        return result
    def to_sql(self, frame, name, if_exists='fail', index=True,
               index_label=None):
        """
        Write records stored in a DataFrame to a SQL database.

        Parameters
        ----------
        frame: DataFrame
        name: name of SQL table
        flavor: {'sqlite', 'mysql'}, default 'sqlite'
        if_exists: {'fail', 'replace', 'append'}, default 'fail'
            fail: If table exists, do nothing.
            replace: If table exists, drop it, recreate it, and insert data.
            append: If table exists, insert data. Create if does not exist.
        """
        table = PandasSQLTableLegacy(
            name, self, frame=frame, index=index, if_exists=if_exists,
            index_label=index_label)
        table.insert()

    def has_table(self, name):
        flavor_map = {
            'sqlite': ("SELECT name FROM sqlite_master "
                       "WHERE type='table' AND name='%s';") % name,
            'mysql': "SHOW TABLES LIKE '%s'" % name}
        query = flavor_map.get(self.flavor)

        return len(self.execute(query).fetchall()) > 0

    def get_table(self, table_name):
        return None  # not supported in Legacy mode

    def drop_table(self, name):
        drop_sql = "DROP TABLE %s" % name
        self.execute(drop_sql)

    def _create_sql_schema(self, frame, table_name):
        table = PandasSQLTableLegacy(table_name, self, frame=frame)
        return str(table.sql_schema())

Parameters ---------- table_name:", "which is {0}\".format(nlevels)) else: return index_label # return the used", "for multiIndex def __init__(self, name, pandas_sql_engine, frame=None, index=True, if_exists='fail', prefix='pandas',", "= template % {'name': self.name, 'columns': columns} return create_statement def", "params: list or tuple, optional List of parameters to pass", "isinstance(con, sqlalchemy.engine.Engine): return PandasSQLAlchemy(con, meta=meta) else: if flavor == 'mysql':", "DateTime, Date, Interval if arr_or_dtype is date: return Date if", "return date if isinstance(sqltype, Boolean): return bool return object class", "execute(sql, con, params).fetchall() Parameters ---------- sql: string SQL query to", "list): index_label = [index_label] if len(index_label) != nlevels: raise ValueError(", "DBAPI2 object, only sqlite3 is supported. cur : depreciated, cursor", "table '%s'\" % name) def exists(self): return self.pd_sql.has_table(self.name) def sql_schema(self):", "'.join('[%s] %s' % x for x in column_types) else: columns", "**kwargs): \"\"\"DEPRECIATED - use read_sql \"\"\" warnings.warn(\"read_frame is depreciated, use", "if index_label is not None: if not isinstance(index_label, list): index_label", "To keep backwards compatibility. When mysql legacy support is dropped,", "or sqlite3 DBAPI2 connection Using SQLAlchemy makes it possible to", "function supports sqlalchemy engines to work with different sql flavors.", "with SQLAlchemy engines.\") def pandasSQL_builder(con, flavor=None, meta=None, is_cursor=False): \"\"\" Convenience", "idx_type, index=True)) return Table(self.name, self.pd_sql.meta, *columns) def _harmonize_columns(self, parse_dates=None): \"\"\"", "# for writing: index=True to include index in sql table", "'mysql'}, default 'sqlite' The flavor of SQL to use. Ignored", "\"\"\" # TODO: support for multiIndex def __init__(self, name, pandas_sql_engine,", "_parse_date_columns(data_frame, parse_dates) if index_col is not None: data_frame.set_index(index_col, inplace=True) return", "warnings.warn(_MYSQL_WARNING, FutureWarning) return PandasSQLLegacy(con, flavor, is_cursor=is_cursor) class PandasSQLTable(PandasObject): \"\"\" For", "['D', 's', 'ms', 'us', 'ns']: return to_datetime(col, coerce=True, unit=format) elif", "index_label=None): \"\"\" Write records stored in a DataFrame to a", "be further supported with SQLAlchemy engines.\") def pandasSQL_builder(con, flavor=None, meta=None,", "tuple((self.maybe_asscalar(v) for v in t[1:])) data_list.append(data) cur = self.pd_sql.con.cursor() cur.executemany(ins,", "engines.\") def pandasSQL_builder(con, flavor=None, meta=None, is_cursor=False): \"\"\" Convenience function to", "stored in a DataFrame to a SQL database. Parameters ----------", "table if index is True: nlevels = self.frame.index.nlevels # if", "index_label is specified, set this as index name(s) if index_label", "self.table.select() result = self.pd_sql.execute(sql_select) data = result.fetchall() column_names = result.keys()", "result.keys() data_frame = DataFrame.from_records( data, columns=columns, coerce_float=coerce_float) _parse_date_columns(data_frame, parse_dates) if", "'br_r': ']', 'wld': '?' 
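A minimal usage sketch for ``has_table`` as described here, assuming an illustrative in-memory sqlite3 connection and a hypothetical table named ``demo``:

import sqlite3
import pandas as pd
from pandas.io import sql

conn = sqlite3.connect(':memory:')                   # illustrative connection
pd.DataFrame({'a': [1, 2]}).to_sql('demo', conn)     # hypothetical table

sql.has_table('demo', conn, flavor='sqlite')         # True
sql.has_table('missing', conn, flavor='sqlite')      # False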
} } _SAFE_NAMES_WARNING = (\"The spaces", "default False Write DataFrame index as a column Notes -----", "return table else: raise ValueError(\"Table %s not found\" % table_name,", "column type align with an sql_table column types Need to", "to be executed con: DBAPI2 connection cur: depreciated, cursor is", "self.frame.index.get_level_values(i)) columns.insert(0, Column(idx_label, idx_type, index=True)) return Table(self.name, self.pd_sql.meta, *columns) def", "cur.rowcount try: con.commit() except Exception as e: excName = e.__class__.__name__", "in sql table if index is True: nlevels = self.frame.index.nlevels", "= _handle_date_column( df_col, format=fmt) except KeyError: pass # this column", "= ',\\n '.join('`%s` %s' % x for x in column_types)", "be read as such. Supports both string formatted and integer", "See also -------- read_sql_query : Read SQL query into a", "already be converted to np.datetime if supported, but here we", "('fail', 'replace', 'append'): raise ValueError(\"'{0}' is not valid for if_exists\".format(if_exists))", "in enumerate(self.frame.index.names)] # for reading: index=(list of) string to specify", "= self.pd_sql.flavor br_l = _SQL_SYMB[flv]['br_l'] # left val quote char", "values to non-string, non-numeric objects (like decimal.Decimal) to floating point,", "integer size. return Integer elif com.is_bool(arr_or_dtype): return Boolean return Text", "= self.insert_data() data_list = [] for t in temp.itertuples(): data", "same result in the future, you can use the following:", "= self._create_table_statement() self.create() elif if_exists == 'append': self.table = self.pd_sql.get_table(self.name)", "with depreciation warnings and copied docs def read_frame(*args, **kwargs): \"\"\"DEPRECIATED", "frame, table_name): table = PandasSQLTableLegacy(table_name, self, frame=frame) return str(table.sql_schema()) def", "def read_sql_query(sql, con, index_col=None, coerce_float=True, params=None, parse_dates=None): \"\"\"Read SQL query", "executed con: DBAPI2 connection cur: depreciated, cursor is obtained from", "of query wrappers / abstractions to both facilitate data retrieval", "return str(self.table) def create(self): self.pd_sql.execute(self.table) def insert_statement(self): names = list(map(str,", "= list(result) return result def to_sql(self, frame, name, if_exists='fail', index=True,", "is bool: self.frame[col_name].astype(col_type, copy=False) # Handle date parsing if col_name", "functions def _convert_params(sql, params): \"\"\"convert sql and params args to", "Iterable \"\"\" warnings.warn( \"tquery is depreciated, and will be removed", "pragma: no cover print('Failed to commit, may need to restart", "self.frame.dtypes] if self.index is not None: for i, idx_label in", "NA, can always convert! self.frame[col_name].astype(col_type, copy=False) elif len(df_col) == df_col.count():", "PandasSQLTable(table_name, self, index=index_col) return table.read(coerce_float=coerce_float, parse_dates=parse_dates, columns=columns) def read_sql(self, sql,", "query string. Optionally provide an `index_col` parameter to use one", "br_r + ' %s' columns = ',\\n '.join(col_template % x", "**format) else: if format in ['D', 's', 'ms', 'us', 'ns']:", "pandas_sql = pandasSQL_builder(con) else: pandas_sql = pandasSQL_builder(cur, is_cursor=True) args =", "SQL enquote and wildcard symbols _SQL_SYMB = { 'mysql': {", "given if the DataFrame uses MultiIndex. 
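The ``_SQL_SYMB`` table quoted here drives identifier quoting and parameter placeholders on the legacy DBAPI path (backticks and ``%s`` for mysql, square brackets and ``?`` for sqlite). Below is a small illustrative re-creation, not the module's own code, of how those symbols feed the ``'INSERT INTO %s (%s) VALUES (%s)'`` template that also appears in these fragments:

# Quoting/placeholder symbols as quoted above (copied here for illustration only).
SQL_SYMB = {
    'mysql':  {'br_l': '`', 'br_r': '`', 'wld': '%s'},
    'sqlite': {'br_l': '[', 'br_r': ']', 'wld': '?'},
}

def sketch_insert_statement(table, columns, flavor):
    # Hypothetical helper mirroring the legacy insert_statement construction.
    symb = SQL_SYMB[flavor]
    bracketed = [symb['br_l'] + col + symb['br_r'] for col in columns]
    col_names = ','.join(bracketed)
    wildcards = ','.join([symb['wld']] * len(columns))
    return 'INSERT INTO %s (%s) VALUES (%s)' % (table, col_names, wildcards)

sketch_insert_statement('demo', ['a', 'b'], 'sqlite')
# 'INSERT INTO demo ([a],[b]) VALUES (?,?)'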
\"\"\" if if_exists not", "if_exists=if_exists, index=index, **kwargs) # Append wrapped function docstrings read_frame.__doc__ +=", "name of SQL table flavor: {'sqlite', 'mysql'}, default 'sqlite' if_exists:", "self.frame.columns and self.frame.index.name is None: return ['index'] else: return [l", "}, 'sqlite': { 'br_l': '[', 'br_r': ']', 'wld': '?' }", "sql table Returns ------- DataFrame See also -------- read_sql_query :", "but it will be further supported through SQLAlchemy engines. if_exists", "sqlalchemy.engine.Engine): return PandasSQLAlchemy(con, meta=meta) else: if flavor == 'mysql': warnings.warn(_MYSQL_WARNING,", "self._index_name(index, index_label) if frame is not None: # We want", "t in temp.itertuples(): data = dict((k, self.maybe_asscalar(v)) for k, v", "or connection+sql flavor\") def to_sql(self, *args, **kwargs): raise ValueError( \"PandasSQL", "if_exists='fail', prefix='pandas', index_label=None): self.name = name self.pd_sql = pandas_sql_engine self.prefix", "3 compat result = list(lzip(*result)[0]) elif result is None: #", "further supported with SQLAlchemy engines.\") def pandasSQL_builder(con, flavor=None, meta=None, is_cursor=False):", "data_frame def execute(sql, con, cur=None, params=None): \"\"\" Execute the given", "object class PandasSQL(PandasObject): \"\"\" Subclasses Should define read_sql and to_sql", "_create_sql_schema(self, frame, table_name): table = PandasSQLTable(table_name, self, frame=frame) return str(table.sql_schema())", "Null values. Booleans are hard because converting bool column with", "is True: nlevels = self.frame.index.nlevels # if index_label is specified,", "correct PandasSQL subclass based on the provided parameters \"\"\" #", "database table into a DataFrame. Given a table name and", "of \" \"levels, which is {0}\".format(nlevels)) else: return index_label #", "Boolean, DateTime, Date, Interval if arr_or_dtype is date: return Date", "cur = self.con.cursor() try: if kwargs: cur.execute(*args, **kwargs) else: cur.execute(*args)", "if flavor == 'mysql': warnings.warn(_MYSQL_WARNING, FutureWarning) return _get_schema_legacy(frame, name, flavor,", "= _handle_date_column(df_col) elif col_type is float: # floats support NA,", "of rows affected. Good for update queries. To obtain the", "DateTime): # Caution: np.datetime64 is also a subclass of np.number.", "= PandasSQLTable( name, self, frame=frame, index=index, if_exists=if_exists, index_label=index_label) table.insert() @property", "self.index[::-1]] bracketed_names = [br_l + column + br_r for column", "None: # We want to write a frame if self.pd_sql.has_table(self.name):", "def sql_schema(self): from sqlalchemy.schema import CreateTable return str(CreateTable(self.table)) def create(self):", "def _safe_fetch(cur): try: result = cur.fetchall() if not isinstance(result, list):", "engine or sqlite3 DBAPI2 connection Using SQLAlchemy makes it possible", "index_col=None, coerce_float=True, params=None, parse_dates=None): args = _convert_params(sql, params) cursor =", "not exist. index : boolean, default False Write DataFrame index", "'sqlite': columns = ',\\n '.join('[%s] %s' % x for x", "'columns': columns} return create_statement def _sql_type_name(self, dtype): pytype = dtype.type", "be further supported through SQLAlchemy engines. 
if_exists : {'fail', 'replace',", "table is reflected by SQLAlchemy to do better type convertions.", "index_col=None, coerce_float=True, parse_dates=None, columns=None): \"\"\"Read SQL database table into a", "if isinstance(frame, Series): frame = frame.to_frame() elif not isinstance(frame, DataFrame):", "default 'sqlite' The flavor of SQL to use. if_exists :", "name self.pd_sql = pandas_sql_engine self.prefix = prefix self.frame = frame", "nothing. - replace: If table exists, drop it, recreate it,", "`index_col` parameter to use one of the columns as the", "[] def tquery(sql, con=None, cur=None, retry=True): \"\"\" DEPRECATED. Returns list", "self.meta.tables.get(table_name) def drop_table(self, table_name): if self.engine.has_table(table_name): self.get_table(table_name).drop() self.meta.clear() self.meta.reflect() def", "create_statement def _sql_type_name(self, dtype): pytype = dtype.type pytype_name = \"text\"", "to the \" \"database.\", UserWarning) pytype_name = \"int\" elif issubclass(pytype,", "if excName != 'OperationalError': raise traceback.print_exc() if retry: print('Looks like", "read_sql_query : Read SQL query into a DataFrame \"\"\" pandas_sql", "to convert values to non-string, non-numeric objects (like decimal.Decimal) to", "name. con : SQLAlchemy engine or DBAPI2 connection (legacy mode)", "of SQL table con : SQLAlchemy engine or sqlite3 DBAPI2", "sql query. If only one column selected, then plain list", "TODO: Refine integer size. return Integer elif com.is_bool(arr_or_dtype): return Boolean", "def _get_schema_legacy(frame, name, flavor, keys=None): \"\"\"Old function from 0.13.1. To", "_convert_params(sql, params) return pandas_sql.execute(*args) #------------------------------------------------------------------------------ #--- Deprecated tquery and uquery", "be possible to remove this code \"\"\" def get_sqltype(dtype, flavor):", "are Null values. Booleans are hard because converting bool column", "pytype_name = \"date\" elif issubclass(pytype, np.bool_): pytype_name = \"bool\" return", "read_sql \"\"\" pandas_sql = PandasSQLAlchemy(con) table = pandas_sql.read_table( table_name, index_col=index_col,", ": Read SQL query into a DataFrame. 
read_sql \"\"\" pandas_sql", "name, flavor, keys) pandas_sql = pandasSQL_builder(con=con, flavor=flavor) return pandas_sql._create_sql_schema(frame, name)", "self.index[::-1]] sql_select = select(cols) else: sql_select = self.table.select() result =", "SQL database table into a DataFrame read_sql \"\"\" pandas_sql =", "= cur.fetchall() if not isinstance(result, list): result = list(result) return", "if params is a mapping args += [params] else: args", "data_list = [] for t in temp.itertuples(): data = tuple((self.maybe_asscalar(v)", "not None: temp = self.frame.copy() temp.index.names = self.index try: temp.reset_index(inplace=True)", "class PandasSQLTable(PandasObject): \"\"\" For mapping Pandas tables to SQL tables.", "# pragma: no cover result = [] return result def", "to parse as dates - Dict of ``{column_name: format string}``", "future, you can use the following: >>> execute(sql, con, params).fetchall()", "result = cur.rowcount try: con.commit() except Exception as e: excName", "self.frame[col_name] = _handle_date_column(df_col) elif col_type is float: # floats support", "except TypeError: fmt = None data_frame[col_name] = _handle_date_column(df_col, format=fmt) return", "= self.pd_sql.get_table(self.name) if self.table is None: self.table = self._create_table_statement() else:", "name, if_exists=if_exists, index=index, index_label=index_label) def has_table(table_name, con, flavor='sqlite'): \"\"\" Check", "in parse_dates: try: fmt = parse_dates[col_name] except TypeError: fmt =", "flavor_map.get(self.flavor) return len(self.execute(query).fetchall()) > 0 def get_table(self, table_name): return None", "If a DBAPI2 object, only sqlite3 is supported. index_col :", "boolean, default False Write DataFrame index as a column Notes", "is None: flavor = 'sqlite' if flavor not in ['sqlite',", "subclass of np.number. pytype_name = \"datetime\" elif pytype is datetime.date:", "support is dropped, it should be possible to remove this", "result in the future, you can use the following: >>>", "[names.insert(0, idx) for idx in self.index[::-1]] bracketed_names = [br_l +", "params : list, tuple or dict, optional List of parameters", "create_statement = template % {'name': self.name, 'columns': columns} return create_statement", "it will be further supported through SQLAlchemy engines. if_exists :", "of) string to specify column to set as index elif", "\"\"\" table = PandasSQLTableLegacy( name, self, frame=frame, index=index, if_exists=if_exists, index_label=index_label)", "= kwargs.pop('index', False) return to_sql(frame, name, con, flavor=flavor, if_exists=if_exists, index=index,", "floating point, useful for SQL result sets params : list,", ": SQLAlchemy engine Sqlite DBAPI conncection mode not supported index_col", "br_l + '%s' + br_r + ' %s' columns =", "self._sql_type_name(self.frame.index.get_level_values(i).dtype)) flv = self.pd_sql.flavor br_l = _SQL_SYMB[flv]['br_l'] # left val", "def has_table(table_name, con, flavor='sqlite'): \"\"\" Check if DataBase has named", "- With ``to_sql`` the index is written to the sql", "then plain list is returned. To obtain the same result", "\"float\" elif com.is_timedelta64_dtype(pytype): warnings.warn(\"the 'timedelta' type is not supported, and", "'index' not in self.frame.columns and self.frame.index.name is None: return ['index']", "is True, then the index names are used. A sequence", "to be executed or database table name. 
con : SQLAlchemy", "list or dict - List of column names to parse", "\"\"\"convert sql and params args to DBAPI2.0 compliant format\"\"\" args", "update queries. To obtain the same result in the future,", "sql, index_col=index_col, params=params, coerce_float=coerce_float, parse_dates=parse_dates) if pandas_sql.has_table(sql): return pandas_sql.read_table( sql,", "def insert(self): ins = self.insert_statement() data_list = [] temp =", "**kwargs): \"\"\"DEPRECIATED - use to_sql Write records stored in a", "= _safe_fetch(cur) if con is not None: try: cur.close() con.commit()", "if format in ['D', 's', 'ms', 'us', 'ns']: return to_datetime(col,", "__future__ import print_function, division from datetime import datetime, date, timedelta", "SQL result sets params : list, tuple or dict, optional", "con : SQLAlchemy engine or DBAPI2 connection (legacy mode) Using", "err: raise ValueError( \"duplicate name in index/columns: {0}\".format(err)) else: temp", "frame, table_name): table = PandasSQLTable(table_name, self, frame=frame) return str(table.sql_schema()) #", "con : DBAPI2 connection flavor : {'sqlite', 'mysql'}, default 'sqlite'", "temp = self.insert_data() keys = temp.columns for t in temp.itertuples():", "to_sql(self, frame, name, if_exists='fail', index=True, index_label=None): \"\"\" Write records stored", "bool return object class PandasSQL(PandasObject): \"\"\" Subclasses Should define read_sql", "tuple, optional List of parameters to pass to execute method.", "select(cols) else: sql_select = self.table.select() result = self.pd_sql.execute(sql_select) data =", "{ 'mysql': 'BIGINT', 'sqlite': 'INTEGER', }, 'datetime': { 'mysql': 'DATETIME',", "[self.table.c[n] for n in columns] if self.index is not None:", "force conversion if required \"\"\" # handle non-list entries for", "class PandasSQLTableLegacy(PandasSQLTable): \"\"\"Patch the PandasSQLTable for legacy support. Instead of", "failed on sql: %s\" % args[0]) raise_with_traceback(ex) def read_sql(self, sql,", "print('Looks like your connection failed, reconnecting...') return uquery(sql, con, retry=False)", "Number of affected rows \"\"\" warnings.warn( \"uquery is depreciated, and", "# TODO: support for multiIndex def __init__(self, name, pandas_sql_engine, frame=None,", "= parse_dates[col_name] except TypeError: fmt = None self.frame[col_name] = _handle_date_column(", "# parse dates as timestamp format = 's' if format", "data_list.append(data) cur = self.pd_sql.con.cursor() cur.executemany(ins, data_list) cur.close() self.pd_sql.con.commit() def _create_table_statement(self):", "is removed, # is_cursor should not be necessary. try: import", "is supported. flavor: {'sqlite', 'mysql'}, default 'sqlite' The flavor of", "_fetchall_as_list(self, cur): result = cur.fetchall() if not isinstance(result, list): result", "sqlalchemy engines to work with different sql flavors. See also", "of parameters to pass to execute method. parse_dates : list", "cur=cur, params=params) result = cur.rowcount try: con.commit() except Exception as", "objects (like decimal.Decimal) to floating point, useful for SQL result", "== 'OperationalError': # pragma: no cover print('Failed to commit, may", "default 'sqlite' The flavor of SQL to use. Ignored when", "a DataFrame. Returns a DataFrame corresponding to the result set", "True else: return False def get_table(self, table_name): return self.meta.tables.get(table_name) def", "but it will be further supported through SQLAlchemy engines. 
keys", "of column names to parse as dates - Dict of", "use any DB supported by that library. If a DBAPI2", "See also -------- read_sql_table : Read SQL database table into", "self.table = self.pd_sql.get_table(self.name) if self.table is None: self.table = self._create_table_statement()", "columns=column_names, coerce_float=coerce_float) self._harmonize_columns(parse_dates=parse_dates) if self.index is not None: self.frame.set_index(self.index, inplace=True)", "- use to_sql Write records stored in a DataFrame to", "flavor == 'sqlite': columns = ',\\n '.join('[%s] %s' % x", "no NA values. Datetimes should already be converted to np.datetime", "to pass to execute method. Returns ------- Number of affected", "SQL query into a DataFrame. read_sql \"\"\" pandas_sql = PandasSQLAlchemy(con)", "'br_l': '`', 'br_r': '`', 'wld': '%s' }, 'sqlite': { 'br_l':", "column Notes ----- This function is deprecated in favor of", "con if flavor is None: flavor = 'sqlite' if flavor", "import select cols = [self.table.c[n] for n in columns] if", "a table variable just use the Create Table statement\"\"\" def", "Iterable \"\"\" if cur is None: pandas_sql = pandasSQL_builder(con) else:", "def _create_table_statement(self): \"Return a CREATE TABLE statement to suit the", "= pandas_sql.read_table( table_name, index_col=index_col, coerce_float=coerce_float, parse_dates=parse_dates, columns=columns) if table is", "self.insert_data() keys = temp.columns for t in temp.itertuples(): data =", "name, con, flavor=flavor, if_exists=if_exists, index=index, **kwargs) # Append wrapped function", "is obtained from connection Returns ------- Results Iterable \"\"\" warnings.warn(", "params=params) result = cur.rowcount try: con.commit() except Exception as e:", "[index_label] if len(index_label) != nlevels: raise ValueError( \"Length of 'index_label'", "'fail' - fail: If table exists, do nothing. 
- replace:", "self.table.columns: col_name = sql_col.name try: df_col = self.frame[col_name] # the", "**kwargs) def read_table(self, table_name, index_col=None, coerce_float=True, parse_dates=None, columns=None): table =", "if com.is_datetime64_dtype(arr_or_dtype): try: tz = arr_or_dtype.tzinfo return DateTime(timezone=True) except: return", "as dates - Dict of ``{column_name: format string}`` where format", "np.floating): pytype_name = \"float\" elif issubclass(pytype, np.integer): pytype_name = \"int\"", ":func:`pandas.to_datetime` Especially useful with databases without native Datetime support, such", "facilitate data retrieval and to reduce dependency on DB-specific API.", "i def insert_data(self): if self.index is not None: temp =", "as e: excName = e.__class__.__name__ if excName != 'OperationalError': raise", "ins = self.insert_statement() data_list = [] temp = self.insert_data() keys", "'replace', 'append'}, default 'fail' - fail: If table exists, do", "string name of SQL table flavor : {'sqlite', 'mysql'}, default", "but instead of returning results, it returns the number of", "is_cursor=is_cursor) class PandasSQLTable(PandasObject): \"\"\" For mapping Pandas tables to SQL", "data_frame's column type align with an sql_table column types Need", "wildcard char if self.index is not None: [names.insert(0, idx) for", "self.pd_sql.get_table(self.name) if self.table is None: raise ValueError(\"Could not init table", "else: sql_select = self.table.select() result = self.pd_sql.execute(sql_select) data = result.fetchall()", "the index columns if nlevels == 1 and 'index' not", "Append wrapped function docstrings read_frame.__doc__ += read_sql.__doc__ frame_query.__doc__ += read_sql.__doc__", "without native Datetime support, such as SQLite Returns ------- DataFrame", "return index_label # return the used column labels for the", "columns} return create_statement def _sql_type_name(self, dtype): pytype = dtype.type pytype_name", "SQL query into a DataFrame \"\"\" pandas_sql = pandasSQL_builder(con) if", "datetime or col_type is date: if not issubclass(df_col.dtype.type, np.datetime64): self.frame[col_name]", "enumerate(self.index[::-1]): columns.insert(0, idx_label) column_types.insert(0, self._sql_type_name(self.frame.index.get_level_values(i).dtype)) flv = self.pd_sql.flavor br_l =", "Pandas tables to SQL tables. Uses fact that table is", "columns = list(map(str, self.frame.columns)) pat = re.compile('\\s+') if any(map(pat.search, columns)):", "table variable just use the Create Table statement\"\"\" def sql_schema(self):", "%s (%s) VALUES (%s)' % ( self.name, col_names, wildcards) return", "else: return to_datetime(col, coerce=True, format=format) def _parse_date_columns(data_frame, parse_dates): \"\"\" Force", "index coerce_float : boolean, default True Attempt to convert values", "(default) and `index` is True, then the index names are", "columns = [col_desc[0] for col_desc in cursor.description] data = self._fetchall_as_list(cursor)", "(legacy mode) Using SQLAlchemy makes it possible to use any", "fact that table is reflected by SQLAlchemy to do better", "No NA values, can convert ints and bools if col_type", "def get_table(self, table_name): return None # not supported in Legacy", "TABLE %(name)s ( %(columns)s )\"\"\" create_statement = template % {'name':", "import itertools import re import numpy as np import pandas.core.common", "PandasSQLTable(PandasObject): \"\"\" For mapping Pandas tables to SQL tables. 
Uses", "return DateTime(timezone=True) except: return DateTime if com.is_timedelta64_dtype(arr_or_dtype): warnings.warn(\"the 'timedelta' type", "floats support NA, can always convert! self.frame[col_name].astype(col_type, copy=False) elif len(df_col)", "only one column selected, then plain list is returned. To", "return i def insert_data(self): if self.index is not None: temp", "self.index is not None: [cols.insert(0, self.table.c[idx]) for idx in self.index[::-1]]", "for writing: index=True to include index in sql table if", "pytype_name = \"bool\" return _SQL_TYPES[pytype_name][self.pd_sql.flavor] class PandasSQLLegacy(PandasSQL): def __init__(self, con,", "if does not exist. index : boolean, default True Write", "if l is not None else \"level_{0}\".format(i) for i, l", "if self.index is not None: [cols.insert(0, self.table.c[idx]) for idx in", "warnings.warn(_MYSQL_WARNING, FutureWarning) return _get_schema_legacy(frame, name, flavor, keys) pandas_sql = pandasSQL_builder(con=con,", "= frame.to_frame() elif not isinstance(frame, DataFrame): raise NotImplementedError pandas_sql.to_sql(frame, name,", "class enables convertion between DataFrame and SQL databases using SQLAlchemy", "data_frame.set_index(index_col, inplace=True) return data_frame def to_sql(self, frame, name, if_exists='fail', index=True,", "[list(params)] return args def _handle_date_column(col, format=None): if isinstance(format, dict): return", "\"\"\" Execute the given SQL query using the provided connection", "con : SQLAlchemy engine Sqlite DBAPI conncection mode not supported", "TABLE statement to suit the contents of a DataFrame.\" columns", "the time. \"\"\" # TODO: support for multiIndex def __init__(self,", "---------- sql : string SQL query to be executed con", "elif com.is_float_dtype(arr_or_dtype): return Float elif com.is_integer_dtype(arr_or_dtype): # TODO: Refine integer", "def create(self): self.pd_sql.execute(self.table) def insert_statement(self): names = list(map(str, self.frame.columns)) flv", "',\\n '.join(col_template % x for x in zip(columns, column_types)) template", "elif issubclass(pytype, np.bool_): pytype_name = \"bool\" return _SQL_TYPES[pytype_name][flavor] lookup_type =", "pandas_sql_engine, frame=None, index=True, if_exists='fail', prefix='pandas', index_label=None): self.name = name self.pd_sql", "coerce_float=coerce_float, parse_dates=parse_dates) def read_sql(sql, con, index_col=None, coerce_float=True, params=None, parse_dates=None, columns=None):", "or connection+sql flavor\") class PandasSQLAlchemy(PandasSQL): \"\"\" This class enables convertion", "# right val quote char wld = _SQL_SYMB[flv]['wld'] # wildcard", "e: try: self.con.rollback() except Exception: # pragma: no cover ex", "read_sql(*args, **kwargs) def write_frame(frame, name, con, flavor='sqlite', if_exists='fail', **kwargs): \"\"\"DEPRECIATED", "be given if the DataFrame uses MultiIndex. \"\"\" if if_exists", "mode self.table = self.pd_sql.get_table(self.name) if self.table is None: raise ValueError(\"Could", "exists, insert data. Create if does not exist. \"\"\" table", "cur except Exception as e: try: self.con.rollback() except Exception: #", "TABLE %(name)s ( %(columns)s %(keystr)s );\"\"\" create_statement = template %", "selected, then plain list is returned. To obtain the same", "parameters \"\"\" # When support for DBAPI connections is removed,", "in a DataFrame to a SQL database. 
Parameters ---------- frame:", "'INSERT INTO %s (%s) VALUES (%s)' % ( self.name, col_names,", "elif com.is_integer_dtype(arr_or_dtype): # TODO: Refine integer size. return Integer elif", "found\" % table_name, con) def read_sql_query(sql, con, index_col=None, coerce_float=True, params=None,", "# for reading: index=(list of) string to specify column to", "further supported through SQLAlchemy engines. if_exists : {'fail', 'replace', 'append'},", "sqlite3 is supported. flavor : {'sqlite', 'mysql'}, default 'sqlite' The", "ex = DatabaseError( \"Execution failed on sql: %s\\n%s\\nunable to rollback\"", "from sqlalchemy.schema import CreateTable return str(CreateTable(self.table)) def create(self): self.table.create() def", "\"and will be removed in future versions. \" \"MySQL will", "e.__class__.__name__ if excName == 'OperationalError': return [] def tquery(sql, con=None,", "if col_type is int or col_type is bool: self.frame[col_name].astype(col_type, copy=False)", "\"\"\"DEPRECIATED - use read_sql \"\"\" warnings.warn(\"read_frame is depreciated, use read_sql\",", "else: raise ValueError(\"Table %s not found\" % table_name, con) def", "AttributeError: return i def insert_data(self): if self.index is not None:", "a DataFrame. Given a table name and an SQLAlchemy engine,", "index name(s) if index_label is not None: if not isinstance(index_label,", "def write_frame(frame, name, con, flavor='sqlite', if_exists='fail', **kwargs): \"\"\"DEPRECIATED - use", "then the index names are used. A sequence should be", "%s\\n%s\\nunable to rollback\" % (args[0], e)) raise_with_traceback(ex) ex = DatabaseError(\"Execution", "pandasSQL_builder(con, flavor=flavor) if isinstance(frame, Series): frame = frame.to_frame() elif not", "else: args += [list(params)] return args def _handle_date_column(col, format=None): if", "row in given sql query. If only one column selected,", "raise ValueError(\"Table '%s' already exists.\" % name) elif if_exists ==", "frame. Parameters ---------- frame : DataFrame name : string name", "Date if isinstance(sqltype, Float): return float if isinstance(sqltype, Integer): #", "DataFrame corresponding to the result set of the query string.", "self.pd_sql.con.commit() def _create_table_statement(self): \"Return a CREATE TABLE statement to suit", "in index/columns: {0}\".format(err)) else: temp = self.frame return temp def", "except KeyError: pass # this column not in results def", "= \"\"\"CREATE TABLE %(name)s ( %(columns)s )\"\"\" create_statement = template", "keys = temp.columns for t in temp.itertuples(): data = dict((k,", "sqlalchemy.types import Integer, Float, Boolean, DateTime, Date if isinstance(sqltype, Float):", ": Read SQL database table into a DataFrame read_sql_query :", "to do better type convertions. Also holds various flags needed", "table exists, insert data. Create if does not exist. \"\"\"", "will not be changed. 
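``write_frame`` is the deprecated spelling wrapped around ``to_sql``; note from the fragments that it defaulted to ``index=False`` while ``to_sql`` writes the index unless told otherwise. A migration sketch with illustrative names:

import sqlite3
import pandas as pd
from pandas.io import sql

conn = sqlite3.connect(':memory:')                    # illustrative connection
frame = pd.DataFrame({'a': [1, 2]})

# Deprecated spelling kept for backwards compatibility:
sql.write_frame(frame, 'demo', conn, flavor='sqlite', if_exists='replace')

# Equivalent call on the current API (index=False mimics write_frame's old default):
sql.to_sql(frame, 'demo', conn, flavor='sqlite', if_exists='replace', index=False)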
\" \"In pandas versions < 0.14,", "= list(map(str, self.frame.columns)) flv = self.pd_sql.flavor br_l = _SQL_SYMB[flv]['br_l'] #", "for the index columns if nlevels == 1 and 'index'", "from pandas.core.base import PandasObject from pandas.tseries.tools import to_datetime class SQLAlchemyRequired(ImportError):", "= result.keys() data_frame = DataFrame.from_records( data, columns=columns, coerce_float=coerce_float) _parse_date_columns(data_frame, parse_dates)", "Returns ------- DataFrame See also -------- read_sql_query : Read SQL", "is_cursor=False): \"\"\" Convenience function to return the correct PandasSQL subclass", "com.is_timedelta64_dtype(pytype): warnings.warn(\"the 'timedelta' type is not supported, and will be", "non-datetime columns to be read as such. Supports both string", "__init__(self, name, pandas_sql_engine, frame=None, index=True, if_exists='fail', prefix='pandas', index_label=None): self.name =", "[] return result def uquery(sql, con=None, cur=None, retry=True, params=None): \"\"\"", "'`', 'br_r': '`', 'wld': '%s' }, 'sqlite': { 'br_l': '[',", "wrappers / abstractions to both facilitate data retrieval and to", "object, only sqlite3 is supported. flavor: {'sqlite', 'mysql'}, default 'sqlite'", "dict}``, where the arg dict corresponds to the keyword arguments", "replace: If table exists, drop it, recreate it, and insert", "read_sql\", FutureWarning) return read_sql(*args, **kwargs) def write_frame(frame, name, con, flavor='sqlite',", "fmt = parse_dates[col_name] except TypeError: fmt = None data_frame[col_name] =", "con=con, retry=False) if result and len(result[0]) == 1: # python", "sequence should be given if the DataFrame uses MultiIndex. \"\"\"", "need to restart interpreter') else: raise traceback.print_exc() if retry: return", "flavor: {'sqlite', 'mysql'}, default 'sqlite' The flavor of SQL to", "from pandas.tseries.tools import to_datetime class SQLAlchemyRequired(ImportError): pass class DatabaseError(IOError): pass", "string SQL query to be executed con: DBAPI2 connection cur:", "coerce_float=True, parse_dates=None, columns=None): if columns is not None and len(columns)", "of SQL table in database con : SQLAlchemy engine Sqlite", "query wrappers / abstractions to both facilitate data retrieval and", "sequence, default None Column label for index column(s). If None", "is supported. cur : depreciated, cursor is obtained from connection", "exist. \"\"\" table = PandasSQLTableLegacy( name, self, frame=frame, index=index, if_exists=if_exists,", "hasattr(params, 'keys'): # test if params is a mapping args", "to be executed con : SQLAlchemy engine or sqlite3 DBAPI2", "_safe_fetch(cur) if con is not None: try: cur.close() con.commit() except", "True Write DataFrame index as a column index_label : string", "**kwargs) def frame_query(*args, **kwargs): \"\"\"DEPRECIATED - use read_sql \"\"\" warnings.warn(\"frame_query", "cur = execute(sql, con, cur=cur) result = _safe_fetch(cur) if con", "parse_dates: df_col = data_frame[col_name] try: fmt = parse_dates[col_name] except TypeError:", "\"\"\" warnings.warn(\"read_frame is depreciated, use read_sql\", FutureWarning) return read_sql(*args, **kwargs)", "VALUES (%s)' % ( self.name, col_names, wildcards) return insert_statement def", "for backward compatibility) and will delegate to the specific function", "\"\"\" pandas_sql = pandasSQL_builder(con) return pandas_sql.read_sql( sql, index_col=index_col, params=params, coerce_float=coerce_float,", "If table exists, drop it, recreate it, and insert data.", "table). 
Returns ------- DataFrame Notes ----- This function is a", "self.frame[col_name] # the type the dataframe column should have col_type", "---------- frame : DataFrame name : string con : DBAPI2", "copy=False) # Handle date parsing if col_name in parse_dates: try:", "*columns) def _harmonize_columns(self, parse_dates=None): \"\"\" Make a data_frame's column type", "a data_frame's column type align with an sql_table column types", "else: return False def get_table(self, table_name): return self.meta.tables.get(table_name) def drop_table(self,", "%s' % x for x in column_types) else: columns =", "Need to work around limited NA value support. Floats are", "elif not isinstance(frame, DataFrame): raise NotImplementedError pandas_sql.to_sql(frame, name, if_exists=if_exists, index=index,", "string, optional Column to set as index coerce_float : boolean,", "if_exists == 'append': self.table = self.pd_sql.get_table(self.name) if self.table is None:", "pandasSQL_builder(con) else: pandas_sql = pandasSQL_builder(cur, is_cursor=True) args = _convert_params(sql, params)", "default 'fail' fail: If table exists, do nothing. replace: If", "if index_label is specified, set this as index name(s) if", "len(df_col) == df_col.count(): # No NA values, can convert ints", "[parse_dates] for sql_col in self.table.columns: col_name = sql_col.name try: df_col", "test if params is a mapping args += [params] else:", "- use read_sql \"\"\" warnings.warn(\"read_frame is depreciated, use read_sql\", FutureWarning)", "as index for the returned DataFrame object. coerce_float : boolean,", "UserWarning) pytype_name = \"int\" elif issubclass(pytype, np.integer): pytype_name = \"int\"", "of column names to select from sql table Returns -------", "None else \"level_{0}\".format(i) for i, l in enumerate(self.frame.index.names)] # for", "optional Column name to use as index for the returned", "DataFrame index as a column Notes ----- This function is", "except: return DateTime if com.is_timedelta64_dtype(arr_or_dtype): warnings.warn(\"the 'timedelta' type is not", "e)) raise_with_traceback(ex) ex = DatabaseError(\"Execution failed on sql: %s\" %", "pragma: no cover excName = e.__class__.__name__ if excName == 'OperationalError':", "FROM sqlite_master \" \"WHERE type='table' AND name='%s';\") % name, 'mysql':", "can convert ints and bools if col_type is int or", "table_name): table = PandasSQLTable(table_name, self, frame=frame) return str(table.sql_schema()) # ----", "queries. To obtain the same result in the future, you", "the given SQL query using the provided connection object. Parameters", "{ 'mysql': { 'br_l': '`', 'br_r': '`', 'wld': '%s' },", "default integer index will be used. Parameters ---------- sql :", "format string}`` where format string is strftime compatible in case", "result.keys() self.frame = DataFrame.from_records( data, columns=column_names, coerce_float=coerce_float) self._harmonize_columns(parse_dates=parse_dates) if self.index", "NA values, can convert ints and bools if col_type is", "in enumerate(self.index[::-1]): idx_type = self._sqlalchemy_type( self.frame.index.get_level_values(i)) columns.insert(0, Column(idx_label, idx_type, index=True))", "when not specified index = kwargs.pop('index', False) return to_sql(frame, name,", "drop_table(self, table_name): if self.engine.has_table(table_name): self.get_table(table_name).drop() self.meta.clear() self.meta.reflect() def _create_sql_schema(self, frame,", "used. 
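``read_sql`` is the convenience wrapper noted here: with an SQLAlchemy engine it delegates to ``read_sql_table`` when given an existing table name and to ``read_sql_query`` otherwise. A sketch with an illustrative engine and table:

import pandas as pd
from sqlalchemy import create_engine
from pandas.io import sql

engine = create_engine('sqlite:///:memory:')          # illustrative engine
pd.DataFrame({'a': [1, 2]}).to_sql('demo', engine)

df_table = sql.read_sql('demo', engine)                             # dispatched to read_sql_table
df_query = sql.read_sql('SELECT a FROM demo WHERE a > 1', engine)   # dispatched to read_sql_query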
Parameters ---------- sql : string SQL query to be", "depreciated, cursor is obtained from connection params: list or tuple,", "This function does not support DBAPI connections. Parameters ---------- table_name", "database table into a DataFrame. Parameters ---------- sql : string", "without native Datetime support, such as SQLite columns : list", "types Need to work around limited NA value support. Floats", "one of the columns as the index, otherwise default integer", "retry: return tquery(sql, con=con, retry=False) if result and len(result[0]) ==", "== 1 and 'index' not in self.frame.columns and self.frame.index.name is", "is not supported, and will be \" \"written as integer", "sql strings and handler class for access to DBs without", "if col_type is datetime or col_type is date: if not", "read_sql and to_sql \"\"\" def read_sql(self, *args, **kwargs): raise ValueError(", "cur = execute(sql, con, cur=cur, params=params) result = cur.rowcount try:", "SQL database. Parameters ---------- frame: DataFrame name: name of SQL", "% name) elif if_exists == 'replace': self.pd_sql.drop_table(self.name) self.table = self._create_table_statement()", "it returns the number of rows affected. Good for update", "SQLAlchemyRequired(ImportError): pass class DatabaseError(IOError): pass #------------------------------------------------------------------------------ # Helper functions def", "tables(self): return self.meta.tables def has_table(self, name): if self.meta.tables.get(name) is not", "of SQL table flavor : {'sqlite', 'mysql'}, default 'sqlite' The", "uquery(sql, con, retry=False) return result #------------------------------------------------------------------------------ #--- Read and write", "name or sql query). See also -------- read_sql_table : Read", "drop it, recreate it, and insert data. - append: If", "# Handle date parsing if col_name in parse_dates: try: fmt", "\"\"\" if con is None: if flavor == 'mysql': warnings.warn(_MYSQL_WARNING,", "retry=True, params=None): \"\"\" DEPRECATED. Does the same thing as tquery,", "----- This function is deprecated in favor of ``to_sql``. There", "not None: return table else: raise ValueError(\"Table %s not found\"", "the provided connection object. Parameters ---------- sql : string Query", "FutureWarning) return read_sql(*args, **kwargs) def frame_query(*args, **kwargs): \"\"\"DEPRECIATED - use", "There are however two differences: - With ``to_sql`` the index", "various flags needed to avoid having to pass them between", "dtype.type pytype_name = \"text\" if issubclass(pytype, np.floating): pytype_name = \"float\"", "Date): return date if isinstance(sqltype, Boolean): return bool return object", "table con: SQLAlchemy engine or sqlite3 DBAPI2 connection Using SQLAlchemy", "flavor=flavor) if isinstance(frame, Series): frame = frame.to_frame() elif not isinstance(frame,", "return pandas_sql.read_sql( sql, index_col=index_col, params=params, coerce_float=coerce_float, parse_dates=parse_dates) if pandas_sql.has_table(sql): return", "traceback import itertools import re import numpy as np import", "read_sql \"\"\" pandas_sql = pandasSQL_builder(con) return pandas_sql.read_sql( sql, index_col=index_col, params=params,", "\"int\" elif issubclass(pytype, np.integer): pytype_name = \"int\" elif issubclass(pytype, np.datetime64)", "not meta: from sqlalchemy.schema import MetaData meta = MetaData(self.engine) meta.reflect(self.engine)", "_MYSQL_WARNING = (\"The 'mysql' flavor with DBAPI connection is deprecated", "be removed in future versions. 
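A minimal sketch of ``execute`` with a parameterised query, assuming an illustrative sqlite3 connection (whose DBAPI placeholder is ``?``; mysql would use ``%s``):

import sqlite3
from pandas.io import sql

conn = sqlite3.connect(':memory:')                    # illustrative connection
conn.execute('CREATE TABLE demo (a INTEGER)')
conn.execute('INSERT INTO demo VALUES (1), (2)')

cur = sql.execute('SELECT a FROM demo WHERE a > ?', conn, params=[1])
rows = cur.fetchall()                                 # [(2,)]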
\" \"MySQL will be further", "def exists(self): return self.pd_sql.has_table(self.name) def sql_schema(self): from sqlalchemy.schema import CreateTable", "index_col=index_col, params=params, coerce_float=coerce_float, parse_dates=parse_dates) if pandas_sql.has_table(sql): return pandas_sql.read_table( sql, index_col=index_col,", "SQL query using the provided connection object. Parameters ---------- sql", "tquery(sql, con=None, cur=None, retry=True): \"\"\" DEPRECATED. Returns list of tuples", "compatible in case of parsing string times or is one", "index_col=index_col, params=params, coerce_float=coerce_float, parse_dates=parse_dates) def read_sql(sql, con, index_col=None, coerce_float=True, params=None,", "elif issubclass(col.dtype.type, np.floating) or issubclass(col.dtype.type, np.integer): # parse dates as", "tables to SQL tables. Uses fact that table is reflected", "None: try: cur.close() con.commit() except Exception as e: excName =", "= sql_col.name try: df_col = self.frame[col_name] # the type the", "as SQLite Returns ------- DataFrame See also -------- read_sql_table :", "if_exists=if_exists, index_label=index_label) table.insert() @property def tables(self): return self.meta.tables def has_table(self,", "col_type = self._numpy_type(sql_col.type) if col_type is datetime or col_type is", "pandas_sql.has_table(sql): return pandas_sql.read_table( sql, index_col=index_col, coerce_float=coerce_float, parse_dates=parse_dates, columns=columns) else: return", "\"float\" elif issubclass(pytype, np.integer): pytype_name = \"int\" elif issubclass(pytype, np.datetime64)", "sets params : list, tuple or dict, optional List of", "a column index_label : string or sequence, default None Column", "default None Column label for index column(s). If None is", "return Boolean return Text def _numpy_type(self, sqltype): from sqlalchemy.types import", "raise ValueError( \"PandasSQL must be created with an SQLAlchemy engine", "if excName == 'OperationalError': # pragma: no cover print('Failed to", "pandas_sql = PandasSQLAlchemy(con) table = pandas_sql.read_table( table_name, index_col=index_col, coerce_float=coerce_float, parse_dates=parse_dates,", "import numpy as np import pandas.core.common as com from pandas.compat", "= re.compile('\\s+') if any(map(pat.search, columns)): warnings.warn(_SAFE_NAMES_WARNING) column_types = [self._sql_type_name(typ) for", "not None: data_frame.set_index(index_col, inplace=True) return data_frame def _fetchall_as_list(self, cur): result", "to a SQL database. Parameters ---------- frame: DataFrame name: name", "com.is_datetime64_dtype(arr_or_dtype): try: tz = arr_or_dtype.tzinfo return DateTime(timezone=True) except: return DateTime", "columns to use a primary key con: an open SQL", "# We want to write a frame if self.pd_sql.has_table(self.name): if", "if if_exists == 'fail': raise ValueError(\"Table '%s' already exists.\" %", "tables. Uses fact that table is reflected by SQLAlchemy to", "['sqlite', 'mysql']: raise NotImplementedError else: self.flavor = flavor def execute(self,", "string_types): keys = (keys,) keystr = ', PRIMARY KEY (%s)'", "DBAPI connections is removed, # is_cursor should not be necessary.", "also force conversion if required \"\"\" # handle non-list entries", "cur=None, retry=True, params=None): \"\"\" DEPRECATED. Does the same thing as", "raise_with_traceback(ex) ex = DatabaseError(\"Execution failed on sql: %s\" % args[0])", "engines. if_exists : {'fail', 'replace', 'append'}, default 'fail' - fail:", "retry=True): \"\"\" DEPRECATED. 
Returns list of tuples corresponding to each", "sql : string SQL query to be executed or database", "or sql query). See also -------- read_sql_table : Read SQL", "raise NotImplementedError pandas_sql.to_sql(frame, name, if_exists=if_exists, index=index, index_label=index_label) def has_table(table_name, con,", "SQL tables. Uses fact that table is reflected by SQLAlchemy", "in the future, you can use the following: >>> execute(sql,", "work around limited NA value support. Floats are always fine,", "table = PandasSQLTable( name, self, frame=frame, index=index, if_exists=if_exists, index_label=index_label) table.insert()", "return bool return object class PandasSQL(PandasObject): \"\"\" Subclasses Should define", "except Exception as e: # pragma: no cover excName =", "for the returned DataFrame object. coerce_float : boolean, default True", "date: return Date if com.is_datetime64_dtype(arr_or_dtype): try: tz = arr_or_dtype.tzinfo return", "'DATETIME', 'sqlite': 'TIMESTAMP', }, 'date': { 'mysql': 'DATE', 'sqlite': 'TIMESTAMP',", "None: flavor = 'sqlite' if flavor not in ['sqlite', 'mysql']:", "case of parsing integer timestamps - Dict of ``{column_name: arg", "pandasSQL_builder(con) return pandas_sql.read_sql( sql, index_col=index_col, params=params, coerce_float=coerce_float, parse_dates=parse_dates) def read_sql(sql,", "in self.frame.columns and self.frame.index.name is None: return ['index'] else: return", "'append'}, default 'fail' fail: If table exists, do nothing. replace:", "column name to use as index for the returned DataFrame", "= pandas_sql_engine self.prefix = prefix self.frame = frame self.index =", "PandasSQLAlchemy(PandasSQL): \"\"\" This class enables convertion between DataFrame and SQL", "len(columns) > 0: from sqlalchemy import select cols = [self.table.c[n]", "DB _SQL_TYPES = { 'text': { 'mysql': 'VARCHAR (63)', 'sqlite':", "# Append wrapped function docstrings read_frame.__doc__ += read_sql.__doc__ frame_query.__doc__ +=", "of the columns as the index, otherwise default integer index", "def has_table(self, name): if self.meta.tables.get(name) is not None: return True", "subclass based on the provided parameters \"\"\" # When support", "flavor=flavor) return pandas_sql._create_sql_schema(frame, name) def _get_schema_legacy(frame, name, flavor, keys=None): \"\"\"Old", "columns=columns, coerce_float=coerce_float) _parse_date_columns(data_frame, parse_dates) if index_col is not None: data_frame.set_index(index_col,", "the following: >>> execute(sql, con, params).fetchall() Parameters ---------- sql: string", "pandas_sql.to_sql(frame, name, if_exists=if_exists, index=index, index_label=index_label) def has_table(table_name, con, flavor='sqlite'): \"\"\"", "+ '%s' + br_r + ' %s' columns = ',\\n", "except Exception as e: excName = e.__class__.__name__ if excName !=", "if params is not None: if hasattr(params, 'keys'): # test", "[col_desc[0] for col_desc in cursor.description] data = self._fetchall_as_list(cursor) cursor.close() data_frame", "deprecated in favor of ``to_sql``. 
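``tquery`` is deprecated, as the warning here says; the suggested replacement is a plain ``execute(...).fetchall()``. A before/after sketch with illustrative table data:

import sqlite3
from pandas.io import sql

conn = sqlite3.connect(':memory:')                    # illustrative connection
conn.execute('CREATE TABLE demo (a INTEGER, b INTEGER)')
conn.execute('INSERT INTO demo VALUES (1, 10), (2, 20)')

rows = sql.tquery('SELECT a, b FROM demo', conn)               # deprecated: [(1, 10), (2, 20)]
rows = sql.execute('SELECT a, b FROM demo', conn).fetchall()   # suggested replacement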
There are however two differences:", "self.frame.columns)) pat = re.compile('\\s+') if any(map(pat.search, columns)): warnings.warn(_SAFE_NAMES_WARNING) column_types =", "in ['sqlite', 'mysql']: raise NotImplementedError else: self.flavor = flavor def", "TABLE %s\" % name self.execute(drop_sql) def _create_sql_schema(self, frame, table_name): table", "i): try: return np.asscalar(i) except AttributeError: return i def insert_data(self):", "Exception as e: excName = e.__class__.__name__ if excName != 'OperationalError':", "in columns] if self.index is not None: [cols.insert(0, self.table.c[idx]) for", "result sets params : list, tuple or dict, optional List", "self.pd_sql.execute(sql_select) data = result.fetchall() column_names = result.keys() self.frame = DataFrame.from_records(", "result = [] return result def uquery(sql, con=None, cur=None, retry=True,", "index, index_label): # for writing: index=True to include index in", "parse_dates = [parse_dates] for sql_col in self.table.columns: col_name = sql_col.name", "return object class PandasSQL(PandasObject): \"\"\" Subclasses Should define read_sql and", "'TIMESTAMP', }, 'bool': { 'mysql': 'BOOLEAN', 'sqlite': 'INTEGER', } }", "'BOOLEAN', 'sqlite': 'INTEGER', } } # SQL enquote and wildcard", "issubclass(pytype, np.datetime64) or pytype is datetime: # Caution: np.datetime64 is", "% name} query = flavor_map.get(self.flavor) return len(self.execute(query).fetchall()) > 0 def", "holds various flags needed to avoid having to pass them", ": string name of SQL table flavor : {'sqlite', 'mysql'},", "\"\"\" from __future__ import print_function, division from datetime import datetime,", "if excName == 'OperationalError': return [] def tquery(sql, con=None, cur=None,", "read_sql_table : Read SQL database table into a DataFrame read_sql", "= [] temp = self.insert_data() keys = temp.columns for t", "ex = DatabaseError(\"Execution failed on sql: %s\" % args[0]) raise_with_traceback(ex)", "---------- frame: DataFrame name: name of SQL table flavor: {'sqlite',", "pass to execute method. Returns ------- Results Iterable \"\"\" if", "self.insert_statement() data_list = [] temp = self.insert_data() keys = temp.columns", "not be necessary. try: import sqlalchemy if isinstance(con, sqlalchemy.engine.Engine): return", "list): return index else: return None def _create_table_statement(self): from sqlalchemy", "def to_sql(self, frame, name, if_exists='fail', index=True, index_label=None): \"\"\" Write records", "mode) Using SQLAlchemy makes it possible to use any DB", "legacy support is dropped, it should be possible to remove", "a convenience wrapper around ``read_sql_table`` and ``read_sql_query`` (and for backward", "when reading a table). Returns ------- DataFrame Notes ----- This", "to reduce dependency on DB-specific API. 
\"\"\" from __future__ import", "<gh_stars>0 \"\"\" Collection of query wrappers / abstractions to both", "timestamps - Dict of ``{column_name: arg dict}``, where the arg", "DataFrame): raise NotImplementedError pandas_sql.to_sql(frame, name, if_exists=if_exists, index=index, index_label=index_label) def has_table(table_name,", "SQLAlchemy makes it possible to use any DB supported by", "issubclass(col.dtype.type, np.integer): # parse dates as timestamp format = 's'", "def read_sql_table(table_name, con, index_col=None, coerce_float=True, parse_dates=None, columns=None): \"\"\"Read SQL database", "instead.\", FutureWarning) cur = execute(sql, con, cur=cur, params=params) result =", "into a DataFrame \"\"\" pandas_sql = pandasSQL_builder(con) if isinstance(pandas_sql, PandasSQLLegacy):", "read_sql \"\"\" warnings.warn(\"frame_query is depreciated, use read_sql\", FutureWarning) return read_sql(*args,", "databases without native Datetime support, such as SQLite columns :", "}, 'float': { 'mysql': 'FLOAT', 'sqlite': 'REAL', }, 'int': {", "= select(cols) else: sql_select = self.table.select() result = self.pd_sql.execute(sql_select) data", "pytype_name = \"datetime\" elif pytype is datetime.date: pytype_name = \"date\"", "arr_or_dtype is date: return Date if com.is_datetime64_dtype(arr_or_dtype): try: tz =", "self.name, col_names, wildcards) return insert_statement def insert(self): ins = self.insert_statement()", "key con: an open SQL database connection object or an", "Given a table name and an SQLAlchemy engine, returns a", "``to_sql``. There are however two differences: - With ``to_sql`` the", "be executed con: DBAPI2 connection cur: depreciated, cursor is obtained", "database. Parameters ---------- frame : DataFrame name : string con", "``read_sql_query`` (and for backward compatibility) and will delegate to the", "= [parse_dates] for sql_col in self.table.columns: col_name = sql_col.name try:", "in temp.itertuples(): data = dict((k, self.maybe_asscalar(v)) for k, v in", "the provided parameters \"\"\" # When support for DBAPI connections", "to_sql(self, *args, **kwargs): raise ValueError( \"PandasSQL must be created with", "{ 'mysql': 'DATETIME', 'sqlite': 'TIMESTAMP', }, 'date': { 'mysql': 'DATE',", "'mysql': 'FLOAT', 'sqlite': 'REAL', }, 'int': { 'mysql': 'BIGINT', 'sqlite':", "'.join(col_template % x for x in zip(columns, column_types)) template =", "return args def _handle_date_column(col, format=None): if isinstance(format, dict): return to_datetime(col,", "future, you can use the following: >>> execute(sql, con).rowcount Parameters", "= name self.pd_sql = pandas_sql_engine self.prefix = prefix self.frame =", "if self.pd_sql.has_table(self.name): if if_exists == 'fail': raise ValueError(\"Table '%s' already", "and uquery def _safe_fetch(cur): try: result = cur.fetchall() if not", "con, cur=None, params=None): \"\"\" Execute the given SQL query using", "always be floats if there are Null values. Booleans are", "'fail' fail: If table exists, do nothing. replace: If table", "DataFrame object. coerce_float : boolean, default True Attempt to convert", "con, flavor=flavor, if_exists=if_exists, index=index, **kwargs) # Append wrapped function docstrings", "of parameters to pass to execute method. 
Returns ------- Number", "= parse_dates[col_name] except TypeError: fmt = None data_frame[col_name] = _handle_date_column(df_col,", "def read_table(self, table_name, index_col=None, coerce_float=True, parse_dates=None, columns=None): table = PandasSQLTable(table_name,", "Parameters ---------- table_name: string Name of SQL table con: SQLAlchemy", "_SQL_SYMB[flv]['br_r'] # right val quote char wld = _SQL_SYMB[flv]['wld'] #", "= self._create_table_statement() self.create() else: # no data provided, read-only mode", "True, then the index names are used. A sequence should", ": string, optional Column name to use as index for", "self.pd_sql.meta, *columns) def _harmonize_columns(self, parse_dates=None): \"\"\" Make a data_frame's column", "from sql table Returns ------- DataFrame See also -------- read_sql_query", "Nones with false. Therefore only convert bool if there are", "self.table.create() def insert_statement(self): return self.table.insert() def maybe_asscalar(self, i): try: return", "be floats if there are Null values. Booleans are hard", "PandasSQLAlchemy(con) table = pandas_sql.read_table( table_name, index_col=index_col, coerce_float=coerce_float, parse_dates=parse_dates, columns=columns) if", "conncection mode not supported index_col : string, optional Column to", "is not None: for i, idx_label in enumerate(self.index[::-1]): columns.insert(0, idx_label)", "parse_dates=None): \"\"\" Make a data_frame's column type align with an", "read_sql(self, *args, **kwargs): raise ValueError( \"PandasSQL must be created with", "None: if flavor == 'mysql': warnings.warn(_MYSQL_WARNING, FutureWarning) return _get_schema_legacy(frame, name,", "time. \"\"\" # TODO: support for multiIndex def __init__(self, name,", "DataFrame. Given a table name and an SQLAlchemy engine, returns", "class PandasSQLLegacy(PandasSQL): def __init__(self, con, flavor, is_cursor=False): self.is_cursor = is_cursor", "dict, optional List of parameters to pass to execute method.", "read_sql_query(sql, con, index_col=None, coerce_float=True, params=None, parse_dates=None): \"\"\"Read SQL query into", "column_types) keystr = '' if keys is not None: if", "dtype: get_sqltype(dtype, flavor) column_types = lzip(frame.dtypes.index, map(lookup_type, frame.dtypes)) if flavor", "the returned DataFrame object. coerce_float : boolean, default True Attempt", "type align with an sql_table column types Need to work", "# No NA values, can convert ints and bools if", "[Column(name, typ) for name, typ in zip(columns, column_types)] if self.index", "are hard because converting bool column with None replaces all", "---- SQL without SQLAlchemy --- # Flavour specific sql strings", "np.integer): # parse dates as timestamp format = 's' if", "return pandas_sql.read_sql( sql, index_col=index_col, params=params, coerce_float=coerce_float, parse_dates=parse_dates) def read_sql(sql, con,", "read_sql(self, sql, index_col=None, coerce_float=True, params=None, parse_dates=None): args = _convert_params(sql, params)", "it possible to use any DB supported by that library.", "get_table(self, table_name): return None # not supported in Legacy mode", "The flavor of SQL to use. 
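``uquery`` is likewise deprecated; the replacement suggested by the warning is ``execute(...).rowcount``. A sketch with an illustrative table (note that, unlike ``uquery``, ``execute`` does not commit for you):

import sqlite3
from pandas.io import sql

conn = sqlite3.connect(':memory:')                    # illustrative connection
conn.execute('CREATE TABLE demo (a INTEGER)')
conn.execute('INSERT INTO demo VALUES (1), (2), (3)')

n = sql.execute('DELETE FROM demo WHERE a > 1', conn).rowcount   # 2 rows affected
conn.commit()                                         # uquery committed; execute leaves it to you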
def execute(sql, con, cur=None, params=None):
    """
    Execute the given SQL query using the provided connection object.

    Parameters
    ----------
    sql : string
        Query to be executed
    con : SQLAlchemy engine or DBAPI2 connection (legacy mode)
        Using SQLAlchemy makes it possible to use any DB supported by that
        library.
        If a DBAPI2 object, only sqlite3 is supported.
    cur : deprecated, cursor is obtained from connection
    params : list or tuple, optional
        List of parameters to pass to execute method.

    Returns
    -------
    Results Iterable
    """
    if cur is None:
        pandas_sql = pandasSQL_builder(con)
    else:
        pandas_sql = pandasSQL_builder(cur, is_cursor=True)
    args = _convert_params(sql, params)
    return pandas_sql.execute(*args)


#------------------------------------------------------------------------------
# Deprecated tquery and uquery

def _safe_fetch(cur):
    try:
        result = cur.fetchall()
        if not isinstance(result, list):
            result = list(result)
        return result
    except Exception as e:  # pragma: no cover
        excName = e.__class__.__name__
        if excName == 'OperationalError':
            return []


def tquery(sql, con=None, cur=None, retry=True):
    """
    DEPRECATED. Returns list of tuples corresponding to each row in given
    sql query.

    If only one column selected, then plain list is returned.

    To obtain the same result in the future, you can use the following:

    >>> execute(sql, con, params).fetchall()

    Parameters
    ----------
    sql : string
        SQL query to be executed
    con : DBAPI2 connection
    cur : deprecated, cursor is obtained from connection

    Returns
    -------
    Results Iterable
    """
    warnings.warn(
        "tquery is deprecated, and will be removed in future versions. "
        "You can use ``execute(...).fetchall()`` instead.", FutureWarning)

    cur = execute(sql, con, cur=cur)
    result = _safe_fetch(cur)

    if con is not None:
        try:
            cur.close()
            con.commit()
        except Exception as e:
            excName = e.__class__.__name__
            if excName == 'OperationalError':  # pragma: no cover
                print('Failed to commit, may need to restart interpreter')
            else:
                raise

            traceback.print_exc()
            if retry:
                return tquery(sql, con=con, retry=False)

    if result and len(result[0]) == 1:
        # python 3 compat
        result = list(lzip(*result)[0])
    elif result is None:  # pragma: no cover
        result = []

    return result
def uquery(sql, con=None, cur=None, retry=True, params=None):
    """
    DEPRECATED. Does the same thing as tquery, but instead of returning
    results it returns the number of rows affected. Good for update queries.

    To obtain the same result in the future, you can use the following:

    >>> execute(sql, con).rowcount

    Parameters
    ----------
    sql : string
        SQL query to be executed
    con : DBAPI2 connection
    cur : deprecated, cursor is obtained from connection
    params : list or tuple, optional
        List of parameters to pass to execute method.

    Returns
    -------
    Number of affected rows
    """
    warnings.warn(
        "uquery is deprecated, and will be removed in future versions. "
        "You can use ``execute(...).rowcount`` instead.", FutureWarning)

    cur = execute(sql, con, cur=cur, params=params)

    result = cur.rowcount
    try:
        con.commit()
    except Exception as e:
        excName = e.__class__.__name__
        if excName != 'OperationalError':
            raise

        traceback.print_exc()
        if retry:
            print('Looks like your connection failed, reconnecting...')
            return uquery(sql, con, retry=False)
    return result
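# Migration sketch for the two deprecated helpers above (table and column
# names are hypothetical, not part of the original module):
#
#   rows = tquery("SELECT a FROM tbl", con)                # old
#   rows = execute("SELECT a FROM tbl", con).fetchall()    # new
#
#   n = uquery("UPDATE tbl SET a = 0", con)                # old
#   n = execute("UPDATE tbl SET a = 0", con).rowcount      # new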
#------------------------------------------------------------------------------
# Read and write to DataFrames

def read_sql_table(table_name, con, index_col=None, coerce_float=True,
                   parse_dates=None, columns=None):
    """Read SQL database table into a DataFrame.

    Given a table name and an SQLAlchemy engine, returns a DataFrame.
    This function does not support DBAPI connections.

    Parameters
    ----------
    table_name : string
        Name of SQL table in database
    con : SQLAlchemy engine
        Sqlite DBAPI connection mode not supported
    index_col : string, optional
        Column to set as index
    coerce_float : boolean, default True
        Attempt to convert values of non-string, non-numeric objects (like
        decimal.Decimal) to floating point. Can result in loss of precision.
    parse_dates : list or dict
        - List of column names to parse as dates
        - Dict of ``{column_name: format string}`` where format string is
          strftime compatible in case of parsing string times, or is one of
          (D, s, ns, ms, us) in case of parsing integer timestamps
        - Dict of ``{column_name: arg dict}``, where the arg dict corresponds
          to the keyword arguments of :func:`pandas.to_datetime`.
          Especially useful with databases without native Datetime support,
          such as SQLite
    columns : list
        List of column names to select from sql table

    Returns
    -------
    DataFrame

    See also
    --------
    read_sql_query : Read SQL query into a DataFrame.
    read_sql
    """
    pandas_sql = PandasSQLAlchemy(con)
    table = pandas_sql.read_table(
        table_name, index_col=index_col, coerce_float=coerce_float,
        parse_dates=parse_dates, columns=columns)

    if table is not None:
        return table
    else:
        raise ValueError("Table %s not found" % table_name, con)
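# Usage sketch (assumes SQLAlchemy is installed and that a table named
# 'measurements' exists in a local SQLite file; all names are made up):
#
#   from sqlalchemy import create_engine
#   engine = create_engine('sqlite:///example.db')
#   df = read_sql_table('measurements', engine,
#                       index_col='id', parse_dates=['recorded_at'])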
def read_sql_query(sql, con, index_col=None, coerce_float=True, params=None,
                   parse_dates=None):
    """Read SQL query into a DataFrame.

    Returns a DataFrame corresponding to the result set of the query string.
    Optionally provide an `index_col` parameter to use one of the columns as
    the index, otherwise default integer index will be used.

    Parameters
    ----------
    sql : string
        SQL query to be executed
    con : SQLAlchemy engine or sqlite3 DBAPI2 connection
        Using SQLAlchemy makes it possible to use any DB supported by that
        library.
        If a DBAPI2 object, only sqlite3 is supported.
    index_col : string, optional
        Column name to use as index for the returned DataFrame object.
    coerce_float : boolean, default True
        Attempt to convert values of non-string, non-numeric objects (like
        decimal.Decimal) to floating point. Can result in loss of precision.
    params : list or tuple, optional
        List of parameters to pass to execute method.
    parse_dates : list or dict
        - List of column names to parse as dates
        - Dict of ``{column_name: format string}`` where format string is
          strftime compatible in case of parsing string times, or is one of
          (D, s, ns, ms, us) in case of parsing integer timestamps
        - Dict of ``{column_name: arg dict}``, where the arg dict corresponds
          to the keyword arguments of :func:`pandas.to_datetime`.
          Especially useful with databases without native Datetime support,
          such as SQLite

    Returns
    -------
    DataFrame

    See also
    --------
    read_sql_table : Read SQL database table into a DataFrame
    read_sql
    """
    pandas_sql = pandasSQL_builder(con)
    return pandas_sql.read_sql(
        sql, index_col=index_col, params=params, coerce_float=coerce_float,
        parse_dates=parse_dates)
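# Usage sketch with a parametrized query (the '?' placeholder is the sqlite3
# paramstyle; table and column names are made up):
#
#   import sqlite3
#   con = sqlite3.connect('example.db')
#   df = read_sql_query("SELECT * FROM measurements WHERE station = ?",
#                       con, params=('A1',))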
def read_sql(sql, con, index_col=None, coerce_float=True, params=None,
             parse_dates=None, columns=None):
    """
    Read SQL query or database table into a DataFrame.

    Parameters
    ----------
    sql : string
        SQL query to be executed or database table name.
    con : SQLAlchemy engine or DBAPI2 connection (legacy mode)
        Using SQLAlchemy makes it possible to use any DB supported by that
        library.
        If a DBAPI2 object, only sqlite3 is supported.
    index_col : string, optional
        Column name to use as index for the returned DataFrame object.
    coerce_float : boolean, default True
        Attempt to convert values of non-string, non-numeric objects (like
        decimal.Decimal) to floating point. Can result in loss of precision.
    params : list or tuple, optional
        List of parameters to pass to execute method.
    parse_dates : list or dict
        Same forms as accepted by ``read_sql_query`` above.
    columns : list
        List of column names to select from sql table (only used when reading
        a table).

    Returns
    -------
    DataFrame

    Notes
    -----
    This function is a convenience wrapper around ``read_sql_table`` and
    ``read_sql_query`` (and for backward compatibility) and will delegate to
    the specific function depending on the provided input (database table
    name or SQL query).

    See also
    --------
    read_sql_table : Read SQL database table into a DataFrame
    read_sql_query : Read SQL query into a DataFrame
    """
    pandas_sql = pandasSQL_builder(con)

    if isinstance(pandas_sql, PandasSQLLegacy):
        return pandas_sql.read_sql(
            sql, index_col=index_col, params=params,
            coerce_float=coerce_float, parse_dates=parse_dates)

    if pandas_sql.has_table(sql):
        return pandas_sql.read_table(
            sql, index_col=index_col, coerce_float=coerce_float,
            parse_dates=parse_dates, columns=columns)
    else:
        return pandas_sql.read_sql(
            sql, index_col=index_col, params=params,
            coerce_float=coerce_float, parse_dates=parse_dates)
\"\"\" if con is None: if flavor == 'mysql':", "False: parse_dates = [] if not hasattr(parse_dates, '__iter__'): parse_dates =", "List of column names to parse as dates - Dict", "pytype_name = \"float\" elif com.is_timedelta64_dtype(pytype): warnings.warn(\"the 'timedelta' type is not", "issubclass(col.dtype.type, np.floating) or issubclass(col.dtype.type, np.integer): # parse dates as timestamp", "= pandasSQL_builder(con) return pandas_sql.read_sql( sql, index_col=index_col, params=params, coerce_float=coerce_float, parse_dates=parse_dates) def", "result = self.execute(*args) data = result.fetchall() columns = result.keys() data_frame", "exist. index : boolean, default False Write DataFrame index as", "datetime import datetime, date, timedelta import warnings import traceback import", "'BIGINT', 'sqlite': 'INTEGER', }, 'datetime': { 'mysql': 'DATETIME', 'sqlite': 'TIMESTAMP',", "given SQL query using the provided connection object. Parameters ----------", "col_type is int or col_type is bool: self.frame[col_name].astype(col_type, copy=False) #", "result = list(lzip(*result)[0]) elif result is None: # pragma: no", "removed in future versions. \" \"You can use ``execute(...).rowcount`` instead.\",", "string_types): return [index] elif isinstance(index, list): return index else: return", "and len(columns) > 0: from sqlalchemy import select cols =", "for parse_dates gracefully if parse_dates is True or parse_dates is", "column names to parse as dates - Dict of ``{column_name:", "If a DBAPI2 object, only sqlite3 is supported. \"\"\" if", "= dict((k, self.maybe_asscalar(v)) for k, v in zip(keys, t[1:])) data_list.append(data)", "if keys is not None: if isinstance(keys, string_types): keys =", "a mapping args += [params] else: args += [list(params)] return", "the number of rows affected. Good for update queries. To", "will be used. Parameters ---------- sql : string SQL query", "isinstance(sqltype, Boolean): return bool return object class PandasSQL(PandasObject): \"\"\" Subclasses", "meta=None): self.engine = engine if not meta: from sqlalchemy.schema import", "to pass them between functions all the time. \"\"\" #", "params=None): args = _convert_params(sql, params) result = self.execute(*args) data =", "PandasSQLTableLegacy(table_name, self, frame=frame) return str(table.sql_schema()) def get_schema(frame, name, flavor='sqlite', keys=None,", "pandas_sql.read_sql( sql, index_col=index_col, params=params, coerce_float=coerce_float, parse_dates=parse_dates) def read_sql(sql, con, index_col=None,", "frame = frame.to_frame() elif not isinstance(frame, DataFrame): raise NotImplementedError pandas_sql.to_sql(frame,", "these column names will not be changed. \" \"In pandas", "boolean, default True Write DataFrame index as a column index_label", "This function is deprecated in favor of ``to_sql``. There are", "TODO: support for multiIndex def __init__(self, name, pandas_sql_engine, frame=None, index=True,", "NotImplementedError else: self.flavor = flavor def execute(self, *args, **kwargs): if", "column in names] col_names = ','.join(bracketed_names) wildcards = ','.join([wld] *", "If a DBAPI2 object, only sqlite3 is supported. cur :", "column(s). If None is given (default) and `index` is True,", "br_r = _SQL_SYMB[flv]['br_r'] # right val quote char col_template =", "corresponding to the result set of the query string. 
Optionally", "else: temp = self.frame return temp def insert(self): ins =", "params=None): \"\"\" Execute the given SQL query using the provided", "isinstance(result, list): result = list(result) return result except Exception as", "# When support for DBAPI connections is removed, # is_cursor", "'text': { 'mysql': 'VARCHAR (63)', 'sqlite': 'TEXT', }, 'float': {", "DBs without # SQLAlchemy installed # SQL type convertions for", ": string, optional Column to set as index coerce_float :", "l is not None else \"level_{0}\".format(i) for i, l in", "retrieval and to reduce dependency on DB-specific API. \"\"\" from", "= MetaData(self.engine) meta.reflect(self.engine) self.meta = meta def execute(self, *args, **kwargs):", "where format string is strftime compatible in case of parsing", "table_name, index_col=index_col, coerce_float=coerce_float, parse_dates=parse_dates, columns=columns) if table is not None:", "not None: for i, idx_label in enumerate(self.index[::-1]): columns.insert(0, idx_label) column_types.insert(0,", "of :func:`pandas.to_datetime` Especially useful with databases without native Datetime support,", "name, self, frame=frame, index=index, if_exists=if_exists, index_label=index_label) table.insert() @property def tables(self):", "index_label=None): self.name = name self.pd_sql = pandas_sql_engine self.prefix = prefix", "it, recreate it, and insert data. - append: If table", "if not isinstance(result, list): result = list(result) return result except", "= self.pd_sql.get_table(self.name) if self.table is None: raise ValueError(\"Could not init", "self.pd_sql.flavor br_l = _SQL_SYMB[flv]['br_l'] # left val quote char br_r", "_convert_params(sql, params) cursor = self.execute(*args) columns = [col_desc[0] for col_desc", "bool if there are no NA values. Datetimes should already", "_SQL_SYMB[flv]['br_r'] # right val quote char col_template = br_l +", "self.table.insert() def maybe_asscalar(self, i): try: return np.asscalar(i) except AttributeError: return", "np.floating) or issubclass(col.dtype.type, np.integer): # parse dates as timestamp format", "= pandasSQL_builder(con) if isinstance(pandas_sql, PandasSQLLegacy): return pandas_sql.read_sql( sql, index_col=index_col, params=params,", "table. Parameters ---------- table_name: string Name of SQL table con:", "# not supported in Legacy mode def drop_table(self, name): drop_sql", "'s' if format is None else format return to_datetime(col, coerce=True,", "in given sql query. If only one column selected, then", "all the time. \"\"\" # TODO: support for multiIndex def", ": string con : DBAPI2 connection flavor : {'sqlite', 'mysql'},", "should match number of \" \"levels, which is {0}\".format(nlevels)) else:", "parse_dates[col_name] except TypeError: fmt = None self.frame[col_name] = _handle_date_column( df_col,", "create(self): self.pd_sql.execute(self.table) def insert_statement(self): names = list(map(str, self.frame.columns)) flv =", "prefix='pandas', index_label=None): self.name = name self.pd_sql = pandas_sql_engine self.prefix =", "result = list(result) return result def to_sql(self, frame, name, if_exists='fail',", "-------- read_sql_query : Read SQL query into a DataFrame. 
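# Usage sketch for ``to_sql`` (engine and table name are made up):
#
#   to_sql(df, 'measurements', engine, if_exists='append', index=False)
#
# The index is written as a column by default; pass ``index=False`` to
# reproduce the behaviour of the legacy ``write_frame`` defined near the
# bottom of this module.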
def has_table(table_name, con, flavor='sqlite'):
    """
    Check if database has named table.

    Parameters
    ----------
    table_name : string
        Name of SQL table
    con : SQLAlchemy engine or DBAPI2 connection (legacy mode)
        Using SQLAlchemy makes it possible to use any DB supported by that
        library.
        If a DBAPI2 object, only sqlite3 is supported.
    flavor : {'sqlite', 'mysql'}, default 'sqlite'
        The flavor of SQL to use. Ignored when using SQLAlchemy engine.

    Returns
    -------
    boolean
    """
    pandas_sql = pandasSQL_builder(con, flavor=flavor)
    return pandas_sql.has_table(table_name)

table_exists = has_table

_MYSQL_WARNING = ("The 'mysql' flavor with DBAPI connection is deprecated "
                  "and will be removed in future versions. "
                  "MySQL will be further supported with SQLAlchemy engines.")


def pandasSQL_builder(con, flavor=None, meta=None, is_cursor=False):
    """
    Convenience function to return the correct PandasSQL subclass based on
    the provided parameters.
    """
    # When support for DBAPI connections is removed,
    # is_cursor should not be necessary.
    try:
        import sqlalchemy

        if isinstance(con, sqlalchemy.engine.Engine):
            return PandasSQLAlchemy(con, meta=meta)
        else:
            if flavor == 'mysql':
                warnings.warn(_MYSQL_WARNING, FutureWarning)
            return PandasSQLLegacy(con, flavor, is_cursor=is_cursor)

    except ImportError:
        if flavor == 'mysql':
            warnings.warn(_MYSQL_WARNING, FutureWarning)
        return PandasSQLLegacy(con, flavor, is_cursor=is_cursor)
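# Builder dispatch sketch: anything that is not an SQLAlchemy ``Engine``
# (including a raw sqlite3 connection, or a bare cursor when passed with
# ``is_cursor=True``) falls back to the legacy code path:
#
#   pandasSQL_builder(create_engine('sqlite://'))   # -> PandasSQLAlchemy
#   pandasSQL_builder(sqlite3.connect(':memory:'))  # -> PandasSQLLegacy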
class PandasSQLTable(PandasObject):
    """
    For mapping Pandas tables to SQL tables.
    Uses fact that table is reflected by SQLAlchemy to
    do better type conversions.
    Also holds various flags needed to avoid having to
    pass them between functions all the time.
    """
    # TODO: support for multiIndex
    def __init__(self, name, pandas_sql_engine, frame=None, index=True,
                 if_exists='fail', prefix='pandas', index_label=None):
        self.name = name
        self.pd_sql = pandas_sql_engine
        self.prefix = prefix
        self.frame = frame
        self.index = self._index_name(index, index_label)

        if frame is not None:
            # We want to write a frame
            if self.pd_sql.has_table(self.name):
                if if_exists == 'fail':
                    raise ValueError("Table '%s' already exists." % name)
                elif if_exists == 'replace':
                    self.pd_sql.drop_table(self.name)
                    self.table = self._create_table_statement()
                    self.create()
                elif if_exists == 'append':
                    self.table = self.pd_sql.get_table(self.name)
                    if self.table is None:
                        raise ValueError(
                            "Could not init table '%s'" % name)
                else:
                    raise ValueError(
                        "'{0}' is not valid for if_exists".format(if_exists))
            else:
                self.table = self._create_table_statement()
                self.create()
        else:
            # no data provided, read-only mode
            self.table = self.pd_sql.get_table(self.name)
            if self.table is None:
                raise ValueError("Could not init table '%s'" % name)

    def exists(self):
        return self.pd_sql.has_table(self.name)

    def sql_schema(self):
        from sqlalchemy.schema import CreateTable
        return str(CreateTable(self.table))

    def create(self):
        self.table.create()

    def insert_statement(self):
        return self.table.insert()

    def maybe_asscalar(self, i):
        try:
            return np.asscalar(i)
        except AttributeError:
            return i

    def insert_data(self):
        if self.index is not None:
            temp = self.frame.copy()
            temp.index.names = self.index
            try:
                temp.reset_index(inplace=True)
            except ValueError as err:
                raise ValueError(
                    "duplicate name in index/columns: {0}".format(err))
        else:
            temp = self.frame

        return temp

    def insert(self):
        ins = self.insert_statement()
        temp = self.insert_data()
        keys = temp.columns
        data_list = []

        for t in temp.itertuples():
            data = dict((k, self.maybe_asscalar(v))
                        for k, v in zip(keys, t[1:]))
            data_list.append(data)

        self.pd_sql.execute(ins, data_list)

    def read(self, coerce_float=True, parse_dates=None, columns=None):

        if columns is not None and len(columns) > 0:
            from sqlalchemy import select
            cols = [self.table.c[n] for n in columns]
            if self.index is not None:
                [cols.insert(0, self.table.c[idx])
                 for idx in self.index[::-1]]
            sql_select = select(cols)
        else:
            sql_select = self.table.select()

        result = self.pd_sql.execute(sql_select)
        data = result.fetchall()
        column_names = result.keys()

        self.frame = DataFrame.from_records(
            data, columns=column_names, coerce_float=coerce_float)

        self._harmonize_columns(parse_dates=parse_dates)

        if self.index is not None:
            self.frame.set_index(self.index, inplace=True)

        return self.frame

    def _index_name(self, index, index_label):
        # for writing: index=True to include index in sql table
        if index is True:
            nlevels = self.frame.index.nlevels
            # if index_label is specified, set this as index name(s)
            if index_label is not None:
                if not isinstance(index_label, list):
                    index_label = [index_label]
                if len(index_label) != nlevels:
                    raise ValueError(
                        "Length of 'index_label' should match number of "
                        "levels, which is {0}".format(nlevels))
                else:
                    return index_label
            # return the used column labels for the index columns
            if (nlevels == 1 and 'index' not in self.frame.columns
                    and self.frame.index.name is None):
                return ['index']
            else:
                return [l if l is not None else "level_{0}".format(i)
                        for i, l in enumerate(self.frame.index.names)]
        # for reading: index=(list of) string to specify column to set as index
        elif isinstance(index, string_types):
            return [index]
        elif isinstance(index, list):
            return index
        else:
            return None

    def _create_table_statement(self):
        from sqlalchemy import Table, Column

        columns = list(map(str, self.frame.columns))
        column_types = map(self._sqlalchemy_type, self.frame.dtypes)

        columns = [Column(name, typ)
                   for name, typ in zip(columns, column_types)]

        if self.index is not None:
            for i, idx_label in enumerate(self.index[::-1]):
                idx_type = self._sqlalchemy_type(
                    self.frame.index.get_level_values(i))
                columns.insert(0, Column(idx_label, idx_type, index=True))

        return Table(self.name, self.pd_sql.meta, *columns)

    def _harmonize_columns(self, parse_dates=None):
        """
        Make a data_frame's column types align with the sql_table
        column types.
        Need to work around limited NA value support. Floats are always
        fine, ints must always be floats if there are Null values.
        Booleans are hard because converting bool column with None replaces
        all Nones with false. Therefore only convert bool if there are no
        NA values.
        Datetimes should already be converted to np.datetime64 if supported,
        but here we also force conversion if required.
        """
        # handle non-list entries for parse_dates gracefully
        if parse_dates is True or parse_dates is None or parse_dates is False:
            parse_dates = []

        if not hasattr(parse_dates, '__iter__'):
            parse_dates = [parse_dates]

        for sql_col in self.table.columns:
            col_name = sql_col.name
            try:
                df_col = self.frame[col_name]
                # the type the dataframe column should have
                col_type = self._numpy_type(sql_col.type)

                if col_type is datetime or col_type is date:
                    if not issubclass(df_col.dtype.type, np.datetime64):
                        self.frame[col_name] = _handle_date_column(df_col)

                elif col_type is float:
                    # floats support NA, can always convert!
                    self.frame[col_name].astype(col_type, copy=False)

                elif len(df_col) == df_col.count():
                    # No NA values, can convert ints and bools
                    if col_type is int or col_type is bool:
                        self.frame[col_name].astype(col_type, copy=False)

                # Handle date parsing
                if col_name in parse_dates:
                    try:
                        fmt = parse_dates[col_name]
                    except TypeError:
                        fmt = None
                    self.frame[col_name] = _handle_date_column(
                        df_col, format=fmt)

            except KeyError:
                pass  # this column not in results

    def _sqlalchemy_type(self, arr_or_dtype):
        from sqlalchemy.types import (Integer, Float, Text, Boolean,
                                      DateTime, Date, Interval)

        if arr_or_dtype is date:
            return Date
        if com.is_datetime64_dtype(arr_or_dtype):
            try:
                tz = arr_or_dtype.tzinfo
                return DateTime(timezone=True)
            except:
                return DateTime
        if com.is_timedelta64_dtype(arr_or_dtype):
            warnings.warn("the 'timedelta' type is not supported, and will be "
                          "written as integer values (ns frequency) to the "
                          "database.", UserWarning)
            return Integer
        elif com.is_float_dtype(arr_or_dtype):
            return Float
        elif com.is_integer_dtype(arr_or_dtype):
            # TODO: Refine integer size.
            return Integer
        elif com.is_bool(arr_or_dtype):
            return Boolean
        return Text

    def _numpy_type(self, sqltype):
        from sqlalchemy.types import Integer, Float, Boolean, DateTime, Date

        if isinstance(sqltype, Float):
            return float
        if isinstance(sqltype, Integer):
            # TODO: Refine integer size.
            return int
        if isinstance(sqltype, DateTime):
            # Caution: np.datetime64 is also a subclass of np.number.
            return datetime
        if isinstance(sqltype, Date):
            return date
        if isinstance(sqltype, Boolean):
            return bool
        return object
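# Round-trip sketch for the index handling in ``_index_name`` above (names
# made up): an unnamed single-level index is written to a column called
# 'index', which can be restored on the way back:
#
#   to_sql(df, 'tbl', engine)                          # writes 'index' column
#   read_sql_table('tbl', engine, index_col='index')   # restores the index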
class PandasSQL(PandasObject):
    """
    Subclasses should define read_sql and to_sql.
    """
    def read_sql(self, *args, **kwargs):
        raise ValueError(
            "PandasSQL must be created with an SQLAlchemy engine or "
            "connection+sql flavor")

    def to_sql(self, *args, **kwargs):
        raise ValueError(
            "PandasSQL must be created with an SQLAlchemy engine or "
            "connection+sql flavor")


class PandasSQLAlchemy(PandasSQL):
    """
    This class enables conversion between DataFrame and SQL databases
    using SQLAlchemy to handle database abstraction.
    """
    def __init__(self, engine, meta=None):
        self.engine = engine
        if not meta:
            from sqlalchemy.schema import MetaData
            meta = MetaData(self.engine)
            meta.reflect(self.engine)

        self.meta = meta

    def execute(self, *args, **kwargs):
        """Simple passthrough to SQLAlchemy engine"""
        return self.engine.execute(*args, **kwargs)

    def read_table(self, table_name, index_col=None, coerce_float=True,
                   parse_dates=None, columns=None):
        table = PandasSQLTable(table_name, self, index=index_col)
        return table.read(coerce_float=coerce_float,
                          parse_dates=parse_dates, columns=columns)

    def read_sql(self, sql, index_col=None, coerce_float=True,
                 parse_dates=None, params=None):
        args = _convert_params(sql, params)

        result = self.execute(*args)
        data = result.fetchall()
        columns = result.keys()

        data_frame = DataFrame.from_records(
            data, columns=columns, coerce_float=coerce_float)

        _parse_date_columns(data_frame, parse_dates)

        if index_col is not None:
            data_frame.set_index(index_col, inplace=True)

        return data_frame

    def to_sql(self, frame, name, if_exists='fail', index=True,
               index_label=None):
        table = PandasSQLTable(
            name, self, frame=frame, index=index, if_exists=if_exists,
            index_label=index_label)
        table.insert()

    @property
    def tables(self):
        return self.meta.tables

    def has_table(self, name):
        return self.meta.tables.get(name) is not None

    def get_table(self, table_name):
        return self.meta.tables.get(table_name)

    def drop_table(self, table_name):
        if self.engine.has_table(table_name):
            self.get_table(table_name).drop()
            self.meta.clear()
            self.meta.reflect()

    def _create_sql_schema(self, frame, table_name):
        table = PandasSQLTable(table_name, self, frame=frame)
        return str(table.sql_schema())
if_exists : {'fail', 'replace', 'append'}, default 'fail' -", "l in enumerate(self.frame.index.names)] # for reading: index=(list of) string to", "optional Column to set as index coerce_float : boolean, default", "= pandasSQL_builder(con) else: pandas_sql = pandasSQL_builder(cur, is_cursor=True) args = _convert_params(sql,", "zip(keys, t[1:])) data_list.append(data) self.pd_sql.execute(ins, data_list) def read(self, coerce_float=True, parse_dates=None, columns=None):", "results def _sqlalchemy_type(self, arr_or_dtype): from sqlalchemy.types import Integer, Float, Text,", "[br_l + column + br_r for column in names] col_names", "if result and len(result[0]) == 1: # python 3 compat", "%s\" % args[0]) raise_with_traceback(ex) def read_sql(self, sql, index_col=None, coerce_float=True, params=None,", "Parameters ---------- frame : DataFrame name : string con :", "depreciation warnings and copied docs def read_frame(*args, **kwargs): \"\"\"DEPRECIATED -", "params) cursor = self.execute(*args) columns = [col_desc[0] for col_desc in", "def uquery(sql, con=None, cur=None, retry=True, params=None): \"\"\" DEPRECATED. Does the", "com.is_timedelta64_dtype(arr_or_dtype): warnings.warn(\"the 'timedelta' type is not supported, and will be", "SQL query into a DataFrame. Returns a DataFrame corresponding to", "in ['D', 's', 'ms', 'us', 'ns']: return to_datetime(col, coerce=True, unit=format)", "\" \"database.\", UserWarning) pytype_name = \"int\" elif issubclass(pytype, np.integer): pytype_name", "params=None, parse_dates=None): args = _convert_params(sql, params) cursor = self.execute(*args) columns", "string con : DBAPI2 connection flavor : {'sqlite', 'mysql'}, default", "and len(result[0]) == 1: # python 3 compat result =", "optional List of parameters to pass to execute method. Returns", "[cols.insert(0, self.table.c[idx]) for idx in self.index[::-1]] sql_select = select(cols) else:", "( %(columns)s %(keystr)s );\"\"\" create_statement = template % {'name': name,", "if there are Null values. Booleans are hard because converting", "object, only sqlite3 is supported. cur : depreciated, cursor is", "= ','.join([wld] * len(names)) insert_statement = 'INSERT INTO %s (%s)", "connections. Parameters ---------- table_name : string Name of SQL table", "def tquery(sql, con=None, cur=None, retry=True): \"\"\" DEPRECATED. Returns list of", "if table is not None: return table else: raise ValueError(\"Table", "native Datetime support, such as SQLite columns : list List", "def read_sql(self, sql, index_col=None, coerce_float=True, parse_dates=None, params=None): args = _convert_params(sql,", "pytype_name = \"float\" elif issubclass(pytype, np.integer): pytype_name = \"int\" elif", "# python 3 compat result = list(lzip(*result)[0]) elif result is", "and to_sql \"\"\" def read_sql(self, *args, **kwargs): raise ValueError( \"PandasSQL", "return ['index'] else: return [l if l is not None", "params): \"\"\"convert sql and params args to DBAPI2.0 compliant format\"\"\"", "a DBAPI2 object, only sqlite3 is supported. 
def execute(sql, con, cur=None, params=None):
    """
    Execute the given SQL query using the provided connection object.

    Parameters
    ----------
    sql : string
        Query to be executed
    con : SQLAlchemy engine or DBAPI2 connection (legacy mode)
        Using SQLAlchemy makes it possible to use any DB supported by that
        library.
        If a DBAPI2 object, only sqlite3 is supported.
    cur : deprecated, cursor is obtained from connection
    params : list or tuple, optional
        List of parameters to pass to execute method.

    Returns
    -------
    Results Iterable
    """
    if cur is None:
        pandas_sql = pandasSQL_builder(con)
    else:
        pandas_sql = pandasSQL_builder(cur, is_cursor=True)
    args = _convert_params(sql, params)
    return pandas_sql.execute(*args)
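# A minimal usage sketch for execute with the module as documented here,
# assuming an in-memory sqlite3 connection; the table and query are
# illustrative only:
import sqlite3
from pandas.io import sql

_conn = sqlite3.connect(":memory:")
_conn.execute("CREATE TABLE t (a INTEGER, b TEXT)")
_conn.execute("INSERT INTO t VALUES (1, 'x')")
_conn.commit()

# parameters are forwarded to the DBAPI cursor; the result object behaves
# like a cursor and can be iterated or fetched from
result = sql.execute("SELECT a, b FROM t WHERE a = ?", _conn, params=(1,))
print(result.fetchall())    # [(1, 'x')]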
#------------------------------------------------------------------------------
#--- Deprecated tquery and uquery

def _safe_fetch(cur):
    try:
        result = cur.fetchall()
        if not isinstance(result, list):
            result = list(result)
        return result
    except Exception as e:  # pragma: no cover
        excName = e.__class__.__name__
        if excName == 'OperationalError':
            return []


def tquery(sql, con=None, cur=None, retry=True):
    """
    DEPRECATED. Returns list of tuples corresponding to each row in given sql
    query.

    If only one column selected, then plain list is returned.

    To obtain the same result in the future, you can use the following:

    >>> execute(sql, con, params).fetchall()

    Parameters
    ----------
    sql : string
        SQL query to be executed
    con : DBAPI2 connection
    cur : deprecated, cursor is obtained from connection

    Returns
    -------
    Results Iterable
    """


def uquery(sql, con=None, cur=None, retry=True, params=None):
    """
    DEPRECATED. Does the same thing as tquery, but instead of returning
    results, it returns the number of affected rows.

    To obtain the same result in the future, you can use the following:

    >>> execute(sql, con).rowcount

    Parameters
    ----------
    sql : string
        SQL query to be executed
    con : DBAPI2 connection
    cur : deprecated, cursor is obtained from connection
    params : list or tuple, optional
        List of parameters to pass to execute method.

    Returns
    -------
    Number of affected rows
    """
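# Migration sketch for the two deprecated helpers above: the same results can
# be obtained from execute() directly (the table and queries are illustrative):
import sqlite3
from pandas.io import sql

_conn = sqlite3.connect(":memory:")
_conn.execute("CREATE TABLE t (a INTEGER)")
_conn.executemany("INSERT INTO t VALUES (?)", [(1,), (2,)])
_conn.commit()

rows = sql.execute("SELECT a FROM t", _conn).fetchall()       # instead of tquery
n = sql.execute("UPDATE t SET a = a + 1", _conn).rowcount     # instead of uquery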
#------------------------------------------------------------------------------
#--- Read and write to DataFrames

def read_sql_table(table_name, con, index_col=None, coerce_float=True,
                   parse_dates=None, columns=None):
    """Read SQL database table into a DataFrame.

    Given a table name and an SQLAlchemy engine, returns a DataFrame.
    This function does not support DBAPI connections.

    Parameters
    ----------
    table_name : string
        Name of SQL table in database
    con : SQLAlchemy engine
        Sqlite DBAPI connection mode not supported
    index_col : string, optional
        Column to set as index
    coerce_float : boolean, default True
        Attempt to convert values of non-string, non-numeric objects (like
        decimal.Decimal) to floating point. Can result in loss of precision.
    parse_dates : list or dict
        - List of column names to parse as dates
        - Dict of ``{column_name: format string}`` where format string is
          strftime compatible in case of parsing string times or is one of
          (D, s, ns, ms, us) in case of parsing integer timestamps
        - Dict of ``{column_name: arg dict}``, where the arg dict corresponds
          to the keyword arguments of :func:`pandas.to_datetime`
          Especially useful with databases without native Datetime support,
          such as SQLite
    columns : list
        List of column names to select from sql table

    Returns
    -------
    DataFrame

    See also
    --------
    read_sql_query : Read SQL query into a DataFrame.
    read_sql
    """
    pandas_sql = PandasSQLAlchemy(con)
    table = pandas_sql.read_table(
        table_name, index_col=index_col, coerce_float=coerce_float,
        parse_dates=parse_dates, columns=columns)

    if table is not None:
        return table
    else:
        raise ValueError("Table %s not found" % table_name, con)
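# Reading a whole table requires an SQLAlchemy engine (a DBAPI connection is
# not enough); a minimal sketch assuming SQLAlchemy is installed and using a
# made-up table name:
import pandas as pd
from sqlalchemy import create_engine

_engine = create_engine("sqlite:///:memory:")
_df = pd.DataFrame({"a": [1, 2], "b": ["x", "y"]})
_df.to_sql("demo", _engine)      # the index is written as a column named 'index'

roundtrip = pd.read_sql_table("demo", _engine, index_col="index", columns=["a"])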
def read_sql_query(sql, con, index_col=None, coerce_float=True, params=None,
                   parse_dates=None):
    """Read SQL query into a DataFrame.

    Returns a DataFrame corresponding to the result set of the query string.
    Optionally provide an `index_col` parameter to use one of the columns as
    the index, otherwise default integer index will be used.

    Parameters
    ----------
    sql : string
        SQL query to be executed
    con : SQLAlchemy engine or DBAPI2 connection (legacy mode)
        Using SQLAlchemy makes it possible to use any DB supported by that
        library.
        If a DBAPI2 object, only sqlite3 is supported.
    index_col : string, optional
        Column name to use as index for the returned DataFrame object.
    coerce_float : boolean, default True
        Attempt to convert values of non-string, non-numeric objects (like
        decimal.Decimal) to floating point, useful for SQL result sets
    params : list, tuple or dict, optional
        List of parameters to pass to execute method.
    parse_dates : list or dict
        As described under ``read_sql_table`` above.

    Returns
    -------
    DataFrame

    See also
    --------
    read_sql_table : Read SQL database table into a DataFrame
    read_sql
    """
    pandas_sql = pandasSQL_builder(con)
    return pandas_sql.read_sql(
        sql, index_col=index_col, params=params, coerce_float=coerce_float,
        parse_dates=parse_dates)
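# read_sql_query also works with a plain DBAPI connection; params are passed
# through to the database driver (illustrative data):
import sqlite3
import pandas as pd

_conn = sqlite3.connect(":memory:")
_conn.execute("CREATE TABLE t (a INTEGER, b TEXT)")
_conn.executemany("INSERT INTO t VALUES (?, ?)", [(1, "x"), (2, "y")])
_conn.commit()

df = pd.read_sql_query("SELECT * FROM t WHERE a > ?", _conn,
                       params=(1,), index_col="a")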
def read_sql(sql, con, index_col=None, coerce_float=True, params=None,
             parse_dates=None, columns=None):
    """
    Read SQL query or database table into a DataFrame.

    Parameters
    ----------
    sql : string
        SQL query to be executed or database table name.
    con : SQLAlchemy engine or DBAPI2 connection (legacy mode)
        Using SQLAlchemy makes it possible to use any DB supported by that
        library.
        If a DBAPI2 object, only sqlite3 is supported.
    index_col : string, optional
        Column name to use as index for the returned DataFrame object.
    coerce_float : boolean, default True
        Attempt to convert values of non-string, non-numeric objects (like
        decimal.Decimal) to floating point, useful for SQL result sets
    params : list, tuple or dict, optional
        List of parameters to pass to execute method.
    parse_dates : list or dict
        As described under ``read_sql_table`` above.
    columns : list
        List of column names to select from sql table (only used when reading
        a table).

    Returns
    -------
    DataFrame

    Notes
    -----
    This function is a convenience wrapper around ``read_sql_table`` and
    ``read_sql_query`` (and for backward compatibility) and will delegate
    to the specific function depending on the provided input (database
    table name or sql query).

    See also
    --------
    read_sql_table : Read SQL database table into a DataFrame
    read_sql_query : Read SQL query into a DataFrame
    """
    pandas_sql = pandasSQL_builder(con)

    if isinstance(pandas_sql, PandasSQLLegacy):
        return pandas_sql.read_sql(
            sql, index_col=index_col, params=params,
            coerce_float=coerce_float, parse_dates=parse_dates)

    if pandas_sql.has_table(sql):
        return pandas_sql.read_table(
            sql, index_col=index_col, coerce_float=coerce_float,
            parse_dates=parse_dates, columns=columns)
    else:
        return pandas_sql.read_sql(
            sql, index_col=index_col, params=params,
            coerce_float=coerce_float, parse_dates=parse_dates)
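# read_sql dispatches on its first argument: an existing table name is read
# via read_sql_table (SQLAlchemy engine required), anything else is treated
# as a query. Sketch assuming SQLAlchemy is installed, with a made-up table:
import pandas as pd
from sqlalchemy import create_engine

_engine = create_engine("sqlite:///:memory:")
pd.DataFrame({"a": [1, 2]}).to_sql("demo", _engine, index=False)

by_table = pd.read_sql("demo", _engine)                 # -> read_sql_table
by_query = pd.read_sql("SELECT a FROM demo", _engine)   # -> read_sql_query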
def to_sql(frame, name, con, flavor='sqlite', if_exists='fail', index=True,
           index_label=None):
    """
    Write records stored in a DataFrame to a SQL database.

    Parameters
    ----------
    frame : DataFrame
    name : string
        Name of SQL table
    con : SQLAlchemy engine or DBAPI2 connection (legacy mode)
        Using SQLAlchemy makes it possible to use any DB supported by that
        library.
        If a DBAPI2 object, only sqlite3 is supported.
    flavor : {'sqlite', 'mysql'}, default 'sqlite'
        The flavor of SQL to use. Ignored when using SQLAlchemy engine.
        'mysql' is deprecated and will be removed in future versions, but it
        will be further supported through SQLAlchemy engines.
    if_exists : {'fail', 'replace', 'append'}, default 'fail'
        - fail: If table exists, do nothing.
        - replace: If table exists, drop it, recreate it, and insert data.
        - append: If table exists, insert data. Create if does not exist.
    index : boolean, default True
        Write DataFrame index as a column
    index_label : string or sequence, default None
        Column label for index column(s). If None is given (default) and
        `index` is True, then the index names are used.
        A sequence should be given if the DataFrame uses MultiIndex.
    """
    if if_exists not in ('fail', 'replace', 'append'):
        raise ValueError("'{0}' is not valid for if_exists".format(if_exists))

    pandas_sql = pandasSQL_builder(con, flavor=flavor)

    if isinstance(frame, Series):
        frame = frame.to_frame()
    elif not isinstance(frame, DataFrame):
        raise NotImplementedError

    pandas_sql.to_sql(frame, name, if_exists=if_exists, index=index,
                      index_label=index_label)
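# if_exists controls what happens when the target table already exists; a
# sketch of the three modes, assuming SQLAlchemy is installed and using
# illustrative names:
import pandas as pd
from sqlalchemy import create_engine

_engine = create_engine("sqlite:///:memory:")
_df = pd.DataFrame({"a": [1, 2]})

_df.to_sql("demo", _engine, index=False)                      # create
_df.to_sql("demo", _engine, if_exists="append", index=False)  # add rows again
_df.to_sql("demo", _engine, if_exists="replace", index=True,
           index_label="row_id")                              # drop and recreate
# with the default if_exists='fail', another call without 'replace'/'append'
# would raise because the table already exists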
\" \"MySQL will be further supported with SQLAlchemy", "sql, index_col=None, coerce_float=True, params=None, parse_dates=None): args = _convert_params(sql, params) cursor", "a DataFrame read_sql_query : Read SQL query into a DataFrame", "import print_function, division from datetime import datetime, date, timedelta import", "connection Returns ------- Results Iterable \"\"\" warnings.warn( \"tquery is depreciated,", "is None: pandas_sql = pandasSQL_builder(con) else: pandas_sql = pandasSQL_builder(cur, is_cursor=True)", "name: name of SQL table flavor: {'sqlite', 'mysql'}, default 'sqlite'", "typ in zip(columns, column_types)] if self.index is not None: for", "name, 'columns': columns, 'keystr': keystr} return create_statement # legacy names,", "columns = [Column(name, typ) for name, typ in zip(columns, column_types)]", "read_sql(sql, con, index_col=None, coerce_float=True, params=None, parse_dates=None, columns=None): \"\"\" Read SQL", "supported with SQLAlchemy engines.\") def pandasSQL_builder(con, flavor=None, meta=None, is_cursor=False): \"\"\"", "flavor, keys=None): \"\"\"Old function from 0.13.1. To keep backwards compatibility.", "arr_or_dtype.tzinfo return DateTime(timezone=True) except: return DateTime if com.is_timedelta64_dtype(arr_or_dtype): warnings.warn(\"the 'timedelta'", "table name or sql query). See also -------- read_sql_table :", "to be read as such. Supports both string formatted and", "ints and bools if col_type is int or col_type is", "\"\"\"Simple passthrough to SQLAlchemy engine\"\"\" return self.engine.execute(*args, **kwargs) def read_table(self,", "val quote char wld = _SQL_SYMB[flv]['wld'] # wildcard char if", "DataFrame Notes ----- This function is a convenience wrapper around", "if not hasattr(parse_dates, '__iter__'): parse_dates = [parse_dates] for sql_col in", "string times or is one of (D, s, ns, ms,", "== df_col.count(): # No NA values, can convert ints and", "else: pandas_sql = pandasSQL_builder(cur, is_cursor=True) args = _convert_params(sql, params) return", "self.frame.columns)) flv = self.pd_sql.flavor br_l = _SQL_SYMB[flv]['br_l'] # left val", "compat result = list(lzip(*result)[0]) elif result is None: # pragma:", "pass to execute method. parse_dates : list or dict -", "index=index, if_exists=if_exists, index_label=index_label) table.insert() @property def tables(self): return self.meta.tables def", "list or tuple, optional List of parameters to pass to", "Sqlite DBAPI conncection mode not supported index_col : string, optional", "warnings.warn(\"the 'timedelta' type is not supported, and will be \"", "of column names to select from sql table (only used", "parse as dates - Dict of ``{column_name: format string}`` where", "convenience wrapper around ``read_sql_table`` and ``read_sql_query`` (and for backward compatibility)", "self.meta = meta def execute(self, *args, **kwargs): \"\"\"Simple passthrough to", "on sql: %s\\n%s\\nunable to rollback\" % (args[0], e)) raise_with_traceback(ex) ex", "depreciated, use read_sql\", FutureWarning) return read_sql(*args, **kwargs) def write_frame(frame, name,", "be necessary. try: import sqlalchemy if isinstance(con, sqlalchemy.engine.Engine): return PandasSQLAlchemy(con,", "_handle_date_column(df_col) elif col_type is float: # floats support NA, can", "to execute method. 
def pandasSQL_builder(con, flavor=None, meta=None, is_cursor=False):
    """
    Convenience function to return the correct PandasSQL subclass based on
    the provided parameters
    """
    # When support for DBAPI connections is removed,
    # is_cursor should not be necessary.
    try:
        import sqlalchemy

        if isinstance(con, sqlalchemy.engine.Engine):
            return PandasSQLAlchemy(con, meta=meta)
        else:
            if flavor == 'mysql':
                warnings.warn(_MYSQL_WARNING, FutureWarning)
            return PandasSQLLegacy(con, flavor, is_cursor=is_cursor)

    except ImportError:
        if flavor == 'mysql':
            warnings.warn(_MYSQL_WARNING, FutureWarning)
        return PandasSQLLegacy(con, flavor, is_cursor=is_cursor)


class PandasSQL(PandasObject):
    """
    Subclasses should define read_sql and to_sql
    """

    def read_sql(self, *args, **kwargs):
        raise ValueError("PandasSQL must be created with an SQLAlchemy engine"
                         " or connection+sql flavor")

    def to_sql(self, *args, **kwargs):
        raise ValueError("PandasSQL must be created with an SQLAlchemy engine"
                         " or connection+sql flavor")
class PandasSQLAlchemy(PandasSQL):
    """
    This class enables conversion between DataFrame and SQL databases
    using SQLAlchemy to handle DataBase abstraction
    """

    def __init__(self, engine, meta=None):
        self.engine = engine
        if not meta:
            from sqlalchemy.schema import MetaData
            meta = MetaData(self.engine)
            meta.reflect(self.engine)
        self.meta = meta

    def execute(self, *args, **kwargs):
        """Simple passthrough to SQLAlchemy engine"""
        return self.engine.execute(*args, **kwargs)

    def read_table(self, table_name, index_col=None, coerce_float=True,
                   parse_dates=None, columns=None):
        table = PandasSQLTable(table_name, self, index=index_col)
        return table.read(coerce_float=coerce_float,
                          parse_dates=parse_dates, columns=columns)

    def read_sql(self, sql, index_col=None, coerce_float=True,
                 parse_dates=None, params=None):
        args = _convert_params(sql, params)
        result = self.execute(*args)
        data = result.fetchall()
        columns = result.keys()

        data_frame = DataFrame.from_records(data, columns=columns,
                                            coerce_float=coerce_float)
        _parse_date_columns(data_frame, parse_dates)

        if index_col is not None:
            data_frame.set_index(index_col, inplace=True)
        return data_frame

    def to_sql(self, frame, name, if_exists='fail', index=True,
               index_label=None):
        table = PandasSQLTable(name, self, frame=frame, index=index,
                               if_exists=if_exists, index_label=index_label)
        table.insert()

    @property
    def tables(self):
        return self.meta.tables

    def has_table(self, name):
        if self.meta.tables.get(name) is not None:
            return True
        else:
            return False

    def get_table(self, table_name):
        return self.meta.tables.get(table_name)

    def drop_table(self, table_name):
        if self.engine.has_table(table_name):
            self.get_table(table_name).drop()
            self.meta.clear()
            self.meta.reflect()

    def _create_sql_schema(self, frame, table_name):
        table = PandasSQLTable(table_name, self, frame=frame)
        return str(table.sql_schema())
class PandasSQLTable(PandasObject):
    """
    For mapping Pandas tables to SQL tables.
    Uses fact that table is reflected by SQLAlchemy to do better type
    conversions.
    Also holds various flags needed to avoid having to pass them between
    functions all the time.
    """
    # TODO: support for multiIndex

    def __init__(self, name, pandas_sql_engine, frame=None, index=True,
                 if_exists='fail', prefix='pandas', index_label=None):
        self.name = name
        self.pd_sql = pandas_sql_engine
        self.prefix = prefix
        self.frame = frame
        self.index = self._index_name(index, index_label)

        if frame is not None:
            # We want to write a frame
            if self.pd_sql.has_table(self.name):
                if if_exists == 'fail':
                    raise ValueError("Table '%s' already exists." % name)
                elif if_exists == 'replace':
                    self.pd_sql.drop_table(self.name)
                    self.table = self._create_table_statement()
                    self.create()
                elif if_exists == 'append':
                    self.table = self.pd_sql.get_table(self.name)
                    if self.table is None:
                        self.table = self._create_table_statement()
                else:
                    raise ValueError(
                        "'{0}' is not valid for if_exists".format(if_exists))
            else:
                self.table = self._create_table_statement()
                self.create()
        else:
            # no data provided, read-only mode
            self.table = self.pd_sql.get_table(self.name)

        if self.table is None:
            raise ValueError("Could not init table '%s'" % name)

    def exists(self):
        return self.pd_sql.has_table(self.name)

    def sql_schema(self):
        from sqlalchemy.schema import CreateTable
        return str(CreateTable(self.table))

    def create(self):
        self.table.create()

    def insert_statement(self):
        return self.table.insert()

    def maybe_asscalar(self, i):
        try:
            return np.asscalar(i)
        except AttributeError:
            return i

    def insert_data(self):
        if self.index is not None:
            temp = self.frame.copy()
            temp.index.names = self.index
            try:
                temp.reset_index(inplace=True)
            except ValueError as err:
                raise ValueError(
                    "duplicate name in index/columns: {0}".format(err))
        else:
            temp = self.frame
        return temp

    def insert(self):
        ins = self.insert_statement()
        data_list = []
        temp = self.insert_data()
        keys = temp.columns

        for t in temp.itertuples():
            data = dict((k, self.maybe_asscalar(v))
                        for k, v in zip(keys, t[1:]))
            data_list.append(data)

        self.pd_sql.execute(ins, data_list)

    def read(self, coerce_float=True, parse_dates=None, columns=None):
        if columns is not None and len(columns) > 0:
            from sqlalchemy import select
            cols = [self.table.c[n] for n in columns]
            if self.index is not None:
                [cols.insert(0, self.table.c[idx]) for idx in self.index[::-1]]
            sql_select = select(cols)
        else:
            sql_select = self.table.select()

        result = self.pd_sql.execute(sql_select)
        data = result.fetchall()
        column_names = result.keys()

        self.frame = DataFrame.from_records(data, columns=column_names,
                                            coerce_float=coerce_float)
        self._harmonize_columns(parse_dates=parse_dates)

        if self.index is not None:
            self.frame.set_index(self.index, inplace=True)

        return self.frame

    def _create_table_statement(self):
        from sqlalchemy import Table, Column

        columns = list(map(str, self.frame.columns))
        column_types = map(self._sqlalchemy_type, self.frame.dtypes)

        columns = [Column(name, typ)
                   for name, typ in zip(columns, column_types)]

        if self.index is not None:
            for i, idx_label in enumerate(self.index[::-1]):
                idx_type = self._sqlalchemy_type(
                    self.frame.index.get_level_values(i))
                columns.insert(0, Column(idx_label, idx_type, index=True))

        return Table(self.name, self.pd_sql.meta, *columns)
    def _index_name(self, index, index_label):
        # for writing: index=True to include index in sql table
        if index is True:
            nlevels = self.frame.index.nlevels
            # if index_label is specified, set this as index name(s)
            if index_label is not None:
                if not isinstance(index_label, list):
                    index_label = [index_label]
                if len(index_label) != nlevels:
                    raise ValueError(
                        "Length of 'index_label' should match number of "
                        "levels, which is {0}".format(nlevels))
                else:
                    return index_label
            # return the used column labels for the index columns
            if (nlevels == 1 and 'index' not in self.frame.columns
                    and self.frame.index.name is None):
                return ['index']
            else:
                return [l if l is not None else "level_{0}".format(i)
                        for i, l in enumerate(self.frame.index.names)]
        # for reading: index=(list of) string to specify column to set as index
        elif isinstance(index, string_types):
            return [index]
        elif isinstance(index, list):
            return index
        else:
            return None

    def _harmonize_columns(self, parse_dates=None):
        """ Make a data_frame's column type align with an sql_table
            column types.
            Need to work around limited NA value support.
            Floats are always fine, ints must always be floats if there are
            Null values.
            Booleans are hard because converting bool column with None
            replaces all Nones with False. Therefore only convert bool if
            there are no NA values.
            Datetimes should already be converted to np.datetime if
            supported, but here we also force conversion if required.
            (A short illustration of these NA constraints follows after
            this class.)
        """
        # handle non-list entries for parse_dates gracefully
        if parse_dates is True or parse_dates is None or parse_dates is False:
            parse_dates = []
        if not hasattr(parse_dates, '__iter__'):
            parse_dates = [parse_dates]

        for sql_col in self.table.columns:
            col_name = sql_col.name
            try:
                df_col = self.frame[col_name]
                # the type the dataframe column should have
                col_type = self._numpy_type(sql_col.type)

                if col_type is datetime or col_type is date:
                    if not issubclass(df_col.dtype.type, np.datetime64):
                        self.frame[col_name] = _handle_date_column(df_col)

                elif col_type is float:
                    # floats support NA, can always convert!
                    self.frame[col_name].astype(col_type, copy=False)

                elif len(df_col) == df_col.count():
                    # No NA values, can convert ints and bools
                    if col_type is int or col_type is bool:
                        self.frame[col_name].astype(col_type, copy=False)

                # Handle date parsing
                if col_name in parse_dates:
                    try:
                        fmt = parse_dates[col_name]
                    except TypeError:
                        fmt = None
                    self.frame[col_name] = _handle_date_column(
                        df_col, format=fmt)

            except KeyError:
                pass  # this column not in results
    def _sqlalchemy_type(self, arr_or_dtype):
        from sqlalchemy.types import (Integer, Float, Text, Boolean,
                                      DateTime, Date, Interval)

        if arr_or_dtype is date:
            return Date
        if com.is_datetime64_dtype(arr_or_dtype):
            try:
                tz = arr_or_dtype.tzinfo
                return DateTime(timezone=True)
            except:
                return DateTime
        if com.is_timedelta64_dtype(arr_or_dtype):
            warnings.warn("the 'timedelta' type is not supported, and will be "
                          "written as integer values (ns frequency) to the "
                          "database.", UserWarning)
            return Integer
        elif com.is_float_dtype(arr_or_dtype):
            return Float
        elif com.is_integer_dtype(arr_or_dtype):
            # TODO: Refine integer size.
            return Integer
        elif com.is_bool(arr_or_dtype):
            return Boolean
        return Text

    def _numpy_type(self, sqltype):
        from sqlalchemy.types import Integer, Float, Boolean, DateTime, Date

        if isinstance(sqltype, Float):
            return float
        if isinstance(sqltype, Integer):
            # TODO: Refine integer size.
            return int
        if isinstance(sqltype, DateTime):
            # Caution: np.datetime64 is also a subclass of np.number.
            return datetime
        if isinstance(sqltype, Date):
            return date
        if isinstance(sqltype, Boolean):
            return bool
        return object
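# Why the conversions above are conditional on missing values, shown with
# plain pandas (no database involved):
import pandas as pd

pd.Series([1, None]).dtype                       # float64: NULLs force int -> float
pd.Series([True, None]).astype(bool).tolist()    # [True, False]: None silently becomes False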
read_sql \"\"\"", "sql_select = select(cols) else: sql_select = self.table.select() result = self.pd_sql.execute(sql_select)", "data_list) def read(self, coerce_float=True, parse_dates=None, columns=None): if columns is not", "col_desc in cursor.description] data = self._fetchall_as_list(cursor) cursor.close() data_frame = DataFrame.from_records(", "removed in future versions. \" \"MySQL will be further supported", "support DBAPI connections. Parameters ---------- table_name : string Name of", "name of SQL table flavor : {'sqlite', 'mysql'}, default 'sqlite'", "1 and 'index' not in self.frame.columns and self.frame.index.name is None:", "\"You can use ``execute(...).fetchall()`` instead.\", FutureWarning) cur = execute(sql, con,", "the PandasSQLTable for legacy support. Instead of a table variable", "to the \" \"database.\", UserWarning) return Integer elif com.is_float_dtype(arr_or_dtype): return", "isinstance(index_label, list): index_label = [index_label] if len(index_label) != nlevels: raise", ": SQLAlchemy engine or sqlite3 DBAPI2 connection Using SQLAlchemy makes", "execute method. Returns ------- Results Iterable \"\"\" if cur is", "typ in self.frame.dtypes] if self.index is not None: for i,", "if kwargs: cur.execute(*args, **kwargs) else: cur.execute(*args) return cur except Exception", "isinstance(result, list): result = list(result) return result def to_sql(self, frame,", "Integer): # TODO: Refine integer size. return int if isinstance(sqltype,", "TypeError: fmt = None self.frame[col_name] = _handle_date_column( df_col, format=fmt) except", "connection flavor : {'sqlite', 'mysql'}, default 'sqlite' The flavor of", "return the used column labels for the index columns if", "will be further supported through SQLAlchemy engines. if_exists : {'fail',", "unit=format) elif issubclass(col.dtype.type, np.floating) or issubclass(col.dtype.type, np.integer): # parse dates", "parse_dates is True or parse_dates is None or parse_dates is", "data provided, read-only mode self.table = self.pd_sql.get_table(self.name) if self.table is", "DBAPI2 connection cur: depreciated, cursor is obtained from connection params:", "this as index name(s) if index_label is not None: if", "NA value support. Floats are always fine, ints must always", "table.insert() def has_table(self, name): flavor_map = { 'sqlite': (\"SELECT name", "if len(index_label) != nlevels: raise ValueError( \"Length of 'index_label' should", "should be possible to remove this code \"\"\" def get_sqltype(dtype,", "will be \" \"written as integer values (ns frequency) to", "connection failed, reconnecting...') return uquery(sql, con, retry=False) return result #------------------------------------------------------------------------------", "\" \"You can use ``execute(...).fetchall()`` instead.\", FutureWarning) cur = execute(sql,", "pandas_sql = pandasSQL_builder(con) return pandas_sql.read_sql( sql, index_col=index_col, params=params, coerce_float=coerce_float, parse_dates=parse_dates)", "Ignored when using SQLAlchemy engine. 'mysql' is deprecated and will", "return Table(self.name, self.pd_sql.meta, *columns) def _harmonize_columns(self, parse_dates=None): \"\"\" Make a", "pandas_sql.read_table( sql, index_col=index_col, coerce_float=coerce_float, parse_dates=parse_dates, columns=columns) else: return pandas_sql.read_sql( sql,", "data. Create if does not exist. \"\"\" table = PandasSQLTableLegacy(", "will be further supported with SQLAlchemy engines.\") def pandasSQL_builder(con, flavor=None,", "database table name. 
# SQL enquote and wildcard symbols
_SQL_SYMB = {
    'mysql': {
        'br_l': '`',
        'br_r': '`',
        'wld': '%s'
    },
    'sqlite': {
        'br_l': '[',
        'br_r': ']',
        'wld': '?'
    }
}

_SAFE_NAMES_WARNING = ("The spaces in these column names will not be changed. "
                       "In pandas versions < 0.14, spaces were converted to "
                       "underscores.")
class PandasSQLTableLegacy(PandasSQLTable):
    """Patch the PandasSQLTable for legacy support.
    Instead of a table variable just use the Create Table statement"""

    def sql_schema(self):
        return str(self.table)

    def create(self):
        self.pd_sql.execute(self.table)

    def insert_statement(self):
        names = list(map(str, self.frame.columns))
        flv = self.pd_sql.flavor
        br_l = _SQL_SYMB[flv]['br_l']   # left val quote char
        br_r = _SQL_SYMB[flv]['br_r']   # right val quote char
        wld = _SQL_SYMB[flv]['wld']     # wildcard char

        if self.index is not None:
            [names.insert(0, idx) for idx in self.index[::-1]]

        bracketed_names = [br_l + column + br_r for column in names]
        col_names = ','.join(bracketed_names)
        wildcards = ','.join([wld] * len(names))
        insert_statement = 'INSERT INTO %s (%s) VALUES (%s)' % (
            self.name, col_names, wildcards)
        return insert_statement

    def insert(self):
        ins = self.insert_statement()
        temp = self.insert_data()
        data_list = []

        for t in temp.itertuples():
            data = tuple((self.maybe_asscalar(v) for v in t[1:]))
            data_list.append(data)

        cur = self.pd_sql.con.cursor()
        cur.executemany(ins, data_list)
        cur.close()
        self.pd_sql.con.commit()

    def _create_table_statement(self):
        "Return a CREATE TABLE statement to suit the contents of a DataFrame."
        columns = list(map(str, self.frame.columns))
        pat = re.compile('\s+')
        if any(map(pat.search, columns)):
            warnings.warn(_SAFE_NAMES_WARNING)
        column_types = [self._sql_type_name(typ) for typ in self.frame.dtypes]

        if self.index is not None:
            for i, idx_label in enumerate(self.index[::-1]):
                columns.insert(0, idx_label)
                column_types.insert(
                    0,
                    self._sql_type_name(self.frame.index.get_level_values(i).dtype))

        flv = self.pd_sql.flavor
        br_l = _SQL_SYMB[flv]['br_l']   # left val quote char
        br_r = _SQL_SYMB[flv]['br_r']   # right val quote char
        col_template = br_l + '%s' + br_r + ' %s'

        columns = ',\n  '.join(col_template % x
                               for x in zip(columns, column_types))
        template = """CREATE TABLE %(name)s (
                      %(columns)s
                      )"""
        create_statement = template % {'name': self.name, 'columns': columns}
        return create_statement

    def _sql_type_name(self, dtype):
        """Map a numpy dtype to a flavor specific type name via _SQL_TYPES
        (same mapping as ``get_sqltype`` below)."""
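# A runnable sketch of the statements the legacy table produces for the
# 'sqlite' flavor (bracketed identifiers, '?' wildcards); the column names
# and types are made up for illustration and this code only mirrors the
# string templates above, it does not call the class:
_columns = [("a", "INTEGER"), ("b", "TEXT")]
_name = "frame"

_col_defs = ",\n  ".join("[%s] %s" % col for col in _columns)
_create_statement = "CREATE TABLE %s (\n  %s\n)" % (_name, _col_defs)

_col_names = ",".join("[%s]" % c for c, _ in _columns)
_wildcards = ",".join(["?"] * len(_columns))
_insert_statement = "INSERT INTO %s (%s) VALUES (%s)" % (
    _name, _col_names, _wildcards)

print(_create_statement)
print(_insert_statement)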
class PandasSQLLegacy(PandasSQL):

    def __init__(self, con, flavor, is_cursor=False):
        self.is_cursor = is_cursor
        self.con = con
        if flavor is None:
            flavor = 'sqlite'
        if flavor not in ['sqlite', 'mysql']:
            raise NotImplementedError
        else:
            self.flavor = flavor

    def execute(self, *args, **kwargs):
        if self.is_cursor:
            cur = self.con
        else:
            cur = self.con.cursor()
        try:
            if kwargs:
                cur.execute(*args, **kwargs)
            else:
                cur.execute(*args)
            return cur
        except Exception as e:
            try:
                self.con.rollback()
            except Exception:  # pragma: no cover
                ex = DatabaseError(
                    "Execution failed on sql: %s\n%s\nunable to rollback"
                    % (args[0], e))
                raise_with_traceback(ex)

            ex = DatabaseError("Execution failed on sql: %s" % args[0])
            raise_with_traceback(ex)

    def read_sql(self, sql, index_col=None, coerce_float=True, params=None,
                 parse_dates=None):
        args = _convert_params(sql, params)
        cursor = self.execute(*args)
        columns = [col_desc[0] for col_desc in cursor.description]
        data = self._fetchall_as_list(cursor)
        cursor.close()

        data_frame = DataFrame.from_records(data, columns=columns,
                                            coerce_float=coerce_float)
        _parse_date_columns(data_frame, parse_dates)

        if index_col is not None:
            data_frame.set_index(index_col, inplace=True)
        return data_frame

    def _fetchall_as_list(self, cur):
        result = cur.fetchall()
        if not isinstance(result, list):
            result = list(result)
        return result

    def to_sql(self, frame, name, if_exists='fail', index=True,
               index_label=None):
        """Write records stored in a DataFrame to a SQL database
        (see the module level ``to_sql`` above for parameter details)."""
        table = PandasSQLTableLegacy(name, self, frame=frame, index=index,
                                     if_exists=if_exists,
                                     index_label=index_label)
        table.insert()

    def has_table(self, name):
        flavor_map = {
            'sqlite': ("SELECT name FROM sqlite_master "
                       "WHERE type='table' AND name='%s';") % name,
            'mysql': "SHOW TABLES LIKE '%s'" % name}
        query = flavor_map.get(self.flavor)
        return len(self.execute(query).fetchall()) > 0

    def get_table(self, table_name):
        return None  # not supported in Legacy mode

    def drop_table(self, name):
        drop_sql = "DROP TABLE %s" % name
        self.execute(drop_sql)

    def _create_sql_schema(self, frame, table_name):
        table = PandasSQLTableLegacy(table_name, self, frame=frame)
        return str(table.sql_schema())
def get_schema(frame, name, flavor='sqlite', keys=None, con=None):
    """
    Get the SQL db table schema for the given frame.

    Parameters
    ----------
    frame : DataFrame
    name : string
        name of SQL table
    flavor : {'sqlite', 'mysql'}, default 'sqlite'
        The flavor of SQL to use. Ignored when using SQLAlchemy engine.
    keys : string or sequence
        columns to use a primary key
    con : an open SQL database connection object or an SQLAlchemy engine
        Using SQLAlchemy makes it possible to use any DB supported by that
        library.
        If a DBAPI2 object, only sqlite3 is supported.
    """
    if con is None:
        if flavor == 'mysql':
            warnings.warn(_MYSQL_WARNING, FutureWarning)
        return _get_schema_legacy(frame, name, flavor, keys)

    pandas_sql = pandasSQL_builder(con=con, flavor=flavor)
    return pandas_sql._create_sql_schema(frame, name)


def _get_schema_legacy(frame, name, flavor, keys=None):
    """Old function from 0.13.1. To keep backwards compatibility.
    When mysql legacy support is dropped, it should be possible to remove
    this code
    """

    def get_sqltype(dtype, flavor):
        pytype = dtype.type
        pytype_name = "text"
        if issubclass(pytype, np.floating):
            pytype_name = "float"
        elif issubclass(pytype, np.integer):
            pytype_name = "int"
        elif issubclass(pytype, np.datetime64) or pytype is datetime:
            # Caution: np.datetime64 is also a subclass of np.number.
            pytype_name = "datetime"
        elif pytype is datetime.date:
            pytype_name = "date"
        elif issubclass(pytype, np.bool_):
            pytype_name = "bool"
        return _SQL_TYPES[pytype_name][flavor]

    lookup_type = lambda dtype: get_sqltype(dtype, flavor)

    column_types = lzip(frame.dtypes.index, map(lookup_type, frame.dtypes))
    if flavor == 'sqlite':
        columns = ',\n  '.join('[%s] %s' % x for x in column_types)
    else:
        columns = ',\n  '.join('`%s` %s' % x for x in column_types)

    keystr = ''
    if keys is not None:
        if isinstance(keys, string_types):
            keys = (keys,)
        keystr = ', PRIMARY KEY (%s)' % ','.join(keys)
    template = """CREATE TABLE %(name)s (
                  %(columns)s
                  %(keystr)s
                  );"""
    create_statement = template % {'name': name, 'columns': columns,
                                   'keystr': keystr}
    return create_statement
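# get_schema only builds the CREATE TABLE text, it does not touch a database;
# a minimal sketch with a made-up frame and table name:
import pandas as pd
from pandas.io import sql

_df = pd.DataFrame({"a": [1, 2], "b": ["x", "y"]})
print(sql.get_schema(_df, "demo"))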
\" \"In pandas versions < 0.14, spaces were", "> 0: from sqlalchemy import select cols = [self.table.c[n] for", "keys=None, con=None): \"\"\" Get the SQL db table schema for", "execute(sql, con).rowcount Parameters ---------- sql: string SQL query to be", "write_frame(frame, name, con, flavor='sqlite', if_exists='fail', **kwargs): \"\"\"DEPRECIATED - use to_sql", "'INTEGER', } } # SQL enquote and wildcard symbols _SQL_SYMB", "set index=False when not specified index = kwargs.pop('index', False) return", "names will not be changed. \" \"In pandas versions <", "there are no NA values. Datetimes should already be converted", "parse_dates): \"\"\" Force non-datetime columns to be read as such.", "string is strftime compatible in case of parsing string times", "return data_frame def to_sql(self, frame, name, if_exists='fail', index=True, index_label=None): table", "When support for DBAPI connections is removed, # is_cursor should", "self.frame[col_name] = _handle_date_column( df_col, format=fmt) except KeyError: pass # this", "table flavor: {'sqlite', 'mysql'}, default 'sqlite' if_exists: {'fail', 'replace', 'append'},", "import PandasObject from pandas.tseries.tools import to_datetime class SQLAlchemyRequired(ImportError): pass class", "is not None: data_frame.set_index(index_col, inplace=True) return data_frame def to_sql(self, frame,", "# left val quote char br_r = _SQL_SYMB[flv]['br_r'] # right", "through SQLAlchemy engines. if_exists : {'fail', 'replace', 'append'}, default 'fail'", "We want to write a frame if self.pd_sql.has_table(self.name): if if_exists", "self.index is not None: for i, idx_label in enumerate(self.index[::-1]): idx_type", "DB supported by that library. If a DBAPI2 object, only", "engines. keys : string or sequence columns to use a", "\"\"\"Read SQL database table into a DataFrame. Given a table", "self.frame.index.nlevels # if index_label is specified, set this as index", "sql_col in self.table.columns: col_name = sql_col.name try: df_col = self.frame[col_name]", "None: # pragma: no cover result = [] return result", "raise ValueError( \"duplicate name in index/columns: {0}\".format(err)) else: temp =", "raise traceback.print_exc() if retry: print('Looks like your connection failed, reconnecting...')", "used when reading a table). Returns ------- DataFrame Notes -----", "elif issubclass(pytype, np.datetime64) or pytype is datetime: # Caution: np.datetime64", "data = self._fetchall_as_list(cursor) cursor.close() data_frame = DataFrame.from_records( data, columns=columns, coerce_float=coerce_float)", "table exists, insert data. Create if does not exist. index", "con, index_col=None, coerce_float=True, parse_dates=None, columns=None): \"\"\"Read SQL database table into", "Table, Column columns = list(map(str, self.frame.columns)) column_types = map(self._sqlalchemy_type, self.frame.dtypes)", "reduce dependency on DB-specific API. 
\"\"\" from __future__ import print_function,", "column labels for the index columns if nlevels == 1", "def _convert_params(sql, params): \"\"\"convert sql and params args to DBAPI2.0", "query into a DataFrame \"\"\" pandas_sql = pandasSQL_builder(con) if isinstance(pandas_sql,", "sql: string SQL query to be executed con: DBAPI2 connection", "coerce_float=True, params=None, parse_dates=None): args = _convert_params(sql, params) cursor = self.execute(*args)", "return temp def insert(self): ins = self.insert_statement() data_list = []", "If None is given (default) and `index` is True, then", "Booleans are hard because converting bool column with None replaces", "self, frame=frame, index=index, if_exists=if_exists, index_label=index_label) table.insert() @property def tables(self): return", "_parse_date_columns(data_frame, parse_dates): \"\"\" Force non-datetime columns to be read as", "if arr_or_dtype is date: return Date if com.is_datetime64_dtype(arr_or_dtype): try: tz", "None: [names.insert(0, idx) for idx in self.index[::-1]] bracketed_names = [br_l", "Precision. parse_dates : list or dict - List of column", "np.number. return datetime if isinstance(sqltype, Date): return date if isinstance(sqltype,", "list(lzip(*result)[0]) elif result is None: # pragma: no cover result", "string SQL query to be executed con : SQLAlchemy engine", "following: >>> execute(sql, con).rowcount Parameters ---------- sql: string SQL query", "not specified index = kwargs.pop('index', False) return to_sql(frame, name, con,", "based on the provided parameters \"\"\" # When support for", "of SQL table flavor: {'sqlite', 'mysql'}, default 'sqlite' if_exists: {'fail',", "is dropped, it should be possible to remove this code", "**kwargs) # Append wrapped function docstrings read_frame.__doc__ += read_sql.__doc__ frame_query.__doc__", "import re import numpy as np import pandas.core.common as com", "SQLAlchemy engine or sqlite3 DBAPI2 connection Using SQLAlchemy makes it", "or database table into a DataFrame. Parameters ---------- sql :", "return to_sql(frame, name, con, flavor=flavor, if_exists=if_exists, index=index, **kwargs) # Append", "raise ValueError(\"Table %s not found\" % table_name, con) def read_sql_query(sql,", "in loss of Precision. parse_dates : list or dict -", "The new ``to_sql`` function supports sqlalchemy engines to work with", "traceback.print_exc() if retry: return tquery(sql, con=con, retry=False) if result and", "self.table is None: raise ValueError(\"Could not init table '%s'\" %", "pandasSQL_builder(con, flavor=flavor) return pandas_sql.has_table(table_name) table_exists = has_table _MYSQL_WARNING = (\"The", "cover print('Failed to commit, may need to restart interpreter') else:", "use to_sql Write records stored in a DataFrame to a", "{'name': name, 'columns': columns, 'keystr': keystr} return create_statement # legacy", "self.name = name self.pd_sql = pandas_sql_engine self.prefix = prefix self.frame", "'timedelta' type is not supported, and will be \" \"written", "name='%s';\") % name, 'mysql': \"SHOW TABLES LIKE '%s'\" % name}", "SQL query to be executed con: DBAPI2 connection cur: depreciated,", "type the dataframe column should have col_type = self._numpy_type(sql_col.type) if", "SQL to use. 
if_exists : {'fail', 'replace', 'append'}, default 'fail'", "con, flavor, is_cursor=False): self.is_cursor = is_cursor self.con = con if", "if flavor == 'mysql': warnings.warn(_MYSQL_WARNING, FutureWarning) return PandasSQLLegacy(con, flavor, is_cursor=is_cursor)", "index_col=None, coerce_float=True, parse_dates=None, params=None): args = _convert_params(sql, params) result =", "cover ex = DatabaseError( \"Execution failed on sql: %s\\n%s\\nunable to", "string. Optionally provide an `index_col` parameter to use one of", "'[', 'br_r': ']', 'wld': '?' } } _SAFE_NAMES_WARNING = (\"The", "Read and write to DataFrames def read_sql_table(table_name, con, index_col=None, coerce_float=True,", "Parameters ---------- frame : DataFrame name : string Name of", "flavor == 'mysql': warnings.warn(_MYSQL_WARNING, FutureWarning) return PandasSQLLegacy(con, flavor, is_cursor=is_cursor) except", "name, flavor, keys=None): \"\"\"Old function from 0.13.1. To keep backwards", "to include index in sql table if index is True:", "coerce_float=coerce_float) _parse_date_columns(data_frame, parse_dates) if index_col is not None: data_frame.set_index(index_col, inplace=True)", "self.is_cursor = is_cursor self.con = con if flavor is None:", "parse_dates is False: parse_dates = [] if not hasattr(parse_dates, '__iter__'):", "index_label is not None: if not isinstance(index_label, list): index_label =", "a table). Returns ------- DataFrame Notes ----- This function is", "= self.con else: cur = self.con.cursor() try: if kwargs: cur.execute(*args,", "'float': { 'mysql': 'FLOAT', 'sqlite': 'REAL', }, 'int': { 'mysql':", "in case of parsing integer timestamps - Dict of ``{column_name:", "Create if does not exist. index : boolean, default False", "for name, typ in zip(columns, column_types)] if self.index is not", "def _create_table_statement(self): from sqlalchemy import Table, Column columns = list(map(str,", "to non-string, non-numeric objects (like decimal.Decimal) to floating point. Can", "date: if not issubclass(df_col.dtype.type, np.datetime64): self.frame[col_name] = _handle_date_column(df_col) elif col_type", "isinstance(sqltype, Date): return date if isinstance(sqltype, Boolean): return bool return", "or sequence, default None Column label for index column(s). If", "\"\"\" pandas_sql = PandasSQLAlchemy(con) table = pandas_sql.read_table( table_name, index_col=index_col, coerce_float=coerce_float,", "as index coerce_float : boolean, default True Attempt to convert", "as SQLite columns : list List of column names to", "insert_statement(self): return self.table.insert() def maybe_asscalar(self, i): try: return np.asscalar(i) except", "elif pytype is datetime.date: pytype_name = \"date\" elif issubclass(pytype, np.bool_):", "def __init__(self, con, flavor, is_cursor=False): self.is_cursor = is_cursor self.con =", "self.prefix = prefix self.frame = frame self.index = self._index_name(index, index_label)", "query. If only one column selected, then plain list is", "not exist. 
index : boolean, default True Write DataFrame index", "'wld': '%s' }, 'sqlite': { 'br_l': '[', 'br_r': ']', 'wld':", "thing as tquery, but instead of returning results, it returns", "columns \"\"\" # handle non-list entries for parse_dates gracefully if", "if flavor not in ['sqlite', 'mysql']: raise NotImplementedError else: self.flavor", "insert(self): ins = self.insert_statement() temp = self.insert_data() data_list = []", "in column_types) else: columns = ',\\n '.join('`%s` %s' % x", "data_frame = DataFrame.from_records( data, columns=columns, coerce_float=coerce_float) _parse_date_columns(data_frame, parse_dates) if index_col", "\"\"\" if if_exists not in ('fail', 'replace', 'append'): raise ValueError(\"'{0}'", "not None: self.frame.set_index(self.index, inplace=True) return self.frame def _index_name(self, index, index_label):", "versions. \" \"You can use ``execute(...).fetchall()`` instead.\", FutureWarning) cur =", "params).fetchall() Parameters ---------- sql: string SQL query to be executed", "cur=None, retry=True): \"\"\" DEPRECATED. Returns list of tuples corresponding to", "not None: if isinstance(keys, string_types): keys = (keys,) keystr =", "[self._sql_type_name(typ) for typ in self.frame.dtypes] if self.index is not None:", "DataFrame. read_sql \"\"\" pandas_sql = PandasSQLAlchemy(con) table = pandas_sql.read_table( table_name,", "Name of SQL table con: SQLAlchemy engine or sqlite3 DBAPI2", "use the Create Table statement\"\"\" def sql_schema(self): return str(self.table) def", "def _create_sql_schema(self, frame, table_name): table = PandasSQLTableLegacy(table_name, self, frame=frame) return", "floating point. Can result in loss of Precision. parse_dates :", "is not None: # We want to write a frame", "return tquery(sql, con=con, retry=False) if result and len(result[0]) == 1:", "coerce_float=coerce_float, parse_dates=parse_dates) if pandas_sql.has_table(sql): return pandas_sql.read_table( sql, index_col=index_col, coerce_float=coerce_float, parse_dates=parse_dates,", "def _parse_date_columns(data_frame, parse_dates): \"\"\" Force non-datetime columns to be read", "None: return True else: return False def get_table(self, table_name): return", "[sql] if params is not None: if hasattr(params, 'keys'): #", "a DBAPI2 object, only sqlite3 is supported. index_col : string,", "= \"int\" elif issubclass(pytype, np.datetime64) or pytype is datetime: #", "useful for SQL result sets params : list, tuple or", "return PandasSQLLegacy(con, flavor, is_cursor=is_cursor) class PandasSQLTable(PandasObject): \"\"\" For mapping Pandas", "= _SQL_SYMB[flv]['br_r'] # right val quote char wld = _SQL_SYMB[flv]['wld']", "map(lookup_type, frame.dtypes)) if flavor == 'sqlite': columns = ',\\n '.join('[%s]", "and SQL databases using SQLAlchemy to handle DataBase abstraction \"\"\"", "parameters to pass to execute method. parse_dates : list or", "cursor = self.execute(*args) columns = [col_desc[0] for col_desc in cursor.description]", "write a frame if self.pd_sql.has_table(self.name): if if_exists == 'fail': raise", "**kwargs): \"\"\"Simple passthrough to SQLAlchemy engine\"\"\" return self.engine.execute(*args, **kwargs) def", "ns, ms, us) in case of parsing integer timestamps -", "\"text\" if issubclass(pytype, np.floating): pytype_name = \"float\" elif issubclass(pytype, np.integer):", "through SQLAlchemy engines. 
keys : string or sequence columns to", "dropped, it should be possible to remove this code \"\"\"", "return pandas_sql.read_sql( sql, index_col=index_col, params=params, coerce_float=coerce_float, parse_dates=parse_dates) def to_sql(frame, name,", "of SQL to use. Ignored when using SQLAlchemy engine. 'mysql'", "is obtained from connection params : list or tuple, optional", "char if self.index is not None: [names.insert(0, idx) for idx", "'bool': { 'mysql': 'BOOLEAN', 'sqlite': 'INTEGER', } } # SQL", "can use ``execute(...).rowcount`` instead.\", FutureWarning) cur = execute(sql, con, cur=cur,", "were converted to \" \"underscores.\") class PandasSQLTableLegacy(PandasSQLTable): \"\"\"Patch the PandasSQLTable", "# Flavour specific sql strings and handler class for access", "column names will not be changed. \" \"In pandas versions", "not supported, and will be \" \"written as integer values", "_index_name(self, index, index_label): # for writing: index=True to include index", "does not exist. \"\"\" table = PandasSQLTableLegacy( name, self, frame=frame,", "self.index try: temp.reset_index(inplace=True) except ValueError as err: raise ValueError( \"duplicate", "convertions. Also holds various flags needed to avoid having to", "e.__class__.__name__ if excName == 'OperationalError': # pragma: no cover print('Failed", "e.__class__.__name__ if excName != 'OperationalError': raise traceback.print_exc() if retry: print('Looks", "[index] elif isinstance(index, list): return index else: return None def", "br_l = _SQL_SYMB[flv]['br_l'] # left val quote char br_r =", "------- DataFrame Notes ----- This function is a convenience wrapper", "return result except Exception as e: # pragma: no cover", "wld = _SQL_SYMB[flv]['wld'] # wildcard char if self.index is not", "return insert_statement def insert(self): ins = self.insert_statement() temp = self.insert_data()", "a DataFrame.\" columns = list(map(str, self.frame.columns)) pat = re.compile('\\s+') if", "self.table = self.pd_sql.get_table(self.name) if self.table is None: raise ValueError(\"Could not", "None is given (default) and `index` is True, then the", "table exists, do nothing. replace: If table exists, drop it,", "flavor not in ['sqlite', 'mysql']: raise NotImplementedError else: self.flavor =", "_handle_date_column( df_col, format=fmt) except KeyError: pass # this column not", "def to_sql(self, frame, name, if_exists='fail', index=True, index_label=None): table = PandasSQLTable(", "'`', 'wld': '%s' }, 'sqlite': { 'br_l': '[', 'br_r': ']',", "def read_frame(*args, **kwargs): \"\"\"DEPRECIATED - use read_sql \"\"\" warnings.warn(\"read_frame is", "import traceback import itertools import re import numpy as np", "compatibility) and will delegate to the specific function depending on", "for i, l in enumerate(self.frame.index.names)] # for reading: index=(list of)", "= \"bool\" return _SQL_TYPES[pytype_name][self.pd_sql.flavor] class PandasSQLLegacy(PandasSQL): def __init__(self, con, flavor,", "integer timestamps - Dict of ``{column_name: arg dict}``, where the", "with false. Therefore only convert bool if there are no", "of Precision. parse_dates : list or dict - List of", "DataBase has named table. Parameters ---------- table_name: string Name of", "con is None: if flavor == 'mysql': warnings.warn(_MYSQL_WARNING, FutureWarning) return", "of parsing string times or is one of (D, s,", "ValueError as err: raise ValueError( \"duplicate name in index/columns: {0}\".format(err))", "changed. 
\" \"In pandas versions < 0.14, spaces were converted", "to_sql(frame, name, con, flavor='sqlite', if_exists='fail', index=True, index_label=None): \"\"\" Write records", "open SQL database connection object or an SQLAlchemy engine Using", "# pragma: no cover ex = DatabaseError( \"Execution failed on", "legacy names, with depreciation warnings and copied docs def read_frame(*args,", "**kwargs): \"\"\"DEPRECIATED - use read_sql \"\"\" warnings.warn(\"frame_query is depreciated, use", "frame=frame, index=index, if_exists=if_exists, index_label=index_label) table.insert() @property def tables(self): return self.meta.tables", "DatabaseError(\"Execution failed on sql: %s\" % args[0]) raise_with_traceback(ex) def read_sql(self,", "any DB supported by that library. If a DBAPI2 object,", "parse_dates=None, columns=None): if columns is not None and len(columns) >", "------- Results Iterable \"\"\" warnings.warn( \"tquery is depreciated, and will", "issubclass(pytype, np.floating): pytype_name = \"float\" elif com.is_timedelta64_dtype(pytype): warnings.warn(\"the 'timedelta' type", "keyword arguments of :func:`pandas.to_datetime` Especially useful with databases without native", "= map(self._sqlalchemy_type, self.frame.dtypes) columns = [Column(name, typ) for name, typ", "parameters to pass to execute method. Returns ------- Number of", "flavor) column_types = lzip(frame.dtypes.index, map(lookup_type, frame.dtypes)) if flavor == 'sqlite':", "parse_dates=None): \"\"\"Read SQL query into a DataFrame. Returns a DataFrame", "return read_sql(*args, **kwargs) def frame_query(*args, **kwargs): \"\"\"DEPRECIATED - use read_sql", "flavor def execute(self, *args, **kwargs): if self.is_cursor: cur = self.con", "To keep the behaviour this function you need to specify", "Date if com.is_datetime64_dtype(arr_or_dtype): try: tz = arr_or_dtype.tzinfo return DateTime(timezone=True) except:", "con, cur=cur) result = _safe_fetch(cur) if con is not None:", "\"\"\"CREATE TABLE %(name)s ( %(columns)s )\"\"\" create_statement = template %", "if DataBase has named table. Parameters ---------- table_name: string Name", "is returned. To obtain the same result in the future,", "for t in temp.itertuples(): data = tuple((self.maybe_asscalar(v) for v in", "return None def _create_table_statement(self): from sqlalchemy import Table, Column columns", "'OperationalError': return [] def tquery(sql, con=None, cur=None, retry=True): \"\"\" DEPRECATED.", "frame : DataFrame name : string con : DBAPI2 connection", "return uquery(sql, con, retry=False) return result #------------------------------------------------------------------------------ #--- Read and", "convertion between DataFrame and SQL databases using SQLAlchemy to handle", "SQLAlchemy engine\"\"\" return self.engine.execute(*args, **kwargs) def read_table(self, table_name, index_col=None, coerce_float=True,", "exists, drop it, recreate it, and insert data. append: If", "',\\n '.join('[%s] %s' % x for x in column_types) else:", "'REAL', }, 'int': { 'mysql': 'BIGINT', 'sqlite': 'INTEGER', }, 'datetime':", "params : list or tuple, optional List of parameters to", "of the query string. Optionally provide an `index_col` parameter to", "through SQLAlchemy engines. 
Returns ------- boolean \"\"\" pandas_sql = pandasSQL_builder(con,", "executed con : SQLAlchemy engine or sqlite3 DBAPI2 connection Using", "table_name): return self.meta.tables.get(table_name) def drop_table(self, table_name): if self.engine.has_table(table_name): self.get_table(table_name).drop() self.meta.clear()", "format\"\"\" args = [sql] if params is not None: if", "def __init__(self, name, pandas_sql_engine, frame=None, index=True, if_exists='fail', prefix='pandas', index_label=None): self.name", "query into a DataFrame. Returns a DataFrame corresponding to the", ": Read SQL query into a DataFrame \"\"\" pandas_sql =", "SQL databases using SQLAlchemy to handle DataBase abstraction \"\"\" def", "pytype_name = \"int\" elif issubclass(pytype, np.datetime64) or pytype is datetime:", "provide an `index_col` parameter to use one of the columns", "to floating point, useful for SQL result sets params :", "SQL table con : SQLAlchemy engine or sqlite3 DBAPI2 connection", "(ns frequency) to the \" \"database.\", UserWarning) pytype_name = \"int\"", "get_sqltype(dtype, flavor) column_types = lzip(frame.dtypes.index, map(lookup_type, frame.dtypes)) if flavor ==", "as e: excName = e.__class__.__name__ if excName == 'OperationalError': #", "point. Can result in loss of Precision. parse_dates : list", "commit, may need to restart interpreter') else: raise traceback.print_exc() if", "is also a subclass of np.number. pytype_name = \"datetime\" elif", "template = \"\"\"CREATE TABLE %(name)s ( %(columns)s )\"\"\" create_statement =", "try: tz = arr_or_dtype.tzinfo return DateTime(timezone=True) except: return DateTime if", "mapping Pandas tables to SQL tables. Uses fact that table", "\"database.\", UserWarning) pytype_name = \"int\" elif issubclass(pytype, np.integer): pytype_name =", "result is None: # pragma: no cover result = []", "SQLite Returns ------- DataFrame See also -------- read_sql_table : Read", "abstraction \"\"\" def __init__(self, engine, meta=None): self.engine = engine if", "frequency) to the \" \"database.\", UserWarning) return Integer elif com.is_float_dtype(arr_or_dtype):", "**kwargs) def write_frame(frame, name, con, flavor='sqlite', if_exists='fail', **kwargs): \"\"\"DEPRECIATED -", "NotImplementedError pandas_sql.to_sql(frame, name, if_exists=if_exists, index=index, index_label=index_label) def has_table(table_name, con, flavor='sqlite'):", "format=fmt) except KeyError: pass # this column not in results", "meta=meta) else: if flavor == 'mysql': warnings.warn(_MYSQL_WARNING, FutureWarning) return PandasSQLLegacy(con,", "any(map(pat.search, columns)): warnings.warn(_SAFE_NAMES_WARNING) column_types = [self._sql_type_name(typ) for typ in self.frame.dtypes]", "to \" \"underscores.\") class PandasSQLTableLegacy(PandasSQLTable): \"\"\"Patch the PandasSQLTable for legacy", "engine Sqlite DBAPI conncection mode not supported index_col : string,", "lzip(frame.dtypes.index, map(lookup_type, frame.dtypes)) if flavor == 'sqlite': columns = ',\\n", "needed to avoid having to pass them between functions all", "not in ['sqlite', 'mysql']: raise NotImplementedError else: self.flavor = flavor", "issubclass(pytype, np.integer): pytype_name = \"int\" elif issubclass(pytype, np.datetime64) or pytype", "']', 'wld': '?' 
} } _SAFE_NAMES_WARNING = (\"The spaces in", "not None: if hasattr(params, 'keys'): # test if params is", "for col_name in parse_dates: df_col = data_frame[col_name] try: fmt =", "name : string con : DBAPI2 connection flavor : {'sqlite',", "index_col=index_col, coerce_float=coerce_float, parse_dates=parse_dates, columns=columns) else: return pandas_sql.read_sql( sql, index_col=index_col, params=params,", "object. coerce_float : boolean, default True Attempt to convert values", "DataFrame name: name of SQL table flavor: {'sqlite', 'mysql'}, default", "\"WHERE type='table' AND name='%s';\") % name, 'mysql': \"SHOW TABLES LIKE", "be removed in future versions. \" \"You can use ``execute(...).fetchall()``", "if isinstance(format, dict): return to_datetime(col, **format) else: if format in", "cur=cur) result = _safe_fetch(cur) if con is not None: try:", "con=None, cur=None, retry=True): \"\"\" DEPRECATED. Returns list of tuples corresponding", "be created with an SQLAlchemy engine or connection+sql flavor\") def", "name, con, flavor='sqlite', if_exists='fail', **kwargs): \"\"\"DEPRECIATED - use to_sql Write", "NA values. Datetimes should already be converted to np.datetime if", "= None data_frame[col_name] = _handle_date_column(df_col, format=fmt) return data_frame def execute(sql,", "temp def insert(self): ins = self.insert_statement() data_list = [] temp", "Boolean, DateTime, Date if isinstance(sqltype, Float): return float if isinstance(sqltype,", "boolean \"\"\" pandas_sql = pandasSQL_builder(con, flavor=flavor) return pandas_sql.has_table(table_name) table_exists =", "table_name): table = PandasSQLTableLegacy(table_name, self, frame=frame) return str(table.sql_schema()) def get_schema(frame,", "in self.index[::-1]] sql_select = select(cols) else: sql_select = self.table.select() result", "and integer timestamp columns \"\"\" # handle non-list entries for", "flavor : {'sqlite', 'mysql'}, default 'sqlite' The flavor of SQL", "warnings.warn( \"uquery is depreciated, and will be removed in future", "use read_sql\", FutureWarning) return read_sql(*args, **kwargs) def frame_query(*args, **kwargs): \"\"\"DEPRECIATED", "to a SQL database. Parameters ---------- frame : DataFrame name", "has_table(self, name): if self.meta.tables.get(name) is not None: return True else:", "obtained from connection Returns ------- Results Iterable \"\"\" warnings.warn( \"tquery", "Integer, Float, Boolean, DateTime, Date if isinstance(sqltype, Float): return float", "index=True)) return Table(self.name, self.pd_sql.meta, *columns) def _harmonize_columns(self, parse_dates=None): \"\"\" Make", "Dict of ``{column_name: arg dict}``, where the arg dict corresponds", "Float): return float if isinstance(sqltype, Integer): # TODO: Refine integer", "\"\"\" def get_sqltype(dtype, flavor): pytype = dtype.type pytype_name = \"text\"", "the provided input (database table name or sql query). 
See", "col_names, wildcards) return insert_statement def insert(self): ins = self.insert_statement() temp", "#--- Deprecated tquery and uquery def _safe_fetch(cur): try: result =", "flavor=None, meta=None, is_cursor=False): \"\"\" Convenience function to return the correct", "if_exists='fail', **kwargs): \"\"\"DEPRECIATED - use to_sql Write records stored in", "PRIMARY KEY (%s)' % ','.join(keys) template = \"\"\"CREATE TABLE %(name)s", "Uses fact that table is reflected by SQLAlchemy to do", "flavor == 'mysql': warnings.warn(_MYSQL_WARNING, FutureWarning) return PandasSQLLegacy(con, flavor, is_cursor=is_cursor) class", "re.compile('\\s+') if any(map(pat.search, columns)): warnings.warn(_SAFE_NAMES_WARNING) column_types = [self._sql_type_name(typ) for typ", "SQL query to be executed or database table name. con", "multiIndex def __init__(self, name, pandas_sql_engine, frame=None, index=True, if_exists='fail', prefix='pandas', index_label=None):", "---------- sql : string SQL query to be executed or", "sqltype): from sqlalchemy.types import Integer, Float, Boolean, DateTime, Date if", "return int if isinstance(sqltype, DateTime): # Caution: np.datetime64 is also", "def frame_query(*args, **kwargs): \"\"\"DEPRECIATED - use read_sql \"\"\" warnings.warn(\"frame_query is", ": boolean, default False Write DataFrame index as a column", "parse dates as timestamp format = 's' if format is", "== 'mysql': warnings.warn(_MYSQL_WARNING, FutureWarning) return PandasSQLLegacy(con, flavor, is_cursor=is_cursor) class PandasSQLTable(PandasObject):", "name(s) if index_label is not None: if not isinstance(index_label, list):", "frame is not None: # We want to write a", "non-numeric objects (like decimal.Decimal) to floating point, useful for SQL", "def maybe_asscalar(self, i): try: return np.asscalar(i) except AttributeError: return i", "pat = re.compile('\\s+') if any(map(pat.search, columns)): warnings.warn(_SAFE_NAMES_WARNING) column_types = [self._sql_type_name(typ)", "keys=None): \"\"\"Old function from 0.13.1. To keep backwards compatibility. When", "will delegate to the specific function depending on the provided", "DataFrame name : string name of SQL table flavor :", "copied docs def read_frame(*args, **kwargs): \"\"\"DEPRECIATED - use read_sql \"\"\"", "point, useful for SQL result sets params : list, tuple", "Column to set as index coerce_float : boolean, default True", "Execute the given SQL query using the provided connection object.", "or parse_dates is None or parse_dates is False: parse_dates =", "with different sql flavors. See also -------- pandas.DataFrame.to_sql \"\"\" warnings.warn(\"write_frame", "_create_table_statement(self): from sqlalchemy import Table, Column columns = list(map(str, self.frame.columns))", "See also -------- pandas.DataFrame.to_sql \"\"\" warnings.warn(\"write_frame is depreciated, use to_sql\",", "(63)', 'sqlite': 'TEXT', }, 'float': { 'mysql': 'FLOAT', 'sqlite': 'REAL',", "parse_dates=parse_dates, columns=columns) def read_sql(self, sql, index_col=None, coerce_float=True, parse_dates=None, params=None): args", "'mysql'}, default 'sqlite' The flavor of SQL to use. 
if_exists", "for t in temp.itertuples(): data = dict((k, self.maybe_asscalar(v)) for k,", "PandasObject from pandas.tseries.tools import to_datetime class SQLAlchemyRequired(ImportError): pass class DatabaseError(IOError):", "self.frame = frame self.index = self._index_name(index, index_label) if frame is", "= self.insert_statement() temp = self.insert_data() data_list = [] for t", "is_cursor should not be necessary. try: import sqlalchemy if isinstance(con,", "with an SQLAlchemy engine or connection+sql flavor\") class PandasSQLAlchemy(PandasSQL): \"\"\"", "same thing as tquery, but instead of returning results, it", "column_names = result.keys() self.frame = DataFrame.from_records( data, columns=column_names, coerce_float=coerce_float) self._harmonize_columns(parse_dates=parse_dates)", "index else: return None def _create_table_statement(self): from sqlalchemy import Table,", "If a DBAPI2 object, only sqlite3 is supported. flavor: {'sqlite',", "arg dict}``, where the arg dict corresponds to the keyword", "is_cursor=True) args = _convert_params(sql, params) return pandas_sql.execute(*args) #------------------------------------------------------------------------------ #--- Deprecated", "'%s' + br_r + ' %s' columns = ',\\n '.join(col_template", "self.maybe_asscalar(v)) for k, v in zip(keys, t[1:])) data_list.append(data) self.pd_sql.execute(ins, data_list)", "inplace=True) return self.frame def _index_name(self, index, index_label): # for writing:", "col_type is float: # floats support NA, can always convert!", "Returns ------- DataFrame See also -------- read_sql_table : Read SQL", "\"datetime\" elif pytype is datetime.date: pytype_name = \"date\" elif issubclass(pytype,", "hasattr(parse_dates, '__iter__'): parse_dates = [parse_dates] for col_name in parse_dates: df_col", "idx_label in enumerate(self.index[::-1]): idx_type = self._sqlalchemy_type( self.frame.index.get_level_values(i)) columns.insert(0, Column(idx_label, idx_type,", "= self._numpy_type(sql_col.type) if col_type is datetime or col_type is date:", "Text, Boolean, DateTime, Date, Interval if arr_or_dtype is date: return", "parse_dates=None, columns=None): \"\"\" Read SQL query or database table into", "_sql_type_name(self, dtype): pytype = dtype.type pytype_name = \"text\" if issubclass(pytype,", "in favor of ``to_sql``. There are however two differences: -", "\"You can use ``execute(...).rowcount`` instead.\", FutureWarning) cur = execute(sql, con,", "PandasSQLAlchemy(con, meta=meta) else: if flavor == 'mysql': warnings.warn(_MYSQL_WARNING, FutureWarning) return", "------- Results Iterable \"\"\" if cur is None: pandas_sql =", "index as a column index_label : string or sequence, default", "# the type the dataframe column should have col_type =", "support. Floats are always fine, ints must always be floats", "sql table if index is True: nlevels = self.frame.index.nlevels #", "as tquery, but instead of returning results, it returns the", "for sql_col in self.table.columns: col_name = sql_col.name try: df_col =", "non-numeric objects (like decimal.Decimal) to floating point. Can result in", "as index elif isinstance(index, string_types): return [index] elif isinstance(index, list):", "frame=frame) return str(table.sql_schema()) def get_schema(frame, name, flavor='sqlite', keys=None, con=None): \"\"\"", "to use as index for the returned DataFrame object. 
coerce_float", "get_sqltype(dtype, flavor): pytype = dtype.type pytype_name = \"text\" if issubclass(pytype,", "print('Failed to commit, may need to restart interpreter') else: raise", "cursor is obtained from connection Returns ------- Results Iterable \"\"\"", "List of parameters to pass to execute method. parse_dates :", "\"\"\" # When support for DBAPI connections is removed, #", "flavor, is_cursor=is_cursor) class PandasSQLTable(PandasObject): \"\"\" For mapping Pandas tables to", "% ( self.name, col_names, wildcards) return insert_statement def insert(self): ins", "not exist. \"\"\" table = PandasSQLTableLegacy( name, self, frame=frame, index=index,", "drop_sql = \"DROP TABLE %s\" % name self.execute(drop_sql) def _create_sql_schema(self,", "inplace=True) return data_frame def _fetchall_as_list(self, cur): result = cur.fetchall() if", "specified index = kwargs.pop('index', False) return to_sql(frame, name, con, flavor=flavor,", "self.create() else: # no data provided, read-only mode self.table =", "\" \"You can use ``execute(...).rowcount`` instead.\", FutureWarning) cur = execute(sql,", "self, index=index_col) return table.read(coerce_float=coerce_float, parse_dates=parse_dates, columns=columns) def read_sql(self, sql, index_col=None,", "uquery(sql, con=None, cur=None, retry=True, params=None): \"\"\" DEPRECATED. Does the same", "{ 'br_l': '`', 'br_r': '`', 'wld': '%s' }, 'sqlite': {", "nothing. replace: If table exists, drop it, recreate it, and", "Subclasses Should define read_sql and to_sql \"\"\" def read_sql(self, *args,", "name, pandas_sql_engine, frame=None, index=True, if_exists='fail', prefix='pandas', index_label=None): self.name = name", "flavor='sqlite', if_exists='fail', **kwargs): \"\"\"DEPRECIATED - use to_sql Write records stored", "parse_dates = [] if not hasattr(parse_dates, '__iter__'): parse_dates = [parse_dates]", "= flavor def execute(self, *args, **kwargs): if self.is_cursor: cur =", "specify column to set as index elif isinstance(index, string_types): return", "frame=None, index=True, if_exists='fail', prefix='pandas', index_label=None): self.name = name self.pd_sql =", "col_names = ','.join(bracketed_names) wildcards = ','.join([wld] * len(names)) insert_statement =", "sqlite3 is supported. index_col : string, optional column name to", "not None: data_frame.set_index(index_col, inplace=True) return data_frame def to_sql(self, frame, name,", "def insert_data(self): if self.index is not None: temp = self.frame.copy()", "not valid for if_exists\".format(if_exists)) else: self.table = self._create_table_statement() self.create() else:", "*args, **kwargs): raise ValueError( \"PandasSQL must be created with an", "lookup_type = lambda dtype: get_sqltype(dtype, flavor) column_types = lzip(frame.dtypes.index, map(lookup_type,", "not None: if not isinstance(index_label, list): index_label = [index_label] if", "in zip(columns, column_types)) template = \"\"\"CREATE TABLE %(name)s ( %(columns)s", "cols = [self.table.c[n] for n in columns] if self.index is", "_handle_date_column(df_col, format=fmt) return data_frame def execute(sql, con, cur=None, params=None): \"\"\"", "else: return pandas_sql.read_sql( sql, index_col=index_col, params=params, coerce_float=coerce_float, parse_dates=parse_dates) def to_sql(frame,", "the query string. Optionally provide an `index_col` parameter to use", "be executed or database table name. 
con : SQLAlchemy engine", "temp = self.frame return temp def insert(self): ins = self.insert_statement()", "float: # floats support NA, can always convert! self.frame[col_name].astype(col_type, copy=False)", "- Dict of ``{column_name: arg dict}``, where the arg dict", "table_name, con) def read_sql_query(sql, con, index_col=None, coerce_float=True, params=None, parse_dates=None): \"\"\"Read", "ImportError: if flavor == 'mysql': warnings.warn(_MYSQL_WARNING, FutureWarning) return PandasSQLLegacy(con, flavor,", "char col_template = br_l + '%s' + br_r + '", "= \"text\" if issubclass(pytype, np.floating): pytype_name = \"float\" elif issubclass(pytype,", "params=params, coerce_float=coerce_float, parse_dates=parse_dates) def to_sql(frame, name, con, flavor='sqlite', if_exists='fail', index=True,", "MetaData meta = MetaData(self.engine) meta.reflect(self.engine) self.meta = meta def execute(self,", "not None: [names.insert(0, idx) for idx in self.index[::-1]] bracketed_names =", "useful with databases without native Datetime support, such as SQLite", "np.datetime64) or pytype is datetime: # Caution: np.datetime64 is also", "dict): return to_datetime(col, **format) else: if format in ['D', 's',", "valid for if_exists\".format(if_exists)) pandas_sql = pandasSQL_builder(con, flavor=flavor) if isinstance(frame, Series):", "a DataFrame to a SQL database. Parameters ---------- frame: DataFrame", "sql table (only used when reading a table). Returns -------", "tuples corresponding to each row in given sql query. If", "loss of Precision. parse_dates : list or dict - List", "(database table name or sql query). See also -------- read_sql_table", "sql, index_col=None, coerce_float=True, parse_dates=None, params=None): args = _convert_params(sql, params) result", "PandasSQLLegacy(PandasSQL): def __init__(self, con, flavor, is_cursor=False): self.is_cursor = is_cursor self.con", "\" \"levels, which is {0}\".format(nlevels)) else: return index_label # return", "try: con.commit() except Exception as e: excName = e.__class__.__name__ if", "number of rows affected. Good for update queries. To obtain", "the columns as the index, otherwise default integer index will", "float if isinstance(sqltype, Integer): # TODO: Refine integer size. return", "columns is not None and len(columns) > 0: from sqlalchemy", "enumerate(self.frame.index.names)] # for reading: index=(list of) string to specify column", "\"\"\"DEPRECIATED - use read_sql \"\"\" warnings.warn(\"frame_query is depreciated, use read_sql\",", "read_sql \"\"\" warnings.warn(\"read_frame is depreciated, use read_sql\", FutureWarning) return read_sql(*args,", "table into a DataFrame. Given a table name and an", "args def _handle_date_column(col, format=None): if isinstance(format, dict): return to_datetime(col, **format)", "If table exists, do nothing. replace: If table exists, drop", "values (ns frequency) to the \" \"database.\", UserWarning) pytype_name =", "connection (legacy mode) Using SQLAlchemy makes it possible to use", "supported through SQLAlchemy engines. Returns ------- boolean \"\"\" pandas_sql =", "bracketed_names = [br_l + column + br_r for column in", ": string Name of SQL table in database con :", "data_list.append(data) self.pd_sql.execute(ins, data_list) def read(self, coerce_float=True, parse_dates=None, columns=None): if columns", "limited NA value support. 
Floats are always fine, ints must", "statement\"\"\" def sql_schema(self): return str(self.table) def create(self): self.pd_sql.execute(self.table) def insert_statement(self):", "SQL type convertions for each DB _SQL_TYPES = { 'text':", "convert ints and bools if col_type is int or col_type", "args[0]) raise_with_traceback(ex) def read_sql(self, sql, index_col=None, coerce_float=True, params=None, parse_dates=None): args", "if flavor is None: flavor = 'sqlite' if flavor not", "{0}\".format(err)) else: temp = self.frame return temp def insert(self): ins", "use the following: >>> execute(sql, con, params).fetchall() Parameters ---------- sql:", "supports sqlalchemy engines to work with different sql flavors. See", "``to_sql`` the index is written to the sql database by", "if_exists='fail', index=True, index_label=None): table = PandasSQLTable( name, self, frame=frame, index=index,", "list of tuples corresponding to each row in given sql", "Integer elif com.is_float_dtype(arr_or_dtype): return Float elif com.is_integer_dtype(arr_or_dtype): # TODO: Refine", "to both facilitate data retrieval and to reduce dependency on", "SQLAlchemy engine or DBAPI2 connection (legacy mode) Using SQLAlchemy makes", "uquery def _safe_fetch(cur): try: result = cur.fetchall() if not isinstance(result,", "def insert(self): ins = self.insert_statement() temp = self.insert_data() data_list =", "name): drop_sql = \"DROP TABLE %s\" % name self.execute(drop_sql) def", "API. \"\"\" from __future__ import print_function, division from datetime import", "object, only sqlite3 is supported. index_col : string, optional column", "the same thing as tquery, but instead of returning results,", "(D, s, ns, ms, us) in case of parsing integer", "params=None, parse_dates=None, columns=None): \"\"\" Read SQL query or database table", "is not None and len(columns) > 0: from sqlalchemy import", "self.get_table(table_name).drop() self.meta.clear() self.meta.reflect() def _create_sql_schema(self, frame, table_name): table = PandasSQLTable(table_name,", "flavor of SQL to use. Ignored when using SQLAlchemy engine.", "wildcards = ','.join([wld] * len(names)) insert_statement = 'INSERT INTO %s", "columns=None): \"\"\" Read SQL query or database table into a", "is specified, set this as index name(s) if index_label is", "_SQL_TYPES[pytype_name][self.pd_sql.flavor] class PandasSQLLegacy(PandasSQL): def __init__(self, con, flavor, is_cursor=False): self.is_cursor =", "name) def _get_schema_legacy(frame, name, flavor, keys=None): \"\"\"Old function from 0.13.1.", "if issubclass(pytype, np.floating): pytype_name = \"float\" elif issubclass(pytype, np.integer): pytype_name", "%(columns)s %(keystr)s );\"\"\" create_statement = template % {'name': name, 'columns':", "(\"The 'mysql' flavor with DBAPI connection is deprecated \" \"and", "use read_sql \"\"\" warnings.warn(\"read_frame is depreciated, use read_sql\", FutureWarning) return", "index_col=None, coerce_float=True, parse_dates=None, columns=None): table = PandasSQLTable(table_name, self, index=index_col) return", "pandas.core.common as com from pandas.compat import lzip, map, zip, raise_with_traceback,", "= execute(sql, con, cur=cur, params=params) result = cur.rowcount try: con.commit()", "Read SQL query or database table into a DataFrame. Parameters", "it will be further supported through SQLAlchemy engines. keys :", "SQLAlchemy engine. 
'mysql' is deprecated and will be removed in", "Using SQLAlchemy makes it possible to use any DB supported", "pandas versions < 0.14, spaces were converted to \" \"underscores.\")", "cur is None: pandas_sql = pandasSQL_builder(con) else: pandas_sql = pandasSQL_builder(cur,", "read_table(self, table_name, index_col=None, coerce_float=True, parse_dates=None, columns=None): table = PandasSQLTable(table_name, self,", "statement to suit the contents of a DataFrame.\" columns =", "Parameters ---------- sql : string Query to be executed con", "\"underscores.\") class PandasSQLTableLegacy(PandasSQLTable): \"\"\"Patch the PandasSQLTable for legacy support. Instead", "select cols = [self.table.c[n] for n in columns] if self.index", "pytype_name = \"int\" elif issubclass(pytype, np.integer): pytype_name = \"int\" elif", "is int or col_type is bool: self.frame[col_name].astype(col_type, copy=False) # Handle", "to handle DataBase abstraction \"\"\" def __init__(self, engine, meta=None): self.engine", "from pandas.core.api import DataFrame, Series from pandas.core.base import PandasObject from", "and ``read_sql_query`` (and for backward compatibility) and will delegate to", "isinstance(index, list): return index else: return None def _create_table_statement(self): from", "self.frame[col_name].astype(col_type, copy=False) elif len(df_col) == df_col.count(): # No NA values,", "warnings.warn(\"frame_query is depreciated, use read_sql\", FutureWarning) return read_sql(*args, **kwargs) def", "0: from sqlalchemy import select cols = [self.table.c[n] for n", "of parsing integer timestamps - Dict of ``{column_name: arg dict}``,", "ValueError( \"Length of 'index_label' should match number of \" \"levels,", "\"\"\" Convenience function to return the correct PandasSQL subclass based", "columns if nlevels == 1 and 'index' not in self.frame.columns", "def _sql_type_name(self, dtype): pytype = dtype.type pytype_name = \"text\" if", "sqlalchemy.schema import MetaData meta = MetaData(self.engine) meta.reflect(self.engine) self.meta = meta", "work with different sql flavors. See also -------- pandas.DataFrame.to_sql \"\"\"", "``execute(...).fetchall()`` instead.\", FutureWarning) cur = execute(sql, con, cur=cur) result =", "using SQLAlchemy engine. 'mysql' is deprecated and will be removed", "np.floating): pytype_name = \"float\" elif com.is_timedelta64_dtype(pytype): warnings.warn(\"the 'timedelta' type is", "return data_frame def execute(sql, con, cur=None, params=None): \"\"\" Execute the", "return cur except Exception as e: try: self.con.rollback() except Exception:", "e: excName = e.__class__.__name__ if excName == 'OperationalError': # pragma:", "- append: If table exists, insert data. Create if does", "coerce_float=coerce_float, parse_dates=parse_dates, columns=columns) else: return pandas_sql.read_sql( sql, index_col=index_col, params=params, coerce_float=coerce_float,", "also -------- read_sql_table : Read SQL database table into a", "if self.table is None: self.table = self._create_table_statement() else: raise ValueError(", "the sql database by default. 
To keep the behaviour this", "\"DROP TABLE %s\" % name self.execute(drop_sql) def _create_sql_schema(self, frame, table_name):", "Optionally provide an `index_col` parameter to use one of the", "makes it possible to use any DB supported by that", "sql : string SQL query to be executed con :", "a frame if self.pd_sql.has_table(self.name): if if_exists == 'fail': raise ValueError(\"Table", "supported, and will be \" \"written as integer values (ns", "params args to DBAPI2.0 compliant format\"\"\" args = [sql] if", "different sql flavors. See also -------- pandas.DataFrame.to_sql \"\"\" warnings.warn(\"write_frame is", "of parameters to pass to execute method. Returns ------- Results", "zip(columns, column_types)] if self.index is not None: for i, idx_label", "SQLAlchemy engine or connection+sql flavor\") def to_sql(self, *args, **kwargs): raise", "\"bool\" return _SQL_TYPES[pytype_name][self.pd_sql.flavor] class PandasSQLLegacy(PandasSQL): def __init__(self, con, flavor, is_cursor=False):", "dataframe column should have col_type = self._numpy_type(sql_col.type) if col_type is", "from connection params: list or tuple, optional List of parameters", "and will be removed in future versions, but it will", "= [self.table.c[n] for n in columns] if self.index is not", "is depreciated, use read_sql\", FutureWarning) return read_sql(*args, **kwargs) def write_frame(frame,", "from sqlalchemy.schema import MetaData meta = MetaData(self.engine) meta.reflect(self.engine) self.meta =", "index=(list of) string to specify column to set as index", "params) result = self.execute(*args) data = result.fetchall() columns = result.keys()", "insert_statement(self): names = list(map(str, self.frame.columns)) flv = self.pd_sql.flavor br_l =", "execute(sql, con, cur=cur) result = _safe_fetch(cur) if con is not", "execute(sql, con, cur=None, params=None): \"\"\" Execute the given SQL query", "SQLAlchemy to do better type convertions. Also holds various flags", "meta: from sqlalchemy.schema import MetaData meta = MetaData(self.engine) meta.reflect(self.engine) self.meta", "------- Number of affected rows \"\"\" warnings.warn( \"uquery is depreciated,", "} } # SQL enquote and wildcard symbols _SQL_SYMB =", "return create_statement def _sql_type_name(self, dtype): pytype = dtype.type pytype_name =", "DBAPI2 connection Using SQLAlchemy makes it possible to use any", "index_col : string, optional column name to use as index", "not None: try: cur.close() con.commit() except Exception as e: excName", "if parse_dates is True or parse_dates is None or parse_dates", "np.integer): pytype_name = \"int\" elif issubclass(pytype, np.datetime64) or pytype is", "meta def execute(self, *args, **kwargs): \"\"\"Simple passthrough to SQLAlchemy engine\"\"\"", "to the keyword arguments of :func:`pandas.to_datetime` Especially useful with databases", "None: for i, idx_label in enumerate(self.index[::-1]): columns.insert(0, idx_label) column_types.insert(0, self._sql_type_name(self.frame.index.get_level_values(i).dtype))", "string or sequence columns to use a primary key con:", "Write DataFrame index as a column Notes ----- This function", "columns=columns) else: return pandas_sql.read_sql( sql, index_col=index_col, params=params, coerce_float=coerce_float, parse_dates=parse_dates) def", "Flavour specific sql strings and handler class for access to", "= self.frame return temp def insert(self): ins = self.insert_statement() data_list", "right val quote char wld = _SQL_SYMB[flv]['wld'] # wildcard char", "you need to specify ``index=False``. 
- The new ``to_sql`` function", "table_name : string Name of SQL table in database con", "provided parameters \"\"\" # When support for DBAPI connections is", "in database con : SQLAlchemy engine Sqlite DBAPI conncection mode", "to each row in given sql query. If only one", "and insert data. - append: If table exists, insert data.", "else: raise traceback.print_exc() if retry: return tquery(sql, con=con, retry=False) if", "we also force conversion if required \"\"\" # handle non-list", "x in zip(columns, column_types)) template = \"\"\"CREATE TABLE %(name)s (", "schema for the given frame. Parameters ---------- frame : DataFrame", "on sql: %s\" % args[0]) raise_with_traceback(ex) def read_sql(self, sql, index_col=None,", "if supported, but here we also force conversion if required", "depreciated, use read_sql\", FutureWarning) return read_sql(*args, **kwargs) def frame_query(*args, **kwargs):", "data_frame[col_name] = _handle_date_column(df_col, format=fmt) return data_frame def execute(sql, con, cur=None,", "table into a DataFrame read_sql \"\"\" pandas_sql = pandasSQL_builder(con) return", "list(result) return result except Exception as e: # pragma: no", "Boolean): return bool return object class PandasSQL(PandasObject): \"\"\" Subclasses Should", "wildcard symbols _SQL_SYMB = { 'mysql': { 'br_l': '`', 'br_r':", "t[1:])) data_list.append(data) cur = self.pd_sql.con.cursor() cur.executemany(ins, data_list) cur.close() self.pd_sql.con.commit() def", "only convert bool if there are no NA values. Datetimes", "columns = ',\\n '.join('`%s` %s' % x for x in", "such as SQLite Returns ------- DataFrame See also -------- read_sql_table", "return [index] elif isinstance(index, list): return index else: return None", "'mysql': warnings.warn(_MYSQL_WARNING, FutureWarning) return PandasSQLLegacy(con, flavor, is_cursor=is_cursor) class PandasSQLTable(PandasObject): \"\"\"", "= pandasSQL_builder(con, flavor=flavor) return pandas_sql.has_table(table_name) table_exists = has_table _MYSQL_WARNING =", "table name and an SQLAlchemy engine, returns a DataFrame. This", "'mysql']: raise NotImplementedError else: self.flavor = flavor def execute(self, *args,", "should have col_type = self._numpy_type(sql_col.type) if col_type is datetime or", "name, if_exists='fail', index=True, index_label=None): \"\"\" Write records stored in a", "= list(map(str, self.frame.columns)) pat = re.compile('\\s+') if any(map(pat.search, columns)): warnings.warn(_SAFE_NAMES_WARNING)", "= self._index_name(index, index_label) if frame is not None: # We", "'ns']: return to_datetime(col, coerce=True, unit=format) elif issubclass(col.dtype.type, np.floating) or issubclass(col.dtype.type,", "x for x in zip(columns, column_types)) template = \"\"\"CREATE TABLE", "index = kwargs.pop('index', False) return to_sql(frame, name, con, flavor=flavor, if_exists=if_exists,", "is also a subclass of np.number. return datetime if isinstance(sqltype,", "exists.\" % name) elif if_exists == 'replace': self.pd_sql.drop_table(self.name) self.table =", "otherwise default integer index will be used. 
Parameters ---------- sql", "column_types = lzip(frame.dtypes.index, map(lookup_type, frame.dtypes)) if flavor == 'sqlite': columns", "index is True: nlevels = self.frame.index.nlevels # if index_label is", "len(index_label) != nlevels: raise ValueError( \"Length of 'index_label' should match", "DataFrame name : string con : DBAPI2 connection flavor :", "parse_dates = [parse_dates] for col_name in parse_dates: df_col = data_frame[col_name]", "for DBAPI connections is removed, # is_cursor should not be", "if index_col is not None: data_frame.set_index(index_col, inplace=True) return data_frame def", "self._create_table_statement() self.create() else: # no data provided, read-only mode self.table", "or DBAPI2 connection (legacy mode) Using SQLAlchemy makes it possible", "column index_label : string or sequence, default None Column label", "DBAPI connections. Parameters ---------- table_name : string Name of SQL", "a SQL database. Parameters ---------- frame: DataFrame name: name of", "None: self.frame.set_index(self.index, inplace=True) return self.frame def _index_name(self, index, index_label): #", "if hasattr(params, 'keys'): # test if params is a mapping", "rows \"\"\" warnings.warn( \"uquery is depreciated, and will be removed", "with databases without native Datetime support, such as SQLite Returns", "but it will be further supported through SQLAlchemy engines. Returns", "coerce_float=True, parse_dates=None, params=None): args = _convert_params(sql, params) result = self.execute(*args)", "self.index is not None: for i, idx_label in enumerate(self.index[::-1]): columns.insert(0,", "connection+sql flavor\") def to_sql(self, *args, **kwargs): raise ValueError( \"PandasSQL must", "\"\"\" Write records stored in a DataFrame to a SQL", "== 'mysql': warnings.warn(_MYSQL_WARNING, FutureWarning) return _get_schema_legacy(frame, name, flavor, keys) pandas_sql", "values. Datetimes should already be converted to np.datetime if supported,", "DataFrame name : string Name of SQL table con :", "self.frame.copy() temp.index.names = self.index try: temp.reset_index(inplace=True) except ValueError as err:", "date if isinstance(sqltype, Boolean): return bool return object class PandasSQL(PandasObject):", "if not isinstance(index_label, list): index_label = [index_label] if len(index_label) !=", "index=index_col) return table.read(coerce_float=coerce_float, parse_dates=parse_dates, columns=columns) def read_sql(self, sql, index_col=None, coerce_float=True,", "in parse_dates: df_col = data_frame[col_name] try: fmt = parse_dates[col_name] except", "string}`` where format string is strftime compatible in case of", "params) return pandas_sql.execute(*args) #------------------------------------------------------------------------------ #--- Deprecated tquery and uquery def", "sqlite3 is supported. index_col : string, optional Column name to", "between functions all the time. \"\"\" # TODO: support for", "keys is not None: if isinstance(keys, string_types): keys = (keys,)", "function you need to specify ``index=False``. - The new ``to_sql``", "or an SQLAlchemy engine Using SQLAlchemy makes it possible to", "'%s' already exists.\" % name) elif if_exists == 'replace': self.pd_sql.drop_table(self.name)", "index_label): # for writing: index=True to include index in sql", "Returns ------- Results Iterable \"\"\" if cur is None: pandas_sql", "be further supported through SQLAlchemy engines. 
Returns ------- boolean \"\"\"", "sql_table column types Need to work around limited NA value", "}, 'date': { 'mysql': 'DATE', 'sqlite': 'TIMESTAMP', }, 'bool': {", "sql: %s\" % args[0]) raise_with_traceback(ex) def read_sql(self, sql, index_col=None, coerce_float=True,", "If table exists, do nothing. - replace: If table exists," ]
[ "0, len(s) - 1 while tail >= 0 and s[tail]", "== ' ': tail -= 1 while tail >= 0", "\"\"\" :type s: str :rtype: int \"\"\" cnt, tail =", "<filename>Dataset/Leetcode/train/58/28.py class Solution: def XXX(self, s): \"\"\" :type s: str", "!= ' ': cnt += 1 tail -= 1 return", "cnt, tail = 0, len(s) - 1 while tail >=", "s[tail] != ' ': cnt += 1 tail -= 1", "s): \"\"\" :type s: str :rtype: int \"\"\" cnt, tail", "while tail >= 0 and s[tail] != ' ': cnt", "': tail -= 1 while tail >= 0 and s[tail]", "tail = 0, len(s) - 1 while tail >= 0", ":rtype: int \"\"\" cnt, tail = 0, len(s) - 1", "1 while tail >= 0 and s[tail] == ' ':", "0 and s[tail] == ' ': tail -= 1 while", "XXX(self, s): \"\"\" :type s: str :rtype: int \"\"\" cnt,", "1 while tail >= 0 and s[tail] != ' ':", "str :rtype: int \"\"\" cnt, tail = 0, len(s) -", "class Solution: def XXX(self, s): \"\"\" :type s: str :rtype:", "tail >= 0 and s[tail] != ' ': cnt +=", "- 1 while tail >= 0 and s[tail] == '", ":type s: str :rtype: int \"\"\" cnt, tail = 0,", "= 0, len(s) - 1 while tail >= 0 and", "and s[tail] == ' ': tail -= 1 while tail", ">= 0 and s[tail] == ' ': tail -= 1", "while tail >= 0 and s[tail] == ' ': tail", "and s[tail] != ' ': cnt += 1 tail -=", "s: str :rtype: int \"\"\" cnt, tail = 0, len(s)", "' ': cnt += 1 tail -= 1 return cnt", "-= 1 while tail >= 0 and s[tail] != '", "0 and s[tail] != ' ': cnt += 1 tail", "len(s) - 1 while tail >= 0 and s[tail] ==", "int \"\"\" cnt, tail = 0, len(s) - 1 while", "' ': tail -= 1 while tail >= 0 and", ">= 0 and s[tail] != ' ': cnt += 1", "tail >= 0 and s[tail] == ' ': tail -=", "s[tail] == ' ': tail -= 1 while tail >=", "\"\"\" cnt, tail = 0, len(s) - 1 while tail", "def XXX(self, s): \"\"\" :type s: str :rtype: int \"\"\"", "Solution: def XXX(self, s): \"\"\" :type s: str :rtype: int", "tail -= 1 while tail >= 0 and s[tail] !=" ]
[ "class bases - tuple[type, ...] A tuple of classes to", "object.\"\"\" return object.__publics__ def get_privates(object: Object) -> Dictionary: \"\"\"Gets the", "else: # Adds attributes to __privates__ if name.startswith(\"__\"): self.__privates__[name] =", "Adds attributes to __privates__ if name.startswith(\"__\"): self.__privates__[name] = value #", "str, value: object) -> None: self[name] = value # Recreating", "return self[name] except KeyError as e: try: return super().__getattr__(name) except", "pass # List class class List(list, metaclass=Metaclass): pass # Dictionary", "import Callable import Systerm # Metaclass class Metaclass(ABCMeta): \"\"\"A metaclass", "{} cls.__publics__ = {} cls.__privates__ = {} cls.__protecteds__ = {}", "objects for name in dir(cls): value = getattr(cls, name) #", "the attributes of an object.\"\"\" return object.__attributes__ def get_publics(object: Object)", "import Any from typing import Callable import Systerm # Metaclass", "str) -> None: try: return self[name] except KeyError as e:", "Adds attributes to __privates__ if name.startswith(\"__\"): cls.__privates__[name] = value #", "cls.__setattr__ = self.setattr # Custom magic methods cls.__namespaces__ = {}", "__privates__ if name.startswith(\"__\"): cls.__privates__[name] = value # Adds attributes to", "init_module module = init_module() # MetaMod class class MetaMod(module.Module): pass", "name.endswith(\"__\"): cls.__magics__[name] = value # Adds attributes to other namespace", "# Adds attributes to namespace self.__namespaces__[name] = value # Object", "pass # Dictionary class class Dictionary(dict, metaclass=Metaclass): def __getattr__(self, name:", "- str The name of the class bases - tuple[type,", "# Adds attributes to __publics__ else: self.__publics__[name] = value self.__attributes__[name]", "...], attrs: dict[str, Any], **keys: Any) -> type: \"\"\"The static", "class class Object(object, metaclass=Metaclass): pass # List class class List(list,", "namespace self.__namespaces__[name] = value # Object class class Object(object, metaclass=Metaclass):", "that will customize the behavior of python.\"\"\" from abc import", "the behavior of all classes.\"\"\" def __new__(self, name: str, bases:", "Dictionary: \"\"\"Gets the attributes of an object.\"\"\" return object.__attributes__ def", "name, bases, dict(attrs), **keys) cls.__setattr__ = self.setattr # Custom magic", "of an object.\"\"\" return object.__protecteds__ # Initializing Systerm.module from Systerm._setup", "a module contains objects that will customize the behavior of", "self.setattr # Custom magic methods cls.__namespaces__ = {} cls.__magics__ =", "Metaclass class Metaclass(ABCMeta): \"\"\"A metaclass to customize the behavior of", "self[name] except KeyError as e: try: return super().__getattr__(name) except AttributeError:", "Any Keyword arguments to pass in \"\"\" # Creating a", "objects that will customize the behavior of python.\"\"\" from abc", "cls.__privates__ = {} cls.__protecteds__ = {} # Setting objects for", "in dir(cls): value = getattr(cls, name) # Adds attributes to", "attributes **keys - Any Keyword arguments to pass in \"\"\"", "-> Dictionary: \"\"\"Gets the protected namespaces of an object.\"\"\" return", "Any], **keys: Any) -> type: \"\"\"The static constructor for the", "Dictionary: \"\"\"Gets the protected namespaces of an object.\"\"\" return object.__protecteds__", "cls.__protecteds__ = {} # Setting objects for name in dir(cls):", "value # Adds attributes to other namespace else: # Adds", "attributes to 
__publics__ else: cls.__publics__[name] = value cls.__attributes__[name] = value", "value return cls def setattr(self, name: str, value: object) ->", "if name.startswith(\"__\") and name.endswith(\"__\"): cls.__magics__[name] = value # Adds attributes", "except KeyError as e: try: return super().__getattr__(name) except AttributeError: raise", "object.__privates__ def get_protecteds(object: Object) -> Dictionary: \"\"\"Gets the protected namespaces", "{name: getattr(ABC, name) for name in dir(ABC)}) def get_namespaces(object: Object)", "Any] A dictionary of attributes **keys - Any Keyword arguments", "else: # Adds attributes to __privates__ if name.startswith(\"__\"): cls.__privates__[name] =", "**keys - Any Keyword arguments to pass in \"\"\" #", "to other namespace else: # Adds attributes to __privates__ if", "attributes to __magics__ if name.startswith(\"__\") and name.endswith(\"__\"): self.__magics__[name] = value", "customize the behavior of python.\"\"\" from abc import ABC from", "= {} cls.__magics__ = {} cls.__attributes__ = {} cls.__publics__ =", "else: self.__publics__[name] = value self.__attributes__[name] = value # Adds attributes", "def get_namespaces(object: Object) -> Dictionary: \"\"\"Gets the namespaces of an", "\"\"\"Gets the namespaces of an object.\"\"\" return object.__namespaces__ def get_magics(object:", "private namespaces of an object.\"\"\" return object.__privates__ def get_protecteds(object: Object)", "of python.\"\"\" from abc import ABC from abc import ABCMeta", "Recreating ABC ABC = Metaclass(ABC.__name__, ABC.__bases__, {name: getattr(ABC, name) for", "Dictionary(dict, metaclass=Metaclass): def __getattr__(self, name: str) -> None: try: return", "as e: try: return super().__getattr__(name) except AttributeError: raise e def", "cls = super().__new__(self, name, bases, dict(attrs), **keys) cls.__setattr__ = self.setattr", "# Adds attributes to __protecteds__ elif name.startswith(\"_\"): cls.__protecteds__[name] = value", "object.__namespaces__ def get_magics(object: Object) -> Dictionary: \"\"\"Gets the magic methods", "arguments to pass in \"\"\" # Creating a new class", "attributes to __protecteds__ elif name.startswith(\"_\"): cls.__protecteds__[name] = value # Adds", "def get_magics(object: Object) -> Dictionary: \"\"\"Gets the magic methods of", "dict[str, Any] A dictionary of attributes **keys - Any Keyword", "None: self[name] = value # Recreating ABC ABC = Metaclass(ABC.__name__,", "-> None: # Adds attributes to __magics__ if name.startswith(\"__\") and", "Object) -> Dictionary: \"\"\"Gets the public namespaces of an object.\"\"\"", "cls.__namespaces__ = {} cls.__magics__ = {} cls.__attributes__ = {} cls.__publics__", "attributes to __publics__ else: self.__publics__[name] = value self.__attributes__[name] = value", "metaclass to customize the behavior of all classes.\"\"\" def __new__(self,", "{} cls.__protecteds__ = {} # Setting objects for name in", "to inherit attrs - dict[str, Any] A dictionary of attributes", "value # Adds attributes to namespace cls.__namespaces__[name] = value return", "class Metaclass(ABCMeta): \"\"\"A metaclass to customize the behavior of all", "List class class List(list, metaclass=Metaclass): pass # Dictionary class class", "Metaclass(ABC.__name__, ABC.__bases__, {name: getattr(ABC, name) for name in dir(ABC)}) def", "{} cls.__privates__ = {} cls.__protecteds__ = {} # Setting objects", "self.__protecteds__[name] = value # Adds attributes to __publics__ else: self.__publics__[name]", "get_attributes(object: Object) -> 
Dictionary: \"\"\"Gets the attributes of an object.\"\"\"", "value # Adds attributes to __protecteds__ elif name.startswith(\"_\"): cls.__protecteds__[name] =", "object.\"\"\" return object.__protecteds__ # Initializing Systerm.module from Systerm._setup import init_module", "the Metaclass. Parameters: name - str The name of the", "Object(object, metaclass=Metaclass): pass # List class class List(list, metaclass=Metaclass): pass", "namespaces of an object.\"\"\" return object.__privates__ def get_protecteds(object: Object) ->", "= value self.__attributes__[name] = value # Adds attributes to namespace", "Creating a new class cls = super().__new__(self, name, bases, dict(attrs),", "if name.startswith(\"__\"): cls.__privates__[name] = value # Adds attributes to __protecteds__", "name in dir(ABC)}) def get_namespaces(object: Object) -> Dictionary: \"\"\"Gets the", "public namespaces of an object.\"\"\" return object.__publics__ def get_privates(object: Object)", "behavior of python.\"\"\" from abc import ABC from abc import", "= Metaclass(ABC.__name__, ABC.__bases__, {name: getattr(ABC, name) for name in dir(ABC)})", "__protecteds__ elif name.startswith(\"_\"): cls.__protecteds__[name] = value # Adds attributes to", "get_protecteds(object: Object) -> Dictionary: \"\"\"Gets the protected namespaces of an", "elif name.startswith(\"_\"): self.__protecteds__[name] = value # Adds attributes to __publics__", "return object.__protecteds__ # Initializing Systerm.module from Systerm._setup import init_module module", "# Creating a new class cls = super().__new__(self, name, bases,", "Adds attributes to namespace self.__namespaces__[name] = value # Object class", "Metaclass(ABCMeta): \"\"\"A metaclass to customize the behavior of all classes.\"\"\"", "an object.\"\"\" return object.__publics__ def get_privates(object: Object) -> Dictionary: \"\"\"Gets", "= {} cls.__privates__ = {} cls.__protecteds__ = {} # Setting", "from abc import ABC from abc import ABCMeta from abc", "-> None: self[name] = value # Recreating ABC ABC =", "return object.__privates__ def get_protecteds(object: Object) -> Dictionary: \"\"\"Gets the protected", "type: \"\"\"The static constructor for the Metaclass. Parameters: name -", "cls.__publics__ = {} cls.__privates__ = {} cls.__protecteds__ = {} #", "= self.setattr # Custom magic methods cls.__namespaces__ = {} cls.__magics__", "class cls = super().__new__(self, name, bases, dict(attrs), **keys) cls.__setattr__ =", "super().__new__(self, name, bases, dict(attrs), **keys) cls.__setattr__ = self.setattr # Custom", "None: # Adds attributes to __magics__ if name.startswith(\"__\") and name.endswith(\"__\"):", "self.__publics__[name] = value self.__attributes__[name] = value # Adds attributes to", "value: object) -> None: # Adds attributes to __magics__ if", "python.\"\"\" from abc import ABC from abc import ABCMeta from", "return object.__publics__ def get_privates(object: Object) -> Dictionary: \"\"\"Gets the private", "pass in \"\"\" # Creating a new class cls =", "name in dir(cls): value = getattr(cls, name) # Adds attributes", "contains objects that will customize the behavior of python.\"\"\" from", "classes to inherit attrs - dict[str, Any] A dictionary of", "bases - tuple[type, ...] 
A tuple of classes to inherit", "Systerm._setup import init_module module = init_module() # MetaMod class class", "self[name] = value # Recreating ABC ABC = Metaclass(ABC.__name__, ABC.__bases__,", "Dictionary: \"\"\"Gets the public namespaces of an object.\"\"\" return object.__publics__", "value cls.__attributes__[name] = value # Adds attributes to namespace cls.__namespaces__[name]", "__getattr__(self, name: str) -> None: try: return self[name] except KeyError", "cls.__publics__[name] = value cls.__attributes__[name] = value # Adds attributes to", "return object.__magics__ def get_attributes(object: Object) -> Dictionary: \"\"\"Gets the attributes", "bases, dict(attrs), **keys) cls.__setattr__ = self.setattr # Custom magic methods", "...] A tuple of classes to inherit attrs - dict[str,", "class Dictionary(dict, metaclass=Metaclass): def __getattr__(self, name: str) -> None: try:", "self.__privates__[name] = value # Adds attributes to __protecteds__ elif name.startswith(\"_\"):", "attributes to __privates__ if name.startswith(\"__\"): cls.__privates__[name] = value # Adds", "= value # Adds attributes to __protecteds__ elif name.startswith(\"_\"): cls.__protecteds__[name]", "# Custom magic methods cls.__namespaces__ = {} cls.__magics__ = {}", "import init_module module = init_module() # MetaMod class class MetaMod(module.Module):", "elif name.startswith(\"_\"): cls.__protecteds__[name] = value # Adds attributes to __publics__", "Callable import Systerm # Metaclass class Metaclass(ABCMeta): \"\"\"A metaclass to", "of an object.\"\"\" return object.__namespaces__ def get_magics(object: Object) -> Dictionary:", "def get_publics(object: Object) -> Dictionary: \"\"\"Gets the public namespaces of", "the private namespaces of an object.\"\"\" return object.__privates__ def get_protecteds(object:", "return object.__namespaces__ def get_magics(object: Object) -> Dictionary: \"\"\"Gets the magic", "object.__protecteds__ # Initializing Systerm.module from Systerm._setup import init_module module =", "of an object.\"\"\" return object.__privates__ def get_protecteds(object: Object) -> Dictionary:", "None: try: return self[name] except KeyError as e: try: return", "the behavior of python.\"\"\" from abc import ABC from abc", "attributes to __protecteds__ elif name.startswith(\"_\"): self.__protecteds__[name] = value # Adds", "= {} cls.__protecteds__ = {} # Setting objects for name", "= value # Adds attributes to namespace cls.__namespaces__[name] = value", "-> Dictionary: \"\"\"Gets the namespaces of an object.\"\"\" return object.__namespaces__", "str, bases: tuple[type, ...], attrs: dict[str, Any], **keys: Any) ->", "__magics__ if name.startswith(\"__\") and name.endswith(\"__\"): self.__magics__[name] = value # Adds", "protected namespaces of an object.\"\"\" return object.__protecteds__ # Initializing Systerm.module", "of an object.\"\"\" return object.__magics__ def get_attributes(object: Object) -> Dictionary:", "super().__getattr__(name) except AttributeError: raise e def __setattr__(self, name: str, value:", "from typing import Any from typing import Callable import Systerm", "is a module contains objects that will customize the behavior", "an object.\"\"\" return object.__attributes__ def get_publics(object: Object) -> Dictionary: \"\"\"Gets", "e def __setattr__(self, name: str, value: object) -> None: self[name]", "-> Dictionary: \"\"\"Gets the magic methods of an object.\"\"\" return", "will customize the behavior of python.\"\"\" from abc import ABC", "to __magics__ if 
name.startswith(\"__\") and name.endswith(\"__\"): self.__magics__[name] = value #", "= {} cls.__attributes__ = {} cls.__publics__ = {} cls.__privates__ =", "Setting objects for name in dir(cls): value = getattr(cls, name)", "dict(attrs), **keys) cls.__setattr__ = self.setattr # Custom magic methods cls.__namespaces__", "value # Adds attributes to namespace self.__namespaces__[name] = value #", "KeyError as e: try: return super().__getattr__(name) except AttributeError: raise e", "value # Adds attributes to __publics__ else: cls.__publics__[name] = value", "# Object class class Object(object, metaclass=Metaclass): pass # List class", "the class bases - tuple[type, ...] A tuple of classes", "def get_protecteds(object: Object) -> Dictionary: \"\"\"Gets the protected namespaces of", "get_publics(object: Object) -> Dictionary: \"\"\"Gets the public namespaces of an", "__publics__ else: self.__publics__[name] = value self.__attributes__[name] = value # Adds", "# List class class List(list, metaclass=Metaclass): pass # Dictionary class", "in dir(ABC)}) def get_namespaces(object: Object) -> Dictionary: \"\"\"Gets the namespaces", "cls.__magics__ = {} cls.__attributes__ = {} cls.__publics__ = {} cls.__privates__", "self.__attributes__[name] = value # Adds attributes to namespace self.__namespaces__[name] =", "str The name of the class bases - tuple[type, ...]", "self.__magics__[name] = value # Adds attributes to other namespace else:", "-> type: \"\"\"The static constructor for the Metaclass. Parameters: name", "if name.startswith(\"__\") and name.endswith(\"__\"): self.__magics__[name] = value # Adds attributes", "A tuple of classes to inherit attrs - dict[str, Any]", "bases: tuple[type, ...], attrs: dict[str, Any], **keys: Any) -> type:", "Any) -> type: \"\"\"The static constructor for the Metaclass. Parameters:", "\"\"\"Meta is a module contains objects that will customize the", "= {} # Setting objects for name in dir(cls): value", "except AttributeError: raise e def __setattr__(self, name: str, value: object)", "Adds attributes to __magics__ if name.startswith(\"__\") and name.endswith(\"__\"): self.__magics__[name] =", "constructor for the Metaclass. Parameters: name - str The name", "name: str, value: object) -> None: self[name] = value #", "object) -> None: # Adds attributes to __magics__ if name.startswith(\"__\")", "typing import Any from typing import Callable import Systerm #", "def __new__(self, name: str, bases: tuple[type, ...], attrs: dict[str, Any],", "{} # Setting objects for name in dir(cls): value =", "to customize the behavior of all classes.\"\"\" def __new__(self, name:", "name.startswith(\"__\"): self.__privates__[name] = value # Adds attributes to __protecteds__ elif", "tuple[type, ...] 
A tuple of classes to inherit attrs -", "# Recreating ABC ABC = Metaclass(ABC.__name__, ABC.__bases__, {name: getattr(ABC, name)", "Systerm # Metaclass class Metaclass(ABCMeta): \"\"\"A metaclass to customize the", "try: return super().__getattr__(name) except AttributeError: raise e def __setattr__(self, name:", "cls.__attributes__[name] = value # Adds attributes to namespace cls.__namespaces__[name] =", "value # Adds attributes to __publics__ else: self.__publics__[name] = value", "= value # Adds attributes to namespace self.__namespaces__[name] = value", "metaclass=Metaclass): pass # List class class List(list, metaclass=Metaclass): pass #", "ABC ABC = Metaclass(ABC.__name__, ABC.__bases__, {name: getattr(ABC, name) for name", "an object.\"\"\" return object.__magics__ def get_attributes(object: Object) -> Dictionary: \"\"\"Gets", "str, value: object) -> None: # Adds attributes to __magics__", "tuple of classes to inherit attrs - dict[str, Any] A", "attributes to __magics__ if name.startswith(\"__\") and name.endswith(\"__\"): cls.__magics__[name] = value", "raise e def __setattr__(self, name: str, value: object) -> None:", "Adds attributes to __publics__ else: cls.__publics__[name] = value cls.__attributes__[name] =", "__setattr__(self, name: str, value: object) -> None: self[name] = value", "import ABCMeta from abc import abstractmethod from typing import Any", "to __publics__ else: self.__publics__[name] = value self.__attributes__[name] = value #", "class Object(object, metaclass=Metaclass): pass # List class class List(list, metaclass=Metaclass):", "to pass in \"\"\" # Creating a new class cls", "the public namespaces of an object.\"\"\" return object.__publics__ def get_privates(object:", "namespaces of an object.\"\"\" return object.__publics__ def get_privates(object: Object) ->", "Dictionary: \"\"\"Gets the namespaces of an object.\"\"\" return object.__namespaces__ def", "inherit attrs - dict[str, Any] A dictionary of attributes **keys", "= getattr(cls, name) # Adds attributes to __magics__ if name.startswith(\"__\")", "-> Dictionary: \"\"\"Gets the attributes of an object.\"\"\" return object.__attributes__", "object.\"\"\" return object.__magics__ def get_attributes(object: Object) -> Dictionary: \"\"\"Gets the", "\"\"\"Gets the private namespaces of an object.\"\"\" return object.__privates__ def", "name.startswith(\"__\"): cls.__privates__[name] = value # Adds attributes to __protecteds__ elif", "other namespace else: # Adds attributes to __privates__ if name.startswith(\"__\"):", "name: str, value: object) -> None: # Adds attributes to", "def get_attributes(object: Object) -> Dictionary: \"\"\"Gets the attributes of an", "-> Dictionary: \"\"\"Gets the private namespaces of an object.\"\"\" return", "name of the class bases - tuple[type, ...] 
A tuple", "in \"\"\" # Creating a new class cls = super().__new__(self,", "value # Adds attributes to __protecteds__ elif name.startswith(\"_\"): self.__protecteds__[name] =", "= value # Recreating ABC ABC = Metaclass(ABC.__name__, ABC.__bases__, {name:", "init_module() # MetaMod class class MetaMod(module.Module): pass module.modules[__name__].__class__ = MetaMod", "the protected namespaces of an object.\"\"\" return object.__protecteds__ # Initializing", "# Setting objects for name in dir(cls): value = getattr(cls,", "all classes.\"\"\" def __new__(self, name: str, bases: tuple[type, ...], attrs:", "\"\"\" # Creating a new class cls = super().__new__(self, name,", "\"\"\"Gets the magic methods of an object.\"\"\" return object.__magics__ def", "Object) -> Dictionary: \"\"\"Gets the private namespaces of an object.\"\"\"", "= value # Adds attributes to __protecteds__ elif name.startswith(\"_\"): self.__protecteds__[name]", "value self.__attributes__[name] = value # Adds attributes to namespace self.__namespaces__[name]", "**keys) cls.__setattr__ = self.setattr # Custom magic methods cls.__namespaces__ =", "= value # Adds attributes to __publics__ else: cls.__publics__[name] =", "metaclass=Metaclass): def __getattr__(self, name: str) -> None: try: return self[name]", "__new__(self, name: str, bases: tuple[type, ...], attrs: dict[str, Any], **keys:", "magic methods of an object.\"\"\" return object.__magics__ def get_attributes(object: Object)", "name: str) -> None: try: return self[name] except KeyError as", "for the Metaclass. Parameters: name - str The name of", "ABC = Metaclass(ABC.__name__, ABC.__bases__, {name: getattr(ABC, name) for name in", "value # Object class class Object(object, metaclass=Metaclass): pass # List", "# Dictionary class class Dictionary(dict, metaclass=Metaclass): def __getattr__(self, name: str)", "name.endswith(\"__\"): self.__magics__[name] = value # Adds attributes to other namespace", "value: object) -> None: self[name] = value # Recreating ABC", "get_magics(object: Object) -> Dictionary: \"\"\"Gets the magic methods of an", "namespace cls.__namespaces__[name] = value return cls def setattr(self, name: str,", "object.\"\"\" return object.__namespaces__ def get_magics(object: Object) -> Dictionary: \"\"\"Gets the", "the magic methods of an object.\"\"\" return object.__magics__ def get_attributes(object:", "import ABC from abc import ABCMeta from abc import abstractmethod", "\"\"\"The static constructor for the Metaclass. 
Parameters: name - str", "from Systerm._setup import init_module module = init_module() # MetaMod class", "dir(cls): value = getattr(cls, name) # Adds attributes to __magics__", "to namespace cls.__namespaces__[name] = value return cls def setattr(self, name:", "ABC from abc import ABCMeta from abc import abstractmethod from", "__publics__ else: cls.__publics__[name] = value cls.__attributes__[name] = value # Adds", "name.startswith(\"__\") and name.endswith(\"__\"): self.__magics__[name] = value # Adds attributes to", "name) for name in dir(ABC)}) def get_namespaces(object: Object) -> Dictionary:", "to namespace self.__namespaces__[name] = value # Object class class Object(object,", "self.__namespaces__[name] = value # Object class class Object(object, metaclass=Metaclass): pass", "of classes to inherit attrs - dict[str, Any] A dictionary", "attributes to __privates__ if name.startswith(\"__\"): self.__privates__[name] = value # Adds", "def setattr(self, name: str, value: object) -> None: # Adds", "\"\"\"Gets the public namespaces of an object.\"\"\" return object.__publics__ def", "Parameters: name - str The name of the class bases", "and name.endswith(\"__\"): cls.__magics__[name] = value # Adds attributes to other", "to __privates__ if name.startswith(\"__\"): self.__privates__[name] = value # Adds attributes", "\"\"\"Gets the attributes of an object.\"\"\" return object.__attributes__ def get_publics(object:", "name.startswith(\"__\") and name.endswith(\"__\"): cls.__magics__[name] = value # Adds attributes to", "module = init_module() # MetaMod class class MetaMod(module.Module): pass module.modules[__name__].__class__", "__protecteds__ elif name.startswith(\"_\"): self.__protecteds__[name] = value # Adds attributes to", "setattr(self, name: str, value: object) -> None: # Adds attributes", "from abc import ABCMeta from abc import abstractmethod from typing", "object) -> None: self[name] = value # Recreating ABC ABC", "attrs: dict[str, Any], **keys: Any) -> type: \"\"\"The static constructor", "Systerm.module from Systerm._setup import init_module module = init_module() # MetaMod", "= value return cls def setattr(self, name: str, value: object)", "cls.__namespaces__[name] = value return cls def setattr(self, name: str, value:", "magic methods cls.__namespaces__ = {} cls.__magics__ = {} cls.__attributes__ =", "def __setattr__(self, name: str, value: object) -> None: self[name] =", "to __magics__ if name.startswith(\"__\") and name.endswith(\"__\"): cls.__magics__[name] = value #", "value = getattr(cls, name) # Adds attributes to __magics__ if", "Adds attributes to __protecteds__ elif name.startswith(\"_\"): cls.__protecteds__[name] = value #", "Dictionary: \"\"\"Gets the private namespaces of an object.\"\"\" return object.__privates__", "return cls def setattr(self, name: str, value: object) -> None:", "new class cls = super().__new__(self, name, bases, dict(attrs), **keys) cls.__setattr__", "name.startswith(\"_\"): self.__protecteds__[name] = value # Adds attributes to __publics__ else:", "cls.__attributes__ = {} cls.__publics__ = {} cls.__privates__ = {} cls.__protecteds__", "abstractmethod from typing import Any from typing import Callable import", "cls.__magics__[name] = value # Adds attributes to other namespace else:", "return super().__getattr__(name) except AttributeError: raise e def __setattr__(self, name: str,", "= value # Adds attributes to other namespace else: #", "dictionary of attributes **keys - Any Keyword arguments to pass", "Adds attributes to __protecteds__ elif 
name.startswith(\"_\"): self.__protecteds__[name] = value #", "methods cls.__namespaces__ = {} cls.__magics__ = {} cls.__attributes__ = {}", "Custom magic methods cls.__namespaces__ = {} cls.__magics__ = {} cls.__attributes__", "class class Dictionary(dict, metaclass=Metaclass): def __getattr__(self, name: str) -> None:", "Object) -> Dictionary: \"\"\"Gets the magic methods of an object.\"\"\"", "attributes of an object.\"\"\" return object.__attributes__ def get_publics(object: Object) ->", "Adds attributes to __publics__ else: self.__publics__[name] = value self.__attributes__[name] =", "List(list, metaclass=Metaclass): pass # Dictionary class class Dictionary(dict, metaclass=Metaclass): def", "- dict[str, Any] A dictionary of attributes **keys - Any", "cls.__protecteds__[name] = value # Adds attributes to __publics__ else: cls.__publics__[name]", "name) # Adds attributes to __magics__ if name.startswith(\"__\") and name.endswith(\"__\"):", "namespaces of an object.\"\"\" return object.__namespaces__ def get_magics(object: Object) ->", "Dictionary class class Dictionary(dict, metaclass=Metaclass): def __getattr__(self, name: str) ->", "for name in dir(cls): value = getattr(cls, name) # Adds", "to __privates__ if name.startswith(\"__\"): cls.__privates__[name] = value # Adds attributes", "# Adds attributes to __publics__ else: cls.__publics__[name] = value cls.__attributes__[name]", "of all classes.\"\"\" def __new__(self, name: str, bases: tuple[type, ...],", "get_privates(object: Object) -> Dictionary: \"\"\"Gets the private namespaces of an", "def get_privates(object: Object) -> Dictionary: \"\"\"Gets the private namespaces of", "<gh_stars>1-10 \"\"\"Meta is a module contains objects that will customize", "object.__attributes__ def get_publics(object: Object) -> Dictionary: \"\"\"Gets the public namespaces", "namespace else: # Adds attributes to __privates__ if name.startswith(\"__\"): self.__privates__[name]", "# Adds attributes to __privates__ if name.startswith(\"__\"): cls.__privates__[name] = value", "name - str The name of the class bases -", "cls.__privates__[name] = value # Adds attributes to __protecteds__ elif name.startswith(\"_\"):", "of an object.\"\"\" return object.__attributes__ def get_publics(object: Object) -> Dictionary:", "= value cls.__attributes__[name] = value # Adds attributes to namespace", "Any from typing import Callable import Systerm # Metaclass class", "= value # Object class class Object(object, metaclass=Metaclass): pass #", "to __publics__ else: cls.__publics__[name] = value cls.__attributes__[name] = value #", "{} cls.__attributes__ = {} cls.__publics__ = {} cls.__privates__ = {}", "get_namespaces(object: Object) -> Dictionary: \"\"\"Gets the namespaces of an object.\"\"\"", "object.__magics__ def get_attributes(object: Object) -> Dictionary: \"\"\"Gets the attributes of", "object.\"\"\" return object.__attributes__ def get_publics(object: Object) -> Dictionary: \"\"\"Gets the", "\"\"\"Gets the protected namespaces of an object.\"\"\" return object.__protecteds__ #", "\"\"\"A metaclass to customize the behavior of all classes.\"\"\" def", "class class List(list, metaclass=Metaclass): pass # Dictionary class class Dictionary(dict,", "# Adds attributes to __magics__ if name.startswith(\"__\") and name.endswith(\"__\"): self.__magics__[name]", "Adds attributes to __magics__ if name.startswith(\"__\") and name.endswith(\"__\"): cls.__magics__[name] =", "customize the behavior of all classes.\"\"\" def __new__(self, name: str,", "ABC.__bases__, {name: 
getattr(ABC, name) for name in dir(ABC)}) def get_namespaces(object:", "dict[str, Any], **keys: Any) -> type: \"\"\"The static constructor for", "getattr(cls, name) # Adds attributes to __magics__ if name.startswith(\"__\") and", "classes.\"\"\" def __new__(self, name: str, bases: tuple[type, ...], attrs: dict[str,", "Keyword arguments to pass in \"\"\" # Creating a new", "# Adds attributes to other namespace else: # Adds attributes", "and name.endswith(\"__\"): self.__magics__[name] = value # Adds attributes to other", "of an object.\"\"\" return object.__publics__ def get_privates(object: Object) -> Dictionary:", "attributes to namespace cls.__namespaces__[name] = value return cls def setattr(self,", "-> None: try: return self[name] except KeyError as e: try:", "else: cls.__publics__[name] = value cls.__attributes__[name] = value # Adds attributes", "of attributes **keys - Any Keyword arguments to pass in", "def __getattr__(self, name: str) -> None: try: return self[name] except", "object.__publics__ def get_privates(object: Object) -> Dictionary: \"\"\"Gets the private namespaces", "abc import abstractmethod from typing import Any from typing import", "getattr(ABC, name) for name in dir(ABC)}) def get_namespaces(object: Object) ->", "Object) -> Dictionary: \"\"\"Gets the namespaces of an object.\"\"\" return", "Object class class Object(object, metaclass=Metaclass): pass # List class class", "attrs - dict[str, Any] A dictionary of attributes **keys -", "# Adds attributes to namespace cls.__namespaces__[name] = value return cls", "import Systerm # Metaclass class Metaclass(ABCMeta): \"\"\"A metaclass to customize", "= {} cls.__publics__ = {} cls.__privates__ = {} cls.__protecteds__ =", "class List(list, metaclass=Metaclass): pass # Dictionary class class Dictionary(dict, metaclass=Metaclass):", "Dictionary: \"\"\"Gets the magic methods of an object.\"\"\" return object.__magics__", "attributes to namespace self.__namespaces__[name] = value # Object class class", "an object.\"\"\" return object.__privates__ def get_protecteds(object: Object) -> Dictionary: \"\"\"Gets", "behavior of all classes.\"\"\" def __new__(self, name: str, bases: tuple[type,", "= super().__new__(self, name, bases, dict(attrs), **keys) cls.__setattr__ = self.setattr #", "from abc import abstractmethod from typing import Any from typing", "to __protecteds__ elif name.startswith(\"_\"): self.__protecteds__[name] = value # Adds attributes", "object.\"\"\" return object.__privates__ def get_protecteds(object: Object) -> Dictionary: \"\"\"Gets the", "ABCMeta from abc import abstractmethod from typing import Any from", "an object.\"\"\" return object.__protecteds__ # Initializing Systerm.module from Systerm._setup import", "# Initializing Systerm.module from Systerm._setup import init_module module = init_module()", "tuple[type, ...], attrs: dict[str, Any], **keys: Any) -> type: \"\"\"The", "The name of the class bases - tuple[type, ...] 
A", "Adds attributes to namespace cls.__namespaces__[name] = value return cls def", "= init_module() # MetaMod class class MetaMod(module.Module): pass module.modules[__name__].__class__ =", "-> Dictionary: \"\"\"Gets the public namespaces of an object.\"\"\" return", "namespace else: # Adds attributes to __privates__ if name.startswith(\"__\"): cls.__privates__[name]", "# Adds attributes to __privates__ if name.startswith(\"__\"): self.__privates__[name] = value", "return object.__attributes__ def get_publics(object: Object) -> Dictionary: \"\"\"Gets the public", "import abstractmethod from typing import Any from typing import Callable", "namespaces of an object.\"\"\" return object.__protecteds__ # Initializing Systerm.module from", "name.startswith(\"_\"): cls.__protecteds__[name] = value # Adds attributes to __publics__ else:", "e: try: return super().__getattr__(name) except AttributeError: raise e def __setattr__(self,", "# Metaclass class Metaclass(ABCMeta): \"\"\"A metaclass to customize the behavior", "{} cls.__magics__ = {} cls.__attributes__ = {} cls.__publics__ = {}", "name: str, bases: tuple[type, ...], attrs: dict[str, Any], **keys: Any)", "value # Recreating ABC ABC = Metaclass(ABC.__name__, ABC.__bases__, {name: getattr(ABC,", "for name in dir(ABC)}) def get_namespaces(object: Object) -> Dictionary: \"\"\"Gets", "attributes to other namespace else: # Adds attributes to __privates__", "# Adds attributes to __protecteds__ elif name.startswith(\"_\"): self.__protecteds__[name] = value", "cls def setattr(self, name: str, value: object) -> None: #", "A dictionary of attributes **keys - Any Keyword arguments to", "to __protecteds__ elif name.startswith(\"_\"): cls.__protecteds__[name] = value # Adds attributes", "__privates__ if name.startswith(\"__\"): self.__privates__[name] = value # Adds attributes to", "methods of an object.\"\"\" return object.__magics__ def get_attributes(object: Object) ->", "the namespaces of an object.\"\"\" return object.__namespaces__ def get_magics(object: Object)", "typing import Callable import Systerm # Metaclass class Metaclass(ABCMeta): \"\"\"A", "Adds attributes to other namespace else: # Adds attributes to", "Object) -> Dictionary: \"\"\"Gets the protected namespaces of an object.\"\"\"", "Object) -> Dictionary: \"\"\"Gets the attributes of an object.\"\"\" return", "Initializing Systerm.module from Systerm._setup import init_module module = init_module() #", "if name.startswith(\"__\"): self.__privates__[name] = value # Adds attributes to __protecteds__", "module contains objects that will customize the behavior of python.\"\"\"", "from typing import Callable import Systerm # Metaclass class Metaclass(ABCMeta):", "Metaclass. Parameters: name - str The name of the class", "= value # Adds attributes to __publics__ else: self.__publics__[name] =", "abc import ABC from abc import ABCMeta from abc import", "__magics__ if name.startswith(\"__\") and name.endswith(\"__\"): cls.__magics__[name] = value # Adds", "static constructor for the Metaclass. Parameters: name - str The", "an object.\"\"\" return object.__namespaces__ def get_magics(object: Object) -> Dictionary: \"\"\"Gets", "metaclass=Metaclass): pass # Dictionary class class Dictionary(dict, metaclass=Metaclass): def __getattr__(self,", "AttributeError: raise e def __setattr__(self, name: str, value: object) ->", "- tuple[type, ...] 
A tuple of classes to inherit attrs", "**keys: Any) -> type: \"\"\"The static constructor for the Metaclass.", "- Any Keyword arguments to pass in \"\"\" # Creating", "dir(ABC)}) def get_namespaces(object: Object) -> Dictionary: \"\"\"Gets the namespaces of", "abc import ABCMeta from abc import abstractmethod from typing import", "# Adds attributes to __magics__ if name.startswith(\"__\") and name.endswith(\"__\"): cls.__magics__[name]", "a new class cls = super().__new__(self, name, bases, dict(attrs), **keys)", "try: return self[name] except KeyError as e: try: return super().__getattr__(name)", "of the class bases - tuple[type, ...] A tuple of" ]
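
# Usage sketch: a minimal demonstration of how the Metaclass sorts class-level
# names into the namespace dictionaries above. Assumptions: the module imports
# cleanly (the Systerm package and Systerm._setup are available), and the
# `Account` class and its members are illustrative names, not part of the module.
class Account(Object):
    balance = 100    # no leading underscore    -> __publics__
    _owner = "anon"  # single leading underscore -> __protecteds__

print("balance" in get_publics(Account))    # True
print("_owner" in get_protecteds(Account))  # True
print("__init__" in get_magics(Account))    # True: dunder names go to __magics__
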
[ "result={ \"amt\": amount, \"bank_id\": bank.ccf_id, \"dst\": transaction[\"dst\"], \"dst_country\": transaction[\"dst_country\"], \"src\":", "e[\"code\"] == infra.jsonrpc.ErrorCode.INVALID_PARAMS.value, ) # get from flagged txs, try", "user_data = {{ privileges = {{ REGISTER_REGULATORS = true, REGISTER_BANKS", "= {manager.ccf_id}, user_data = {{ privileges = {{ REGISTER_REGULATORS =", "\"country\": regulator.country, \"script\": script, }, ), result=regulator.ccf_id, ) check( c.rpc(\"REG_get\",", "transaction with primary.user_client(format=\"msgpack\", user_id=bank.name) as c: # try to poll", "you are not a regulator check( c.rpc(\"REG_poll_flagged\", {}), error=lambda e:", "revealed if tx_id not in revealed_tx_ids: check( c.rpc(\"REG_get_revealed\", {\"tx_id\": tx_id}),", "def add(parser): parser.add_argument( \"--lua-script\", help=\"Regulator checker loaded as lua script", "transactions that were flagged for i, tx_id in enumerate(flagged_ids): if", "an existing scenario file (csv)\", type=str ) args = infra.e2e_args.cli_args(add)", "e[\"code\"] == infra.jsonrpc.ErrorCode.INVALID_PARAMS.value, ) LOG.debug(f\"User {regulator} successfully registered as regulator\")", "flagged_ids = [] non_flagged_ids = [] flagged_amt = 200000 for", "as LOG class AppUser: def __init__(self, network, name, country, curve):", "country, curve): self.name = name self.country = country primary, _", "c.rpc( \"REG_register\", { \"regulator_id\": regulator.ccf_id, \"country\": regulator.country, \"script\": script, },", "e: e is not None and e[\"code\"] == infra.jsonrpc.ErrorCode.INVALID_CALLER_ID.value, )", "tx_id += 1 LOG.success(f\"{tx_id} transactions have been successfully issued\") #", "flagged_ids for tx_id in flagged_ids: # get from flagged txs,", "primary.user_client(format=\"msgpack\", user_id=manager.name) as c: check( c.rpc( \"REG_register\", { \"regulator_id\": regulator.ccf_id,", "= true, REGISTER_BANKS = true, }} }} }} ) \"\"\",", "\"dst\": row[\"destination\"], \"amt\": row[\"amount\"], \"type\": row[\"type\"], \"timestamp\": strftime(\"%a, %d %b", "flagged_amt: check( c.rpc(\"FLAGGED_TX_get\", {\"tx_id\": tx_id}), result=[regulator.ccf_id, False, transaction[\"timestamp\"]], ) flagged_tx", "args.debug_nodes, args.perf_nodes, pdb=args.pdb ) as network: check = infra.checker.Checker() network.start_and_join(args)", "import infra.jsonrpc import logging from time import gmtime, strftime import", "mc: with primary.user_client(format=\"msgpack\", user_id=regulator.name) as c: # assert that the", "self.name = name self.country = country primary, _ = network.find_primary()", "self.country = country primary, _ = network.find_primary() network.create_users([self.name], curve) network.consortium.add_users(primary,", "network.find_primary() network.create_users([self.name], curve) network.consortium.add_users(primary, [self.name]) with primary.user_client(user_id=self.name) as client: self.ccf_id", "is not None and e[\"code\"] == infra.jsonrpc.ErrorCode.INVALID_CALLER_ID.value, ) # As", "infra.jsonrpc.ErrorCode.INVALID_CALLER_ID.value, ) check( c.rpc(\"BK_register\", {}), error=lambda e: e is not", "== infra.jsonrpc.ErrorCode.INVALID_CALLER_ID.value, ) # As permissioned manager, register regulator and", ") # get from flagged txs, try to get the", "tx_id}), result=flagged_txs[tx_id], ) if __name__ == \"__main__\": def add(parser): parser.add_argument(", "{\"tx_id\": tx_id}), error=lambda e: e is not None and e[\"code\"]", "with primary.user_client(user_id=regulator.name) as c: check( 
c.rpc(\"REG_register\", {}), error=lambda e: e", "check( c.rpc( \"REG_register\", {\"regulator_id\": bank.ccf_id, \"country\": bank.country}, ), error=lambda e:", "\"src\": transaction[\"src\"], \"src_country\": transaction[\"src_country\"], \"timestamp\": transaction[\"timestamp\"], \"tx_id\": tx_id, \"type\": transaction[\"type\"],", "\"type\": transaction[\"type\"], } flagged_ids.append(tx_id) flagged_txs[tx_id] = flagged_tx else: check( c.rpc(\"FLAGGED_TX_get\",", "ones that were revealed for tx_id in revealed_tx_ids: check( c.rpc(\"REG_get_revealed\",", "in enumerate(banks): with primary.user_client(format=\"msgpack\", user_id=bank.name) as c: # Destination account", "granted special privileges by members, which is later read by", "LOG class AppUser: def __init__(self, network, name, country, curve): self.name", "check( c.rpc( \"REG_register\", { \"regulator_id\": regulator.ccf_id, \"country\": regulator.country, \"script\": script,", "import random from loguru import logger as LOG class AppUser:", "revealed/non revealed transactions for validation flagged_txs = {} revealed_tx_ids =", "account is the next one in the list of banks", "for validation flagged_txs = {} revealed_tx_ids = [] flagged_ids =", "% 2 == 0: check(c.rpc(\"TX_reveal\", {\"tx_id\": tx_id}), result=True) revealed_tx_ids.append(tx_id) #", "transaction[\"src_country\"], \"timestamp\": transaction[\"timestamp\"], \"type\": transaction[\"type\"], }, ) if float(amount) >", "in resp.result: # poll flagged is a list [tx_id, regulator_id]", "are correct resp = c.rpc(\"REG_poll_flagged\", {}) poll_flagged_ids = [] for", "loaded as lua script file\", type=str ) parser.add_argument( \"--datafile\", help=\"Load", "get the flagged ones that were revealed for tx_id in", "\"bank_id\": bank.ccf_id, \"dst\": transaction[\"dst\"], \"dst_country\": transaction[\"dst_country\"], \"src\": transaction[\"src\"], \"src_country\": transaction[\"src_country\"],", "from time import gmtime, strftime import csv import random from", "%Y %H:%M:%S +0000\", gmtime()), \"src_country\": row[\"src_country\"], \"dst_country\": row[\"dst_country\"], } transactions.append(json_tx)", "flagged txs, try to get the flagged one that was", "= [] for poll_flagged in resp.result: # poll flagged is", "file\", type=str ) parser.add_argument( \"--datafile\", help=\"Load an existing scenario file", "transaction), result=tx_id) check( c.rpc(\"TX_get\", {\"tx_id\": tx_id}), result={ \"amt\": amount, \"bank_id\":", "\"tx_id\": tx_id, \"type\": transaction[\"type\"], } flagged_ids.append(tx_id) flagged_txs[tx_id] = flagged_tx else:", "were revealed for tx_id in revealed_tx_ids: check( c.rpc(\"REG_get_revealed\", {\"tx_id\": tx_id}),", "row[\"destination\"], \"amt\": row[\"amount\"], \"type\": row[\"type\"], \"timestamp\": strftime(\"%a, %d %b %Y", "# bank try to reveal non flagged txs for tx_id", "= {} revealed_tx_ids = [] flagged_ids = [] non_flagged_ids =", "others = network.find_nodes() script = \"if tonumber(amt) > 200000 then", "f: datafile = csv.DictReader(f) for i, row in enumerate(datafile): #", "is not None and e[\"code\"] == infra.jsonrpc.ErrorCode.INVALID_CALLER_ID.value, ) with primary.user_client(user_id=banks[0].name)", "flagged_tx else: check( c.rpc(\"FLAGGED_TX_get\", {\"tx_id\": tx_id}), error=lambda e: e is", "args.perf_nodes, pdb=args.pdb ) as network: check = infra.checker.Checker() network.start_and_join(args) primary,", "[] with open(args.lua_script, \"r\") as f: data = f.readlines() script", "\"GB\", \"GR\", \"FR\") ] transactions = [] with 
open(args.datafile, newline=\"\")", "\"amt\": row[\"amount\"], \"type\": row[\"type\"], \"timestamp\": strftime(\"%a, %d %b %Y %H:%M:%S", "c.rpc(\"TX_get\", {\"tx_id\": tx_id}), result={ \"amt\": amount, \"bank_id\": bank.ccf_id, \"dst\": transaction[\"dst\"],", "transaction[\"dst_country\"], \"src\": transaction[\"src\"], \"src_country\": transaction[\"src_country\"], \"timestamp\": transaction[\"timestamp\"], \"tx_id\": tx_id, \"type\":", "under the Apache 2.0 License. import infra.e2e_args import infra.ccf import", "bank in banks: check( c.rpc( \"BK_register\", {\"bank_id\": bank.ccf_id, \"country\": bank.country},", "Corporation. All rights reserved. # Licensed under the Apache 2.0", "e[\"code\"] == infra.jsonrpc.ErrorCode.INVALID_CALLER_ID.value, ) # As permissioned manager, register regulator", "{\"bank_id\": regulator.ccf_id, \"country\": regulator.country}, ), error=lambda e: e is not", "resp = c.rpc(\"REG_poll_flagged\", {}) poll_flagged_ids = [] for poll_flagged in", "poll_flagged_ids.sort() assert poll_flagged_ids == flagged_ids for tx_id in flagged_ids: #", "e[\"code\"] == infra.jsonrpc.ErrorCode.INVALID_CALLER_ID.value, ) check( c.rpc(\"BK_register\", {}), error=lambda e: e", "logging from time import gmtime, strftime import csv import random", "bank.ccf_id, \"country\": bank.country}, ), error=lambda e: e is not None", "\"country\": regulator.country}, ), error=lambda e: e is not None and", "in revealed_tx_ids: check( c.rpc(\"REG_get_revealed\", {\"tx_id\": tx_id}), error=lambda e: e is", "), result=regulator.ccf_id, ) check( c.rpc(\"REG_get\", {\"id\": regulator.ccf_id}), result=[regulator.country, script], )", ") check( c.rpc(\"REG_get\", {\"id\": regulator.ccf_id}), result=[regulator.country, script], ) check( c.rpc(", "(\"US\", \"GB\", \"GR\", \"FR\") ] transactions = [] with open(args.datafile,", "that issued first flagged transaction with primary.user_client(format=\"msgpack\", user_id=bank.name) as c:", "\"src_country\": row[\"src_country\"], \"dst_country\": row[\"dst_country\"], } transactions.append(json_tx) # Manager is granted", "script file\", type=str ) parser.add_argument( \"--datafile\", help=\"Load an existing scenario", "flagged_ids: # get from flagged txs, try to get the", "), error=lambda e: e is not None and e[\"code\"] ==", "\"BK_register\", {\"bank_id\": regulator.ccf_id, \"country\": regulator.country}, ), error=lambda e: e is", "transaction[\"type\"], } flagged_ids.append(tx_id) flagged_txs[tx_id] = flagged_tx else: check( c.rpc(\"FLAGGED_TX_get\", {\"tx_id\":", "network.consortium.vote_using_majority(primary, proposal_result[\"id\"]) # Check permissions are enforced with primary.user_client(user_id=regulator.name) as", "= [] flagged_amt = 200000 for i, bank in enumerate(banks):", "next one in the list of banks for transaction in", "check(c.rpc(\"TX_reveal\", {\"tx_id\": tx_id}), result=True) revealed_tx_ids.append(tx_id) # bank try to reveal", "# Tracks how many transactions have been issued # tracks", "from flagged txs, try to get the flagged ones that", "> 200000 then return true else return false end\" if", "non_flagged_ids: check( c.rpc(\"TX_reveal\", {\"tx_id\": tx_id}), error=lambda e: e is not", "with primary.node_client() as mc: with primary.user_client(format=\"msgpack\", user_id=regulator.name) as c: #", ") network.consortium.vote_using_majority(primary, proposal_result[\"id\"]) # Check permissions are enforced with primary.user_client(user_id=regulator.name)", "error = network.consortium.propose( 0, primary, f\"\"\" return Calls:call( 
\"set_user_data\", {{", "e is not None and e[\"code\"] == infra.jsonrpc.ErrorCode.INVALID_CALLER_ID.value, ) check(", "f.readlines() script = \"\".join(data) manager = AppUser(network, \"manager\", \"GB\", args.default_curve)", "but fail as you are not a regulator check( c.rpc(\"REG_poll_flagged\",", "{\"tx_id\": tx_id}), result=True) revealed_tx_ids.append(tx_id) # bank try to reveal non", "import infra.e2e_args import infra.ccf import infra.jsonrpc import logging from time", "class AppUser: def __init__(self, network, name, country, curve): self.name =", "are not a regulator check( c.rpc(\"REG_poll_flagged\", {}), error=lambda e: e", "bank.ccf_id}), result=bank.country) check( c.rpc( \"REG_register\", {\"regulator_id\": bank.ccf_id, \"country\": bank.country}, ),", "# regulator poll for transactions that are flagged with primary.node_client()", "transactions that are flagged with primary.node_client() as mc: with primary.user_client(format=\"msgpack\",", "\"src_country\": transaction[\"src_country\"], \"timestamp\": transaction[\"timestamp\"], \"tx_id\": tx_id, \"type\": transaction[\"type\"], } flagged_ids.append(tx_id)", ") check( c.rpc( \"BK_register\", {\"bank_id\": regulator.ccf_id, \"country\": regulator.country}, ), error=lambda", "strftime import csv import random from loguru import logger as", "with primary.user_client(user_id=banks[0].name) as c: check( c.rpc(\"REG_register\", {}), error=lambda e: e", "permissions are enforced with primary.user_client(user_id=regulator.name) as c: check( c.rpc(\"REG_register\", {}),", "been issued # tracks flagged/non flagged and revealed/non revealed transactions", "f\"\"\" return Calls:call( \"set_user_data\", {{ user_id = {manager.ccf_id}, user_data =", "with infra.ccf.network( hosts, args.build_dir, args.debug_nodes, args.perf_nodes, pdb=args.pdb ) as network:", "\"set_user_data\", {{ user_id = {manager.ccf_id}, user_data = {{ privileges =", "if args.lua_script is not None: data = [] with open(args.lua_script,", "2 == 0: check(c.rpc(\"TX_reveal\", {\"tx_id\": tx_id}), result=True) revealed_tx_ids.append(tx_id) # bank", "None: data = [] with open(args.lua_script, \"r\") as f: data", "\"dst_country\": row[\"dst_country\"], } transactions.append(json_tx) # Manager is granted special privileges", "false end\" if args.lua_script is not None: data = []", "name self.country = country primary, _ = network.find_primary() network.create_users([self.name], curve)", "not in revealed_tx_ids: check( c.rpc(\"REG_get_revealed\", {\"tx_id\": tx_id}), error=lambda e: e", "the flagged one that was not revealed if tx_id not", "and revealed/non revealed transactions for validation flagged_txs = {} revealed_tx_ids", "flagged one that was not revealed if tx_id not in", "= f.readlines() script = \"\".join(data) manager = AppUser(network, \"manager\", \"GB\",", "pdb=args.pdb ) as network: check = infra.checker.Checker() network.start_and_join(args) primary, others", "not None and e[\"code\"] == infra.jsonrpc.ErrorCode.INVALID_PARAMS.value, ) LOG.debug(f\"User {bank} successfully", "infra.jsonrpc.ErrorCode.INVALID_PARAMS.value, ) LOG.debug(f\"User {bank} successfully registered as bank\") LOG.success(f\"{1} regulator", ") if float(amount) > flagged_amt: check( c.rpc(\"FLAGGED_TX_get\", {\"tx_id\": tx_id}), result=[regulator.ccf_id,", "with primary.user_client(format=\"msgpack\", user_id=bank.name) as c: # Destination account is the", "manager, register regulator and banks with primary.node_client() as mc: check_commit", "= 200000 for i, bank in enumerate(banks): 
with primary.user_client(format=\"msgpack\", user_id=bank.name)", "\"BK_register\", {\"bank_id\": bank.ccf_id, \"country\": bank.country}, ), result=bank.ccf_id, ) check(c.rpc(\"BK_get\", {\"id\":", "transaction[\"amt\"] check(c.rpc(\"TX_record\", transaction), result=tx_id) check( c.rpc(\"TX_get\", {\"tx_id\": tx_id}), result={ \"amt\":", "script], ) check( c.rpc( \"BK_register\", {\"bank_id\": regulator.ccf_id, \"country\": regulator.country}, ),", "# bank that issued first flagged transaction with primary.user_client(format=\"msgpack\", user_id=bank.name)", "network: check = infra.checker.Checker() network.start_and_join(args) primary, others = network.find_nodes() script", "= name self.country = country primary, _ = network.find_primary() network.create_users([self.name],", "\"r\") as f: data = f.readlines() script = \"\".join(data) manager", "Check permissions are enforced with primary.user_client(user_id=regulator.name) as c: check( c.rpc(\"REG_register\",", "that are flagged with primary.node_client() as mc: with primary.user_client(format=\"msgpack\", user_id=regulator.name)", "AppUser(network, \"manager\", \"GB\", args.default_curve) regulator = AppUser(network, \"auditor\", \"GB\", args.default_curve)", "and e[\"code\"] == infra.jsonrpc.ErrorCode.INVALID_PARAMS.value, ) # regulator poll for transactions", "import logging from time import gmtime, strftime import csv import", "200000 for i, bank in enumerate(banks): with primary.user_client(format=\"msgpack\", user_id=bank.name) as", "c.rpc(\"REG_poll_flagged\", {}), error=lambda e: e is not None and e[\"code\"]", "with primary.user_client(user_id=self.name) as client: self.ccf_id = client.rpc(\"whoAmI\", {}).result[\"caller_id\"] def __str__(self):", "{} revealed_tx_ids = [] flagged_ids = [] non_flagged_ids = []", "[tx_id, regulator_id] poll_flagged_ids.append(poll_flagged[0]) poll_flagged_ids.sort() assert poll_flagged_ids == flagged_ids for tx_id", "None and e[\"code\"] == infra.jsonrpc.ErrorCode.INVALID_PARAMS.value, ) non_flagged_ids.append(tx_id) tx_id += 1", "c.rpc(\"REG_register\", {}), error=lambda e: e is not None and e[\"code\"]", "check( c.rpc(\"FLAGGED_TX_get\", {\"tx_id\": tx_id}), error=lambda e: e is not None", "and e[\"code\"] == infra.jsonrpc.ErrorCode.INVALID_PARAMS.value, ) LOG.debug(f\"User {bank} successfully registered as", "Apache 2.0 License. 
import infra.e2e_args import infra.ccf import infra.jsonrpc import", "c: # try to poll flagged but fail as you", "== infra.jsonrpc.ErrorCode.INVALID_CALLER_ID.value, ) check( c.rpc(\"BK_register\", {}), error=lambda e: e is", "\"FR\") ] transactions = [] with open(args.datafile, newline=\"\") as f:", "by app to enforce access restrictions proposal_result, error = network.consortium.propose(", "csv.DictReader(f) for i, row in enumerate(datafile): # read first 10", "e is not None and e[\"code\"] == infra.jsonrpc.ErrorCode.INVALID_CALLER_ID.value, ) with", "and e[\"code\"] == infra.jsonrpc.ErrorCode.INVALID_PARAMS.value, ) # get from flagged txs,", "e[\"code\"] == infra.jsonrpc.ErrorCode.INVALID_PARAMS.value, ) # regulator poll for transactions that", "later read by app to enforce access restrictions proposal_result, error", "amount = transaction[\"amt\"] check(c.rpc(\"TX_record\", transaction), result=tx_id) check( c.rpc(\"TX_get\", {\"tx_id\": tx_id}),", "c.rpc(\"REG_get_revealed\", {\"tx_id\": tx_id}), result=flagged_txs[tx_id], ) if __name__ == \"__main__\": def", "gmtime, strftime import csv import random from loguru import logger", "(csv)\", type=str ) args = infra.e2e_args.cli_args(add) args.package = args.app_script and", "200000 then return true else return false end\" if args.lua_script", "datafile = csv.DictReader(f) for i, row in enumerate(datafile): # read", "transactions for validation flagged_txs = {} revealed_tx_ids = [] flagged_ids", "regulator\") for bank in banks: check( c.rpc( \"BK_register\", {\"bank_id\": bank.ccf_id,", "tx_id}), result=[regulator.ccf_id, False, transaction[\"timestamp\"]], ) flagged_tx = { \"amt\": amount,", "if tx_id not in revealed_tx_ids: check( c.rpc(\"REG_get_revealed\", {\"tx_id\": tx_id}), error=lambda", "%b %Y %H:%M:%S +0000\", gmtime()), \"src_country\": row[\"src_country\"], \"dst_country\": row[\"dst_country\"], }", "to get the flagged ones that were revealed for tx_id", "try to get the flagged ones that were revealed for", "bank reveal some transactions that were flagged for i, tx_id", "is not None and e[\"code\"] == infra.jsonrpc.ErrorCode.INVALID_PARAMS.value, ) # regulator", "for country in (\"US\", \"GB\", \"GR\", \"FR\") ] transactions =", "restrictions proposal_result, error = network.consortium.propose( 0, primary, f\"\"\" return Calls:call(", "tx_id, \"type\": transaction[\"type\"], } flagged_ids.append(tx_id) flagged_txs[tx_id] = flagged_tx else: check(", "def __str__(self): return f\"{self.ccf_id} ({self.name})\" def run(args): hosts = [\"localhost\"]", "infra.ccf.network( hosts, args.build_dir, args.debug_nodes, args.perf_nodes, pdb=args.pdb ) as network: check", "as regulator\") for bank in banks: check( c.rpc( \"BK_register\", {\"bank_id\":", "check_commit = infra.checker.Checker(mc) with primary.user_client(format=\"msgpack\", user_id=manager.name) as c: check( c.rpc(", "to enforce access restrictions proposal_result, error = network.consortium.propose( 0, primary,", "\"REG_register\", {\"regulator_id\": bank.ccf_id, \"country\": bank.country}, ), error=lambda e: e is", "not None and e[\"code\"] == infra.jsonrpc.ErrorCode.INVALID_PARAMS.value, ) # get from", "check( c.rpc( \"BK_register\", {\"bank_id\": regulator.ccf_id, \"country\": regulator.country}, ), error=lambda e:", "for i, row in enumerate(datafile): # read first 10 lines", "None and e[\"code\"] == infra.jsonrpc.ErrorCode.INVALID_PARAMS.value, ) # regulator poll for", "{regulator} successfully registered as regulator\") for bank in banks: check(", "as c: # Destination 
account is the next one in", ") LOG.debug(f\"User {bank} successfully registered as bank\") LOG.success(f\"{1} regulator and", "bank in enumerate(banks): with primary.user_client(format=\"msgpack\", user_id=bank.name) as c: # Destination", "rights reserved. # Licensed under the Apache 2.0 License. import", "for i, bank in enumerate(banks): with primary.user_client(format=\"msgpack\", user_id=bank.name) as c:", "flagged with primary.node_client() as mc: with primary.user_client(format=\"msgpack\", user_id=regulator.name) as c:", "{ \"src\": row[\"origin\"], \"dst\": row[\"destination\"], \"amt\": row[\"amount\"], \"type\": row[\"type\"], \"timestamp\":", "\"amt\": amount, \"bank_id\": bank.ccf_id, \"dst\": transaction[\"dst\"], \"dst_country\": transaction[\"dst_country\"], \"src\": transaction[\"src\"],", "assert poll_flagged_ids == flagged_ids for tx_id in flagged_ids: # get", "user_id=manager.name) as c: check( c.rpc( \"REG_register\", { \"regulator_id\": regulator.ccf_id, \"country\":", "} flagged_ids.append(tx_id) flagged_txs[tx_id] = flagged_tx else: check( c.rpc(\"FLAGGED_TX_get\", {\"tx_id\": tx_id}),", "infra.checker.Checker(mc) with primary.user_client(format=\"msgpack\", user_id=manager.name) as c: check( c.rpc( \"REG_register\", {", "as mc: check_commit = infra.checker.Checker(mc) with primary.user_client(format=\"msgpack\", user_id=manager.name) as c:", "issued\") # bank that issued first flagged transaction with primary.user_client(format=\"msgpack\",", "successfully setup\") tx_id = 0 # Tracks how many transactions", "None and e[\"code\"] == infra.jsonrpc.ErrorCode.INVALID_PARAMS.value, ) LOG.debug(f\"User {bank} successfully registered", "successfully issued\") # bank that issued first flagged transaction with", "get the flagged one that was not revealed if tx_id", "is not None: data = [] with open(args.lua_script, \"r\") as", "successfully registered as bank\") LOG.success(f\"{1} regulator and {len(banks)} bank(s) successfully", "for transactions that are flagged with primary.node_client() as mc: with", "strftime(\"%a, %d %b %Y %H:%M:%S +0000\", gmtime()), \"src_country\": row[\"src_country\"], \"dst_country\":", "get from flagged txs, try to get the flagged ones", "flagged txs, try to get the flagged ones that were", "and banks with primary.node_client() as mc: check_commit = infra.checker.Checker(mc) with", "None and e[\"code\"] == infra.jsonrpc.ErrorCode.INVALID_CALLER_ID.value, ) # bank reveal some", "\"--lua-script\", help=\"Regulator checker loaded as lua script file\", type=str )", "if i > 10: break json_tx = { \"src\": row[\"origin\"],", "manager = AppUser(network, \"manager\", \"GB\", args.default_curve) regulator = AppUser(network, \"auditor\",", "AppUser: def __init__(self, network, name, country, curve): self.name = name", "+0000\", gmtime()), \"src_country\": row[\"src_country\"], \"dst_country\": row[\"dst_country\"], } transactions.append(json_tx) # Manager", "[] non_flagged_ids = [] flagged_amt = 200000 for i, bank", "__init__(self, network, name, country, curve): self.name = name self.country =", "tx_id}), error=lambda e: e is not None and e[\"code\"] ==", "# As permissioned manager, register regulator and banks with primary.node_client()", "not None and e[\"code\"] == infra.jsonrpc.ErrorCode.INVALID_PARAMS.value, ) non_flagged_ids.append(tx_id) tx_id +=", "i % 2 == 0: check(c.rpc(\"TX_reveal\", {\"tx_id\": tx_id}), result=True) revealed_tx_ids.append(tx_id)", "revealed_tx_ids: check( c.rpc(\"REG_get_revealed\", {\"tx_id\": tx_id}), 
result=flagged_txs[tx_id], ) if __name__ ==", "= network.find_primary() network.create_users([self.name], curve) network.consortium.add_users(primary, [self.name]) with primary.user_client(user_id=self.name) as client:", "args.default_curve) for country in (\"US\", \"GB\", \"GR\", \"FR\") ] transactions", "Licensed under the Apache 2.0 License. import infra.e2e_args import infra.ccf", "run(args): hosts = [\"localhost\"] with infra.ccf.network( hosts, args.build_dir, args.debug_nodes, args.perf_nodes,", "as f: datafile = csv.DictReader(f) for i, row in enumerate(datafile):", "f\"{self.ccf_id} ({self.name})\" def run(args): hosts = [\"localhost\"] with infra.ccf.network( hosts,", "check( c.rpc(\"REG_get\", {\"id\": regulator.ccf_id}), result=[regulator.country, script], ) check( c.rpc( \"BK_register\",", "country in (\"US\", \"GB\", \"GR\", \"FR\") ] transactions = []", "}} ) \"\"\", ) network.consortium.vote_using_majority(primary, proposal_result[\"id\"]) # Check permissions are", "None and e[\"code\"] == infra.jsonrpc.ErrorCode.INVALID_PARAMS.value, ) LOG.debug(f\"User {regulator} successfully registered", "flagged txs for tx_id in non_flagged_ids: check( c.rpc(\"TX_reveal\", {\"tx_id\": tx_id}),", "network.find_nodes() script = \"if tonumber(amt) > 200000 then return true", "= AppUser(network, \"manager\", \"GB\", args.default_curve) regulator = AppUser(network, \"auditor\", \"GB\",", "for tx_id in revealed_tx_ids: check( c.rpc(\"REG_get_revealed\", {\"tx_id\": tx_id}), result=flagged_txs[tx_id], )", "{bank} successfully registered as bank\") LOG.success(f\"{1} regulator and {len(banks)} bank(s)", "flagged ones that were revealed for tx_id in revealed_tx_ids: check(", "bank(s) successfully setup\") tx_id = 0 # Tracks how many", "import gmtime, strftime import csv import random from loguru import", "regulator.country}, ), error=lambda e: e is not None and e[\"code\"]", "loguru import logger as LOG class AppUser: def __init__(self, network,", "= infra.e2e_args.cli_args(add) args.package = args.app_script and \"libluageneric\" or \"liblogging\" run(args)", "first 10 lines if i > 10: break json_tx =", "{\"tx_id\": tx_id}), result=flagged_txs[tx_id], ) if __name__ == \"__main__\": def add(parser):", "network.consortium.propose( 0, primary, f\"\"\" return Calls:call( \"set_user_data\", {{ user_id =", "= {{ REGISTER_REGULATORS = true, REGISTER_BANKS = true, }} }}", "LOG.debug(f\"User {bank} successfully registered as bank\") LOG.success(f\"{1} regulator and {len(banks)}", "e: e is not None and e[\"code\"] == infra.jsonrpc.ErrorCode.INVALID_PARAMS.value, )", "e is not None and e[\"code\"] == infra.jsonrpc.ErrorCode.INVALID_CALLER_ID.value, ) #", "in enumerate(datafile): # read first 10 lines if i >", "open(args.lua_script, \"r\") as f: data = f.readlines() script = \"\".join(data)", "c: # assert that the flagged txs that we poll", "banks: check( c.rpc( \"BK_register\", {\"bank_id\": bank.ccf_id, \"country\": bank.country}, ), result=bank.ccf_id,", "list of banks for transaction in transactions: print(transaction) amount =", "is not None and e[\"code\"] == infra.jsonrpc.ErrorCode.INVALID_PARAMS.value, ) LOG.debug(f\"User {regulator}", "true, REGISTER_BANKS = true, }} }} }} ) \"\"\", )", "\"dst\": transaction[\"dst\"], \"dst_country\": transaction[\"dst_country\"], \"src\": transaction[\"src\"], \"src_country\": transaction[\"src_country\"], \"timestamp\": transaction[\"timestamp\"],", "print(transaction) amount = transaction[\"amt\"] check(c.rpc(\"TX_record\", transaction), result=tx_id) check( 
c.rpc(\"TX_get\", {\"tx_id\":", "\"src\": row[\"origin\"], \"dst\": row[\"destination\"], \"amt\": row[\"amount\"], \"type\": row[\"type\"], \"timestamp\": strftime(\"%a,", "args.lua_script is not None: data = [] with open(args.lua_script, \"r\")", "regulator check( c.rpc(\"REG_poll_flagged\", {}), error=lambda e: e is not None", "is not None and e[\"code\"] == infra.jsonrpc.ErrorCode.INVALID_PARAMS.value, ) # get", "special privileges by members, which is later read by app", "# Licensed under the Apache 2.0 License. import infra.e2e_args import", "row in enumerate(datafile): # read first 10 lines if i", "register regulator and banks with primary.node_client() as mc: check_commit =", "\"regulator_id\": regulator.ccf_id, \"country\": regulator.country, \"script\": script, }, ), result=regulator.ccf_id, )", "\"timestamp\": transaction[\"timestamp\"], \"type\": transaction[\"type\"], }, ) if float(amount) > flagged_amt:", "a list [tx_id, regulator_id] poll_flagged_ids.append(poll_flagged[0]) poll_flagged_ids.sort() assert poll_flagged_ids == flagged_ids", "flagged and revealed/non revealed transactions for validation flagged_txs = {}", ") parser.add_argument( \"--datafile\", help=\"Load an existing scenario file (csv)\", type=str", "= country primary, _ = network.find_primary() network.create_users([self.name], curve) network.consortium.add_users(primary, [self.name])", "regulator = AppUser(network, \"auditor\", \"GB\", args.default_curve) banks = [ AppUser(network,", ") as network: check = infra.checker.Checker() network.start_and_join(args) primary, others =", "help=\"Regulator checker loaded as lua script file\", type=str ) parser.add_argument(", "banks for transaction in transactions: print(transaction) amount = transaction[\"amt\"] check(c.rpc(\"TX_record\",", "False, transaction[\"timestamp\"]], ) flagged_tx = { \"amt\": amount, \"bank_id\": bank.ccf_id,", "0 # Tracks how many transactions have been issued #", "true, }} }} }} ) \"\"\", ) network.consortium.vote_using_majority(primary, proposal_result[\"id\"]) #", "= [] flagged_ids = [] non_flagged_ids = [] flagged_amt =", "_ = network.find_primary() network.create_users([self.name], curve) network.consortium.add_users(primary, [self.name]) with primary.user_client(user_id=self.name) as", "try to poll flagged but fail as you are not", "user_id=bank.name) as c: # try to poll flagged but fail", "tx_id in enumerate(flagged_ids): if i % 2 == 0: check(c.rpc(\"TX_reveal\",", "assert that the flagged txs that we poll for are", "txs, try to get the flagged one that was not", "import infra.ccf import infra.jsonrpc import logging from time import gmtime,", "not None: data = [] with open(args.lua_script, \"r\") as f:", "c.rpc(\"FLAGGED_TX_get\", {\"tx_id\": tx_id}), error=lambda e: e is not None and", "= AppUser(network, \"auditor\", \"GB\", args.default_curve) banks = [ AppUser(network, f\"bank{country}\",", "bank.country}, ), result=bank.ccf_id, ) check(c.rpc(\"BK_get\", {\"id\": bank.ccf_id}), result=bank.country) check( c.rpc(", "tx_id}), result=True) revealed_tx_ids.append(tx_id) # bank try to reveal non flagged", "\"dst_country\": transaction[\"dst_country\"], \"src\": transaction[\"src\"], \"src_country\": transaction[\"src_country\"], \"timestamp\": transaction[\"timestamp\"], \"tx_id\": tx_id,", "primary.user_client(user_id=self.name) as client: self.ccf_id = client.rpc(\"whoAmI\", {}).result[\"caller_id\"] def __str__(self): return", "hosts, args.build_dir, args.debug_nodes, args.perf_nodes, pdb=args.pdb ) as network: check =", "to reveal 
non flagged txs for tx_id in non_flagged_ids: check(", "poll_flagged_ids == flagged_ids for tx_id in flagged_ids: # get from", "{\"tx_id\": tx_id}), result={ \"amt\": amount, \"bank_id\": bank.ccf_id, \"dst\": transaction[\"dst\"], \"dst_country\":", "primary.node_client() as mc: check_commit = infra.checker.Checker(mc) with primary.user_client(format=\"msgpack\", user_id=manager.name) as", "{\"tx_id\": tx_id}), result=[regulator.ccf_id, False, transaction[\"timestamp\"]], ) flagged_tx = { \"amt\":", "result=flagged_txs[tx_id], ) if __name__ == \"__main__\": def add(parser): parser.add_argument( \"--lua-script\",", "network, name, country, curve): self.name = name self.country = country", "i, row in enumerate(datafile): # read first 10 lines if", ") with primary.user_client(user_id=banks[0].name) as c: check( c.rpc(\"REG_register\", {}), error=lambda e:", "transactions: print(transaction) amount = transaction[\"amt\"] check(c.rpc(\"TX_record\", transaction), result=tx_id) check( c.rpc(\"TX_get\",", "poll for are correct resp = c.rpc(\"REG_poll_flagged\", {}) poll_flagged_ids =", ") # regulator poll for transactions that are flagged with", "== infra.jsonrpc.ErrorCode.INVALID_PARAMS.value, ) non_flagged_ids.append(tx_id) tx_id += 1 LOG.success(f\"{tx_id} transactions have", "not None and e[\"code\"] == infra.jsonrpc.ErrorCode.INVALID_CALLER_ID.value, ) with primary.user_client(user_id=banks[0].name) as", "{{ REGISTER_REGULATORS = true, REGISTER_BANKS = true, }} }} }}", "(c) Microsoft Corporation. All rights reserved. # Licensed under the", "transaction in transactions: print(transaction) amount = transaction[\"amt\"] check(c.rpc(\"TX_record\", transaction), result=tx_id)", "as you are not a regulator check( c.rpc(\"REG_poll_flagged\", {}), error=lambda", "regulator.ccf_id, \"country\": regulator.country}, ), error=lambda e: e is not None", "non_flagged_ids.append(tx_id) tx_id += 1 LOG.success(f\"{tx_id} transactions have been successfully issued\")", "\"script\": script, }, ), result=regulator.ccf_id, ) check( c.rpc(\"REG_get\", {\"id\": regulator.ccf_id}),", ") non_flagged_ids.append(tx_id) tx_id += 1 LOG.success(f\"{tx_id} transactions have been successfully", "# Check permissions are enforced with primary.user_client(user_id=regulator.name) as c: check(", "Calls:call( \"set_user_data\", {{ user_id = {manager.ccf_id}, user_data = {{ privileges", ") check( c.rpc(\"BK_register\", {}), error=lambda e: e is not None", "flagged_ids.append(tx_id) flagged_txs[tx_id] = flagged_tx else: check( c.rpc(\"FLAGGED_TX_get\", {\"tx_id\": tx_id}), error=lambda", "REGISTER_BANKS = true, }} }} }} ) \"\"\", ) network.consortium.vote_using_majority(primary,", "transaction[\"src_country\"], \"timestamp\": transaction[\"timestamp\"], \"tx_id\": tx_id, \"type\": transaction[\"type\"], } flagged_ids.append(tx_id) flagged_txs[tx_id]", "that were revealed for tx_id in revealed_tx_ids: check( c.rpc(\"REG_get_revealed\", {\"tx_id\":", "transactions = [] with open(args.datafile, newline=\"\") as f: datafile =", "in (\"US\", \"GB\", \"GR\", \"FR\") ] transactions = [] with", "and e[\"code\"] == infra.jsonrpc.ErrorCode.INVALID_PARAMS.value, ) LOG.debug(f\"User {regulator} successfully registered as", "= network.find_nodes() script = \"if tonumber(amt) > 200000 then return", "None and e[\"code\"] == infra.jsonrpc.ErrorCode.INVALID_CALLER_ID.value, ) # As permissioned manager,", "were flagged for i, tx_id in enumerate(flagged_ids): if i %", "%H:%M:%S +0000\", gmtime()), \"src_country\": row[\"src_country\"], 
\"dst_country\": row[\"dst_country\"], } transactions.append(json_tx) #", "[] flagged_ids = [] non_flagged_ids = [] flagged_amt = 200000", "0: check(c.rpc(\"TX_reveal\", {\"tx_id\": tx_id}), result=True) revealed_tx_ids.append(tx_id) # bank try to", "check( c.rpc(\"FLAGGED_TX_get\", {\"tx_id\": tx_id}), result=[regulator.ccf_id, False, transaction[\"timestamp\"]], ) flagged_tx =", "primary.node_client() as mc: with primary.user_client(format=\"msgpack\", user_id=regulator.name) as c: # assert", "= [] with open(args.lua_script, \"r\") as f: data = f.readlines()", "country, args.default_curve) for country in (\"US\", \"GB\", \"GR\", \"FR\") ]", "and e[\"code\"] == infra.jsonrpc.ErrorCode.INVALID_CALLER_ID.value, ) # bank reveal some transactions", "result=[regulator.ccf_id, False, transaction[\"timestamp\"]], ) flagged_tx = { \"amt\": amount, \"bank_id\":", "network.start_and_join(args) primary, others = network.find_nodes() script = \"if tonumber(amt) >", "else: check( c.rpc(\"FLAGGED_TX_get\", {\"tx_id\": tx_id}), error=lambda e: e is not", "amount, \"bank_id\": bank.ccf_id, \"dst\": transaction[\"dst\"], \"dst_country\": transaction[\"dst_country\"], \"src\": transaction[\"src\"], \"src_country\":", "successfully registered as regulator\") for bank in banks: check( c.rpc(", "\"timestamp\": transaction[\"timestamp\"], \"tx_id\": tx_id, \"type\": transaction[\"type\"], } flagged_ids.append(tx_id) flagged_txs[tx_id] =", "== 0: check(c.rpc(\"TX_reveal\", {\"tx_id\": tx_id}), result=True) revealed_tx_ids.append(tx_id) # bank try", ") # As permissioned manager, register regulator and banks with", "check( c.rpc(\"REG_register\", {}), error=lambda e: e is not None and", "# Manager is granted special privileges by members, which is", "reserved. # Licensed under the Apache 2.0 License. 
import infra.e2e_args", "from loguru import logger as LOG class AppUser: def __init__(self,", "type=str ) parser.add_argument( \"--datafile\", help=\"Load an existing scenario file (csv)\",", "banks with primary.node_client() as mc: check_commit = infra.checker.Checker(mc) with primary.user_client(format=\"msgpack\",", "\"if tonumber(amt) > 200000 then return true else return false", "result=bank.country) check( c.rpc( \"REG_register\", {\"regulator_id\": bank.ccf_id, \"country\": bank.country}, ), error=lambda", "[ AppUser(network, f\"bank{country}\", country, args.default_curve) for country in (\"US\", \"GB\",", "# try to poll flagged but fail as you are", "as c: # try to poll flagged but fail as", "random from loguru import logger as LOG class AppUser: def", "of banks for transaction in transactions: print(transaction) amount = transaction[\"amt\"]", "result=regulator.ccf_id, ) check( c.rpc(\"REG_get\", {\"id\": regulator.ccf_id}), result=[regulator.country, script], ) check(", "{\"id\": bank.ccf_id}), result=bank.country) check( c.rpc( \"REG_register\", {\"regulator_id\": bank.ccf_id, \"country\": bank.country},", "else return false end\" if args.lua_script is not None: data", "None and e[\"code\"] == infra.jsonrpc.ErrorCode.INVALID_PARAMS.value, ) # get from flagged", "f\"bank{country}\", country, args.default_curve) for country in (\"US\", \"GB\", \"GR\", \"FR\")", "transaction[\"timestamp\"]], ) flagged_tx = { \"amt\": amount, \"bank_id\": bank.ccf_id, \"dst\":", "lines if i > 10: break json_tx = { \"src\":", "for poll_flagged in resp.result: # poll flagged is a list", "enumerate(flagged_ids): if i % 2 == 0: check(c.rpc(\"TX_reveal\", {\"tx_id\": tx_id}),", "# poll flagged is a list [tx_id, regulator_id] poll_flagged_ids.append(poll_flagged[0]) poll_flagged_ids.sort()", "\"REG_register\", { \"regulator_id\": regulator.ccf_id, \"country\": regulator.country, \"script\": script, }, ),", "10 lines if i > 10: break json_tx = {", "None and e[\"code\"] == infra.jsonrpc.ErrorCode.INVALID_CALLER_ID.value, ) with primary.user_client(user_id=banks[0].name) as c:", "+= 1 LOG.success(f\"{tx_id} transactions have been successfully issued\") # bank", "[\"localhost\"] with infra.ccf.network( hosts, args.build_dir, args.debug_nodes, args.perf_nodes, pdb=args.pdb ) as", "proposal_result[\"id\"]) # Check permissions are enforced with primary.user_client(user_id=regulator.name) as c:", "{\"id\": regulator.ccf_id}), result=[regulator.country, script], ) check( c.rpc( \"BK_register\", {\"bank_id\": regulator.ccf_id,", "as c: check( c.rpc(\"REG_register\", {}), error=lambda e: e is not", "poll flagged is a list [tx_id, regulator_id] poll_flagged_ids.append(poll_flagged[0]) poll_flagged_ids.sort() assert", "== infra.jsonrpc.ErrorCode.INVALID_CALLER_ID.value, ) with primary.user_client(user_id=banks[0].name) as c: check( c.rpc(\"REG_register\", {}),", "for tx_id in flagged_ids: # get from flagged txs, try", "by members, which is later read by app to enforce", "revealed for tx_id in revealed_tx_ids: check( c.rpc(\"REG_get_revealed\", {\"tx_id\": tx_id}), result=flagged_txs[tx_id],", "return false end\" if args.lua_script is not None: data =", "def run(args): hosts = [\"localhost\"] with infra.ccf.network( hosts, args.build_dir, args.debug_nodes,", "[] for poll_flagged in resp.result: # poll flagged is a", "true else return false end\" if args.lua_script is not None:", "({self.name})\" def run(args): hosts = [\"localhost\"] with infra.ccf.network( hosts, args.build_dir,", "reveal non flagged txs for tx_id in 
non_flagged_ids: check( c.rpc(\"TX_reveal\",", "float(amount) > flagged_amt: check( c.rpc(\"FLAGGED_TX_get\", {\"tx_id\": tx_id}), result=[regulator.ccf_id, False, transaction[\"timestamp\"]],", "which is later read by app to enforce access restrictions", "enforced with primary.user_client(user_id=regulator.name) as c: check( c.rpc(\"REG_register\", {}), error=lambda e:", ") if __name__ == \"__main__\": def add(parser): parser.add_argument( \"--lua-script\", help=\"Regulator", "one in the list of banks for transaction in transactions:", "\"GB\", args.default_curve) regulator = AppUser(network, \"auditor\", \"GB\", args.default_curve) banks =", "infra.jsonrpc.ErrorCode.INVALID_CALLER_ID.value, ) # As permissioned manager, register regulator and banks", "country primary, _ = network.find_primary() network.create_users([self.name], curve) network.consortium.add_users(primary, [self.name]) with", "= { \"amt\": amount, \"bank_id\": bank.ccf_id, \"dst\": transaction[\"dst\"], \"dst_country\": transaction[\"dst_country\"],", "c: # Destination account is the next one in the", "transaction[\"src\"], \"src_country\": transaction[\"src_country\"], \"timestamp\": transaction[\"timestamp\"], \"tx_id\": tx_id, \"type\": transaction[\"type\"], }", "transaction[\"dst_country\"], \"src\": transaction[\"src\"], \"src_country\": transaction[\"src_country\"], \"timestamp\": transaction[\"timestamp\"], \"type\": transaction[\"type\"], },", "from flagged txs, try to get the flagged one that", "= infra.checker.Checker() network.start_and_join(args) primary, others = network.find_nodes() script = \"if", "{ \"regulator_id\": regulator.ccf_id, \"country\": regulator.country, \"script\": script, }, ), result=regulator.ccf_id,", "as mc: with primary.user_client(format=\"msgpack\", user_id=regulator.name) as c: # assert that", "script = \"if tonumber(amt) > 200000 then return true else", "row[\"type\"], \"timestamp\": strftime(\"%a, %d %b %Y %H:%M:%S +0000\", gmtime()), \"src_country\":", "tx_id in flagged_ids: # get from flagged txs, try to", "), result=bank.ccf_id, ) check(c.rpc(\"BK_get\", {\"id\": bank.ccf_id}), result=bank.country) check( c.rpc( \"REG_register\",", "== infra.jsonrpc.ErrorCode.INVALID_PARAMS.value, ) # get from flagged txs, try to", "flagged_txs[tx_id] = flagged_tx else: check( c.rpc(\"FLAGGED_TX_get\", {\"tx_id\": tx_id}), error=lambda e:", "lua script file\", type=str ) parser.add_argument( \"--datafile\", help=\"Load an existing", "primary, others = network.find_nodes() script = \"if tonumber(amt) > 200000", "was not revealed if tx_id not in revealed_tx_ids: check( c.rpc(\"REG_get_revealed\",", "revealed transactions for validation flagged_txs = {} revealed_tx_ids = []", "check( c.rpc(\"REG_poll_flagged\", {}), error=lambda e: e is not None and", "some transactions that were flagged for i, tx_id in enumerate(flagged_ids):", "return Calls:call( \"set_user_data\", {{ user_id = {manager.ccf_id}, user_data = {{", "permissioned manager, register regulator and banks with primary.node_client() as mc:", "c: check( c.rpc(\"REG_register\", {}), error=lambda e: e is not None", "in transactions: print(transaction) amount = transaction[\"amt\"] check(c.rpc(\"TX_record\", transaction), result=tx_id) check(", "10: break json_tx = { \"src\": row[\"origin\"], \"dst\": row[\"destination\"], \"amt\":", "{\"regulator_id\": bank.ccf_id, \"country\": bank.country}, ), error=lambda e: e is not", "= infra.checker.Checker(mc) with primary.user_client(format=\"msgpack\", user_id=manager.name) as c: check( c.rpc( 
\"REG_register\",", "# tracks flagged/non flagged and revealed/non revealed transactions for validation", "return f\"{self.ccf_id} ({self.name})\" def run(args): hosts = [\"localhost\"] with infra.ccf.network(", "enumerate(banks): with primary.user_client(format=\"msgpack\", user_id=bank.name) as c: # Destination account is", "data = [] with open(args.lua_script, \"r\") as f: data =", "how many transactions have been issued # tracks flagged/non flagged", "flagged for i, tx_id in enumerate(flagged_ids): if i % 2", "is granted special privileges by members, which is later read", "bank.ccf_id, \"dst\": transaction[\"dst\"], \"dst_country\": transaction[\"dst_country\"], \"src\": transaction[\"src\"], \"src_country\": transaction[\"src_country\"], \"timestamp\":", "primary.user_client(format=\"msgpack\", user_id=regulator.name) as c: # assert that the flagged txs", "type=str ) args = infra.e2e_args.cli_args(add) args.package = args.app_script and \"libluageneric\"", "user_id = {manager.ccf_id}, user_data = {{ privileges = {{ REGISTER_REGULATORS", "a regulator check( c.rpc(\"REG_poll_flagged\", {}), error=lambda e: e is not", "infra.jsonrpc.ErrorCode.INVALID_PARAMS.value, ) LOG.debug(f\"User {regulator} successfully registered as regulator\") for bank", "\"country\": bank.country}, ), result=bank.ccf_id, ) check(c.rpc(\"BK_get\", {\"id\": bank.ccf_id}), result=bank.country) check(", "# Copyright (c) Microsoft Corporation. All rights reserved. # Licensed", "transactions have been issued # tracks flagged/non flagged and revealed/non", "first flagged transaction with primary.user_client(format=\"msgpack\", user_id=bank.name) as c: # try", "__str__(self): return f\"{self.ccf_id} ({self.name})\" def run(args): hosts = [\"localhost\"] with", "is later read by app to enforce access restrictions proposal_result,", "infra.jsonrpc.ErrorCode.INVALID_PARAMS.value, ) non_flagged_ids.append(tx_id) tx_id += 1 LOG.success(f\"{tx_id} transactions have been", "result=bank.ccf_id, ) check(c.rpc(\"BK_get\", {\"id\": bank.ccf_id}), result=bank.country) check( c.rpc( \"REG_register\", {\"regulator_id\":", "curve) network.consortium.add_users(primary, [self.name]) with primary.user_client(user_id=self.name) as client: self.ccf_id = client.rpc(\"whoAmI\",", "2.0 License. 
import infra.e2e_args import infra.ccf import infra.jsonrpc import logging", "1 LOG.success(f\"{tx_id} transactions have been successfully issued\") # bank that", "tx_id}), result={ \"amt\": amount, \"bank_id\": bank.ccf_id, \"dst\": transaction[\"dst\"], \"dst_country\": transaction[\"dst_country\"],", "infra.jsonrpc.ErrorCode.INVALID_PARAMS.value, ) # get from flagged txs, try to get", "app to enforce access restrictions proposal_result, error = network.consortium.propose( 0,", "gmtime()), \"src_country\": row[\"src_country\"], \"dst_country\": row[\"dst_country\"], } transactions.append(json_tx) # Manager is", "list [tx_id, regulator_id] poll_flagged_ids.append(poll_flagged[0]) poll_flagged_ids.sort() assert poll_flagged_ids == flagged_ids for", "enforce access restrictions proposal_result, error = network.consortium.propose( 0, primary, f\"\"\"", "Destination account is the next one in the list of", "the next one in the list of banks for transaction", "poll_flagged_ids = [] for poll_flagged in resp.result: # poll flagged", "\"manager\", \"GB\", args.default_curve) regulator = AppUser(network, \"auditor\", \"GB\", args.default_curve) banks", "to poll flagged but fail as you are not a", "read by app to enforce access restrictions proposal_result, error =", "txs, try to get the flagged ones that were revealed", "access restrictions proposal_result, error = network.consortium.propose( 0, primary, f\"\"\" return", "setup\") tx_id = 0 # Tracks how many transactions have", "import csv import random from loguru import logger as LOG", "add(parser): parser.add_argument( \"--lua-script\", help=\"Regulator checker loaded as lua script file\",", "and e[\"code\"] == infra.jsonrpc.ErrorCode.INVALID_CALLER_ID.value, ) with primary.user_client(user_id=banks[0].name) as c: check(", "== infra.jsonrpc.ErrorCode.INVALID_PARAMS.value, ) LOG.debug(f\"User {regulator} successfully registered as regulator\") for", "}, ) if float(amount) > flagged_amt: check( c.rpc(\"FLAGGED_TX_get\", {\"tx_id\": tx_id}),", "infra.jsonrpc.ErrorCode.INVALID_CALLER_ID.value, ) # bank reveal some transactions that were flagged", "c.rpc(\"FLAGGED_TX_get\", {\"tx_id\": tx_id}), result=[regulator.ccf_id, False, transaction[\"timestamp\"]], ) flagged_tx = {", "not a regulator check( c.rpc(\"REG_poll_flagged\", {}), error=lambda e: e is", "non flagged txs for tx_id in non_flagged_ids: check( c.rpc(\"TX_reveal\", {\"tx_id\":", "to get the flagged one that was not revealed if", "read first 10 lines if i > 10: break json_tx", "hosts = [\"localhost\"] with infra.ccf.network( hosts, args.build_dir, args.debug_nodes, args.perf_nodes, pdb=args.pdb", "that were flagged for i, tx_id in enumerate(flagged_ids): if i", "have been issued # tracks flagged/non flagged and revealed/non revealed", "resp.result: # poll flagged is a list [tx_id, regulator_id] poll_flagged_ids.append(poll_flagged[0])", "transactions.append(json_tx) # Manager is granted special privileges by members, which", "= network.consortium.propose( 0, primary, f\"\"\" return Calls:call( \"set_user_data\", {{ user_id", "help=\"Load an existing scenario file (csv)\", type=str ) args =", "= [] non_flagged_ids = [] flagged_amt = 200000 for i,", "row[\"amount\"], \"type\": row[\"type\"], \"timestamp\": strftime(\"%a, %d %b %Y %H:%M:%S +0000\",", "client.rpc(\"whoAmI\", {}).result[\"caller_id\"] def __str__(self): return f\"{self.ccf_id} ({self.name})\" def run(args): hosts", "} transactions.append(json_tx) # Manager is granted special privileges by members,", "parser.add_argument( 
\"--datafile\", help=\"Load an existing scenario file (csv)\", type=str )", "as bank\") LOG.success(f\"{1} regulator and {len(banks)} bank(s) successfully setup\") tx_id", "revealed_tx_ids = [] flagged_ids = [] non_flagged_ids = [] flagged_amt", "flagged is a list [tx_id, regulator_id] poll_flagged_ids.append(poll_flagged[0]) poll_flagged_ids.sort() assert poll_flagged_ids", "{}), error=lambda e: e is not None and e[\"code\"] ==", "transaction[\"dst\"], \"dst_country\": transaction[\"dst_country\"], \"src\": transaction[\"src\"], \"src_country\": transaction[\"src_country\"], \"timestamp\": transaction[\"timestamp\"], \"tx_id\":", "txs that we poll for are correct resp = c.rpc(\"REG_poll_flagged\",", "not None and e[\"code\"] == infra.jsonrpc.ErrorCode.INVALID_CALLER_ID.value, ) # bank reveal", "is not None and e[\"code\"] == infra.jsonrpc.ErrorCode.INVALID_CALLER_ID.value, ) check( c.rpc(\"BK_register\",", "{ \"amt\": amount, \"bank_id\": bank.ccf_id, \"dst\": transaction[\"dst\"], \"dst_country\": transaction[\"dst_country\"], \"src\":", "and e[\"code\"] == infra.jsonrpc.ErrorCode.INVALID_PARAMS.value, ) non_flagged_ids.append(tx_id) tx_id += 1 LOG.success(f\"{tx_id}", "with primary.user_client(format=\"msgpack\", user_id=manager.name) as c: check( c.rpc( \"REG_register\", { \"regulator_id\":", "\"\".join(data) manager = AppUser(network, \"manager\", \"GB\", args.default_curve) regulator = AppUser(network,", "the Apache 2.0 License. import infra.e2e_args import infra.ccf import infra.jsonrpc", "[] flagged_amt = 200000 for i, bank in enumerate(banks): with", "\"--datafile\", help=\"Load an existing scenario file (csv)\", type=str ) args", "transaction[\"dst\"], \"dst_country\": transaction[\"dst_country\"], \"src\": transaction[\"src\"], \"src_country\": transaction[\"src_country\"], \"timestamp\": transaction[\"timestamp\"], \"type\":", "primary, f\"\"\" return Calls:call( \"set_user_data\", {{ user_id = {manager.ccf_id}, user_data", "tonumber(amt) > 200000 then return true else return false end\"", "{len(banks)} bank(s) successfully setup\") tx_id = 0 # Tracks how", "not None and e[\"code\"] == infra.jsonrpc.ErrorCode.INVALID_PARAMS.value, ) # regulator poll", "bank.country}, ), error=lambda e: e is not None and e[\"code\"]", "transactions have been successfully issued\") # bank that issued first", "is a list [tx_id, regulator_id] poll_flagged_ids.append(poll_flagged[0]) poll_flagged_ids.sort() assert poll_flagged_ids ==", "not None and e[\"code\"] == infra.jsonrpc.ErrorCode.INVALID_PARAMS.value, ) LOG.debug(f\"User {regulator} successfully", "try to get the flagged one that was not revealed", "}, ), result=regulator.ccf_id, ) check( c.rpc(\"REG_get\", {\"id\": regulator.ccf_id}), result=[regulator.country, script],", "reveal some transactions that were flagged for i, tx_id in", "as client: self.ccf_id = client.rpc(\"whoAmI\", {}).result[\"caller_id\"] def __str__(self): return f\"{self.ccf_id}", "row[\"dst_country\"], } transactions.append(json_tx) # Manager is granted special privileges by", "primary.user_client(user_id=regulator.name) as c: check( c.rpc(\"REG_register\", {}), error=lambda e: e is", "= csv.DictReader(f) for i, row in enumerate(datafile): # read first", "__name__ == \"__main__\": def add(parser): parser.add_argument( \"--lua-script\", help=\"Regulator checker loaded", "not None and e[\"code\"] == infra.jsonrpc.ErrorCode.INVALID_CALLER_ID.value, ) # As permissioned", "\"auditor\", \"GB\", args.default_curve) banks = [ AppUser(network, f\"bank{country}\", country, 
args.default_curve)", "return true else return false end\" if args.lua_script is not", "not None and e[\"code\"] == infra.jsonrpc.ErrorCode.INVALID_CALLER_ID.value, ) check( c.rpc(\"BK_register\", {}),", "with open(args.lua_script, \"r\") as f: data = f.readlines() script =", "check( c.rpc(\"TX_reveal\", {\"tx_id\": tx_id}), error=lambda e: e is not None", "newline=\"\") as f: datafile = csv.DictReader(f) for i, row in", "tx_id not in revealed_tx_ids: check( c.rpc(\"REG_get_revealed\", {\"tx_id\": tx_id}), error=lambda e:", "{{ user_id = {manager.ccf_id}, user_data = {{ privileges = {{", "tracks flagged/non flagged and revealed/non revealed transactions for validation flagged_txs", "self.ccf_id = client.rpc(\"whoAmI\", {}).result[\"caller_id\"] def __str__(self): return f\"{self.ccf_id} ({self.name})\" def", "{{ privileges = {{ REGISTER_REGULATORS = true, REGISTER_BANKS = true,", "is not None and e[\"code\"] == infra.jsonrpc.ErrorCode.INVALID_CALLER_ID.value, ) # bank", "script, }, ), result=regulator.ccf_id, ) check( c.rpc(\"REG_get\", {\"id\": regulator.ccf_id}), result=[regulator.country,", "result=True) revealed_tx_ids.append(tx_id) # bank try to reveal non flagged txs", "check( c.rpc( \"BK_register\", {\"bank_id\": bank.ccf_id, \"country\": bank.country}, ), result=bank.ccf_id, )", "try to reveal non flagged txs for tx_id in non_flagged_ids:", "flagged_amt = 200000 for i, bank in enumerate(banks): with primary.user_client(format=\"msgpack\",", "registered as bank\") LOG.success(f\"{1} regulator and {len(banks)} bank(s) successfully setup\")", "mc: check_commit = infra.checker.Checker(mc) with primary.user_client(format=\"msgpack\", user_id=manager.name) as c: check(", "check(c.rpc(\"BK_get\", {\"id\": bank.ccf_id}), result=bank.country) check( c.rpc( \"REG_register\", {\"regulator_id\": bank.ccf_id, \"country\":", "is not None and e[\"code\"] == infra.jsonrpc.ErrorCode.INVALID_PARAMS.value, ) LOG.debug(f\"User {bank}", "check(c.rpc(\"TX_record\", transaction), result=tx_id) check( c.rpc(\"TX_get\", {\"tx_id\": tx_id}), result={ \"amt\": amount,", "e[\"code\"] == infra.jsonrpc.ErrorCode.INVALID_CALLER_ID.value, ) # bank reveal some transactions that", "primary, _ = network.find_primary() network.create_users([self.name], curve) network.consortium.add_users(primary, [self.name]) with primary.user_client(user_id=self.name)", "c.rpc(\"REG_get_revealed\", {\"tx_id\": tx_id}), error=lambda e: e is not None and", "that we poll for are correct resp = c.rpc(\"REG_poll_flagged\", {})", "network.create_users([self.name], curve) network.consortium.add_users(primary, [self.name]) with primary.user_client(user_id=self.name) as client: self.ccf_id =", "if __name__ == \"__main__\": def add(parser): parser.add_argument( \"--lua-script\", help=\"Regulator checker", "%d %b %Y %H:%M:%S +0000\", gmtime()), \"src_country\": row[\"src_country\"], \"dst_country\": row[\"dst_country\"],", "= client.rpc(\"whoAmI\", {}).result[\"caller_id\"] def __str__(self): return f\"{self.ccf_id} ({self.name})\" def run(args):", "e is not None and e[\"code\"] == infra.jsonrpc.ErrorCode.INVALID_PARAMS.value, ) non_flagged_ids.append(tx_id)", "e is not None and e[\"code\"] == infra.jsonrpc.ErrorCode.INVALID_PARAMS.value, ) LOG.debug(f\"User", "with primary.user_client(format=\"msgpack\", user_id=bank.name) as c: # try to poll flagged", "0, primary, f\"\"\" return Calls:call( \"set_user_data\", {{ user_id = {manager.ccf_id},", "i, bank in enumerate(banks): with primary.user_client(format=\"msgpack\", user_id=bank.name) as c: 
#", "== infra.jsonrpc.ErrorCode.INVALID_PARAMS.value, ) # regulator poll for transactions that are", "AppUser(network, f\"bank{country}\", country, args.default_curve) for country in (\"US\", \"GB\", \"GR\",", "def __init__(self, network, name, country, curve): self.name = name self.country", "time import gmtime, strftime import csv import random from loguru", "network.consortium.add_users(primary, [self.name]) with primary.user_client(user_id=self.name) as client: self.ccf_id = client.rpc(\"whoAmI\", {}).result[\"caller_id\"]", "privileges = {{ REGISTER_REGULATORS = true, REGISTER_BANKS = true, }}", "primary.user_client(user_id=banks[0].name) as c: check( c.rpc(\"REG_register\", {}), error=lambda e: e is", "check( c.rpc(\"TX_get\", {\"tx_id\": tx_id}), result={ \"amt\": amount, \"bank_id\": bank.ccf_id, \"dst\":", "transaction[\"timestamp\"], \"tx_id\": tx_id, \"type\": transaction[\"type\"], } flagged_ids.append(tx_id) flagged_txs[tx_id] = flagged_tx", "that the flagged txs that we poll for are correct", "in enumerate(flagged_ids): if i % 2 == 0: check(c.rpc(\"TX_reveal\", {\"tx_id\":", "License. import infra.e2e_args import infra.ccf import infra.jsonrpc import logging from", "Copyright (c) Microsoft Corporation. All rights reserved. # Licensed under", "many transactions have been issued # tracks flagged/non flagged and", "bank try to reveal non flagged txs for tx_id in", "poll flagged but fail as you are not a regulator", "user_id=regulator.name) as c: # assert that the flagged txs that", "== \"__main__\": def add(parser): parser.add_argument( \"--lua-script\", help=\"Regulator checker loaded as", "as lua script file\", type=str ) parser.add_argument( \"--datafile\", help=\"Load an", "existing scenario file (csv)\", type=str ) args = infra.e2e_args.cli_args(add) args.package", "regulator and banks with primary.node_client() as mc: check_commit = infra.checker.Checker(mc)", "= 0 # Tracks how many transactions have been issued", "for are correct resp = c.rpc(\"REG_poll_flagged\", {}) poll_flagged_ids = []", "AppUser(network, \"auditor\", \"GB\", args.default_curve) banks = [ AppUser(network, f\"bank{country}\", country,", "= true, }} }} }} ) \"\"\", ) network.consortium.vote_using_majority(primary, proposal_result[\"id\"])", "check = infra.checker.Checker() network.start_and_join(args) primary, others = network.find_nodes() script =", "= [ AppUser(network, f\"bank{country}\", country, args.default_curve) for country in (\"US\",", "\"dst_country\": transaction[\"dst_country\"], \"src\": transaction[\"src\"], \"src_country\": transaction[\"src_country\"], \"timestamp\": transaction[\"timestamp\"], \"type\": transaction[\"type\"],", "None and e[\"code\"] == infra.jsonrpc.ErrorCode.INVALID_CALLER_ID.value, ) check( c.rpc(\"BK_register\", {}), error=lambda", "regulator.ccf_id}), result=[regulator.country, script], ) check( c.rpc( \"BK_register\", {\"bank_id\": regulator.ccf_id, \"country\":", "is not None and e[\"code\"] == infra.jsonrpc.ErrorCode.INVALID_PARAMS.value, ) non_flagged_ids.append(tx_id) tx_id", "not revealed if tx_id not in revealed_tx_ids: check( c.rpc(\"REG_get_revealed\", {\"tx_id\":", "file (csv)\", type=str ) args = infra.e2e_args.cli_args(add) args.package = args.app_script", "flagged_tx = { \"amt\": amount, \"bank_id\": bank.ccf_id, \"dst\": transaction[\"dst\"], \"dst_country\":", "args.default_curve) banks = [ AppUser(network, f\"bank{country}\", country, args.default_curve) for country", "enumerate(datafile): # read first 10 lines if i > 10:", "one that was not 
revealed if tx_id not in revealed_tx_ids:", ") args = infra.e2e_args.cli_args(add) args.package = args.app_script and \"libluageneric\" or", "\"__main__\": def add(parser): parser.add_argument( \"--lua-script\", help=\"Regulator checker loaded as lua", "result=tx_id) check( c.rpc(\"TX_get\", {\"tx_id\": tx_id}), result={ \"amt\": amount, \"bank_id\": bank.ccf_id,", "regulator.country, \"script\": script, }, ), result=regulator.ccf_id, ) check( c.rpc(\"REG_get\", {\"id\":", "c.rpc( \"REG_register\", {\"regulator_id\": bank.ccf_id, \"country\": bank.country}, ), error=lambda e: e", "banks = [ AppUser(network, f\"bank{country}\", country, args.default_curve) for country in", "c.rpc(\"TX_reveal\", {\"tx_id\": tx_id}), error=lambda e: e is not None and", "\"type\": row[\"type\"], \"timestamp\": strftime(\"%a, %d %b %Y %H:%M:%S +0000\", gmtime()),", "txs for tx_id in non_flagged_ids: check( c.rpc(\"TX_reveal\", {\"tx_id\": tx_id}), error=lambda", "REGISTER_REGULATORS = true, REGISTER_BANKS = true, }} }} }} )", "[self.name]) with primary.user_client(user_id=self.name) as client: self.ccf_id = client.rpc(\"whoAmI\", {}).result[\"caller_id\"] def", "for i, tx_id in enumerate(flagged_ids): if i % 2 ==", "i, tx_id in enumerate(flagged_ids): if i % 2 == 0:", "i > 10: break json_tx = { \"src\": row[\"origin\"], \"dst\":", "and e[\"code\"] == infra.jsonrpc.ErrorCode.INVALID_CALLER_ID.value, ) # As permissioned manager, register", "flagged txs that we poll for are correct resp =", "that was not revealed if tx_id not in revealed_tx_ids: check(", "\"GR\", \"FR\") ] transactions = [] with open(args.datafile, newline=\"\") as", "== infra.jsonrpc.ErrorCode.INVALID_PARAMS.value, ) LOG.debug(f\"User {bank} successfully registered as bank\") LOG.success(f\"{1}", "# bank reveal some transactions that were flagged for i,", "result=[regulator.country, script], ) check( c.rpc( \"BK_register\", {\"bank_id\": regulator.ccf_id, \"country\": regulator.country},", "LOG.success(f\"{1} regulator and {len(banks)} bank(s) successfully setup\") tx_id = 0", ") \"\"\", ) network.consortium.vote_using_majority(primary, proposal_result[\"id\"]) # Check permissions are enforced", "tx_id in revealed_tx_ids: check( c.rpc(\"REG_get_revealed\", {\"tx_id\": tx_id}), result=flagged_txs[tx_id], ) if", "then return true else return false end\" if args.lua_script is", "for tx_id in non_flagged_ids: check( c.rpc(\"TX_reveal\", {\"tx_id\": tx_id}), error=lambda e:", "regulator.ccf_id, \"country\": regulator.country, \"script\": script, }, ), result=regulator.ccf_id, ) check(", "in revealed_tx_ids: check( c.rpc(\"REG_get_revealed\", {\"tx_id\": tx_id}), result=flagged_txs[tx_id], ) if __name__", "infra.jsonrpc.ErrorCode.INVALID_CALLER_ID.value, ) with primary.user_client(user_id=banks[0].name) as c: check( c.rpc(\"REG_register\", {}), error=lambda", "= c.rpc(\"REG_poll_flagged\", {}) poll_flagged_ids = [] for poll_flagged in resp.result:", "] transactions = [] with open(args.datafile, newline=\"\") as f: datafile", "import logger as LOG class AppUser: def __init__(self, network, name,", "c.rpc(\"REG_get\", {\"id\": regulator.ccf_id}), result=[regulator.country, script], ) check( c.rpc( \"BK_register\", {\"bank_id\":", "we poll for are correct resp = c.rpc(\"REG_poll_flagged\", {}) poll_flagged_ids", "tx_id in non_flagged_ids: check( c.rpc(\"TX_reveal\", {\"tx_id\": tx_id}), error=lambda e: e", "\"type\": transaction[\"type\"], }, ) if float(amount) > flagged_amt: check( c.rpc(\"FLAGGED_TX_get\",", "c.rpc(\"BK_register\", {}), error=lambda e: 
e is not None and e[\"code\"]", "error=lambda e: e is not None and e[\"code\"] == infra.jsonrpc.ErrorCode.INVALID_CALLER_ID.value,", "infra.ccf import infra.jsonrpc import logging from time import gmtime, strftime", "= { \"src\": row[\"origin\"], \"dst\": row[\"destination\"], \"amt\": row[\"amount\"], \"type\": row[\"type\"],", "as f: data = f.readlines() script = \"\".join(data) manager =", "with open(args.datafile, newline=\"\") as f: datafile = csv.DictReader(f) for i,", "\"\"\", ) network.consortium.vote_using_majority(primary, proposal_result[\"id\"]) # Check permissions are enforced with", "c.rpc( \"BK_register\", {\"bank_id\": regulator.ccf_id, \"country\": regulator.country}, ), error=lambda e: e", "in the list of banks for transaction in transactions: print(transaction)", "the list of banks for transaction in transactions: print(transaction) amount", "for transaction in transactions: print(transaction) amount = transaction[\"amt\"] check(c.rpc(\"TX_record\", transaction),", "> flagged_amt: check( c.rpc(\"FLAGGED_TX_get\", {\"tx_id\": tx_id}), result=[regulator.ccf_id, False, transaction[\"timestamp\"]], )", "}} }} ) \"\"\", ) network.consortium.vote_using_majority(primary, proposal_result[\"id\"]) # Check permissions", "regulator poll for transactions that are flagged with primary.node_client() as", "the flagged txs that we poll for are correct resp", "transaction[\"src\"], \"src_country\": transaction[\"src_country\"], \"timestamp\": transaction[\"timestamp\"], \"type\": transaction[\"type\"], }, ) if", "regulator_id] poll_flagged_ids.append(poll_flagged[0]) poll_flagged_ids.sort() assert poll_flagged_ids == flagged_ids for tx_id in", "and e[\"code\"] == infra.jsonrpc.ErrorCode.INVALID_CALLER_ID.value, ) check( c.rpc(\"BK_register\", {}), error=lambda e:", "# read first 10 lines if i > 10: break", "open(args.datafile, newline=\"\") as f: datafile = csv.DictReader(f) for i, row", "are flagged with primary.node_client() as mc: with primary.user_client(format=\"msgpack\", user_id=regulator.name) as", "poll for transactions that are flagged with primary.node_client() as mc:", "\"timestamp\": strftime(\"%a, %d %b %Y %H:%M:%S +0000\", gmtime()), \"src_country\": row[\"src_country\"],", "e is not None and e[\"code\"] == infra.jsonrpc.ErrorCode.INVALID_PARAMS.value, ) #", "Microsoft Corporation. All rights reserved. 
# Licensed under the Apache", "row[\"origin\"], \"dst\": row[\"destination\"], \"amt\": row[\"amount\"], \"type\": row[\"type\"], \"timestamp\": strftime(\"%a, %d", "{\"bank_id\": bank.ccf_id, \"country\": bank.country}, ), result=bank.ccf_id, ) check(c.rpc(\"BK_get\", {\"id\": bank.ccf_id}),", "# assert that the flagged txs that we poll for", "revealed_tx_ids.append(tx_id) # bank try to reveal non flagged txs for", "as c: check( c.rpc( \"REG_register\", { \"regulator_id\": regulator.ccf_id, \"country\": regulator.country,", "c.rpc( \"BK_register\", {\"bank_id\": bank.ccf_id, \"country\": bank.country}, ), result=bank.ccf_id, ) check(c.rpc(\"BK_get\",", "\"GB\", args.default_curve) banks = [ AppUser(network, f\"bank{country}\", country, args.default_curve) for", "for bank in banks: check( c.rpc( \"BK_register\", {\"bank_id\": bank.ccf_id, \"country\":", "privileges by members, which is later read by app to", "tx_id = 0 # Tracks how many transactions have been", "\"src\": transaction[\"src\"], \"src_country\": transaction[\"src_country\"], \"timestamp\": transaction[\"timestamp\"], \"type\": transaction[\"type\"], }, )", "transaction[\"timestamp\"], \"type\": transaction[\"type\"], }, ) if float(amount) > flagged_amt: check(", "# get from flagged txs, try to get the flagged", "data = f.readlines() script = \"\".join(data) manager = AppUser(network, \"manager\",", "as c: # assert that the flagged txs that we", "= [] with open(args.datafile, newline=\"\") as f: datafile = csv.DictReader(f)", "user_id=bank.name) as c: # Destination account is the next one", "the flagged ones that were revealed for tx_id in revealed_tx_ids:", ") # bank reveal some transactions that were flagged for", "= flagged_tx else: check( c.rpc(\"FLAGGED_TX_get\", {\"tx_id\": tx_id}), error=lambda e: e", "Manager is granted special privileges by members, which is later", "infra.jsonrpc import logging from time import gmtime, strftime import csv", "logger as LOG class AppUser: def __init__(self, network, name, country,", "f: data = f.readlines() script = \"\".join(data) manager = AppUser(network,", "in banks: check( c.rpc( \"BK_register\", {\"bank_id\": bank.ccf_id, \"country\": bank.country}, ),", "curve): self.name = name self.country = country primary, _ =", "been successfully issued\") # bank that issued first flagged transaction", "All rights reserved. 
# Licensed under the Apache 2.0 License.", "non_flagged_ids = [] flagged_amt = 200000 for i, bank in", "is the next one in the list of banks for", "e[\"code\"] == infra.jsonrpc.ErrorCode.INVALID_CALLER_ID.value, ) with primary.user_client(user_id=banks[0].name) as c: check( c.rpc(\"REG_register\",", "c: check( c.rpc( \"REG_register\", { \"regulator_id\": regulator.ccf_id, \"country\": regulator.country, \"script\":", "# Destination account is the next one in the list", "== infra.jsonrpc.ErrorCode.INVALID_CALLER_ID.value, ) # bank reveal some transactions that were", "As permissioned manager, register regulator and banks with primary.node_client() as", "infra.checker.Checker() network.start_and_join(args) primary, others = network.find_nodes() script = \"if tonumber(amt)", "issued first flagged transaction with primary.user_client(format=\"msgpack\", user_id=bank.name) as c: #", "in non_flagged_ids: check( c.rpc(\"TX_reveal\", {\"tx_id\": tx_id}), error=lambda e: e is", "> 10: break json_tx = { \"src\": row[\"origin\"], \"dst\": row[\"destination\"],", "Tracks how many transactions have been issued # tracks flagged/non", "revealed_tx_ids: check( c.rpc(\"REG_get_revealed\", {\"tx_id\": tx_id}), error=lambda e: e is not", "= [\"localhost\"] with infra.ccf.network( hosts, args.build_dir, args.debug_nodes, args.perf_nodes, pdb=args.pdb )", ") flagged_tx = { \"amt\": amount, \"bank_id\": bank.ccf_id, \"dst\": transaction[\"dst\"],", "check( c.rpc(\"REG_get_revealed\", {\"tx_id\": tx_id}), result=flagged_txs[tx_id], ) if __name__ == \"__main__\":", "infra.e2e_args import infra.ccf import infra.jsonrpc import logging from time import", "with primary.user_client(format=\"msgpack\", user_id=regulator.name) as c: # assert that the flagged", "regulator and {len(banks)} bank(s) successfully setup\") tx_id = 0 #", "poll_flagged in resp.result: # poll flagged is a list [tx_id,", "break json_tx = { \"src\": row[\"origin\"], \"dst\": row[\"destination\"], \"amt\": row[\"amount\"],", "with primary.node_client() as mc: check_commit = infra.checker.Checker(mc) with primary.user_client(format=\"msgpack\", user_id=manager.name)", "e[\"code\"] == infra.jsonrpc.ErrorCode.INVALID_PARAMS.value, ) LOG.debug(f\"User {bank} successfully registered as bank\")", "have been successfully issued\") # bank that issued first flagged", "}} }} }} ) \"\"\", ) network.consortium.vote_using_majority(primary, proposal_result[\"id\"]) # Check", "bank that issued first flagged transaction with primary.user_client(format=\"msgpack\", user_id=bank.name) as", "poll_flagged_ids.append(poll_flagged[0]) poll_flagged_ids.sort() assert poll_flagged_ids == flagged_ids for tx_id in flagged_ids:", "get from flagged txs, try to get the flagged one", "scenario file (csv)\", type=str ) args = infra.e2e_args.cli_args(add) args.package =", "flagged/non flagged and revealed/non revealed transactions for validation flagged_txs =", "as network: check = infra.checker.Checker() network.start_and_join(args) primary, others = network.find_nodes()", "in flagged_ids: # get from flagged txs, try to get", "registered as regulator\") for bank in banks: check( c.rpc( \"BK_register\",", "infra.jsonrpc.ErrorCode.INVALID_PARAMS.value, ) # regulator poll for transactions that are flagged", "args.build_dir, args.debug_nodes, args.perf_nodes, pdb=args.pdb ) as network: check = infra.checker.Checker()", "flagged but fail as you are not a regulator check(", "= \"\".join(data) manager = AppUser(network, \"manager\", \"GB\", args.default_curve) regulator =", "client: 
self.ccf_id = client.rpc(\"whoAmI\", {}).result[\"caller_id\"] def __str__(self): return f\"{self.ccf_id} ({self.name})\"", "if float(amount) > flagged_amt: check( c.rpc(\"FLAGGED_TX_get\", {\"tx_id\": tx_id}), result=[regulator.ccf_id, False,", "csv import random from loguru import logger as LOG class", "[] with open(args.datafile, newline=\"\") as f: datafile = csv.DictReader(f) for", "if i % 2 == 0: check(c.rpc(\"TX_reveal\", {\"tx_id\": tx_id}), result=True)", "LOG.debug(f\"User {regulator} successfully registered as regulator\") for bank in banks:", "= {{ privileges = {{ REGISTER_REGULATORS = true, REGISTER_BANKS =", "primary.user_client(format=\"msgpack\", user_id=bank.name) as c: # Destination account is the next", "LOG.success(f\"{tx_id} transactions have been successfully issued\") # bank that issued", "issued # tracks flagged/non flagged and revealed/non revealed transactions for", "args.default_curve) regulator = AppUser(network, \"auditor\", \"GB\", args.default_curve) banks = [", "row[\"src_country\"], \"dst_country\": row[\"dst_country\"], } transactions.append(json_tx) # Manager is granted special", "are enforced with primary.user_client(user_id=regulator.name) as c: check( c.rpc(\"REG_register\", {}), error=lambda", "transaction[\"type\"], }, ) if float(amount) > flagged_amt: check( c.rpc(\"FLAGGED_TX_get\", {\"tx_id\":", "\"src_country\": transaction[\"src_country\"], \"timestamp\": transaction[\"timestamp\"], \"type\": transaction[\"type\"], }, ) if float(amount)", "== flagged_ids for tx_id in flagged_ids: # get from flagged", ") LOG.debug(f\"User {regulator} successfully registered as regulator\") for bank in", "flagged transaction with primary.user_client(format=\"msgpack\", user_id=bank.name) as c: # try to", "flagged_txs = {} revealed_tx_ids = [] flagged_ids = [] non_flagged_ids", "end\" if args.lua_script is not None: data = [] with", "checker loaded as lua script file\", type=str ) parser.add_argument( \"--datafile\",", "proposal_result, error = network.consortium.propose( 0, primary, f\"\"\" return Calls:call( \"set_user_data\",", "{manager.ccf_id}, user_data = {{ privileges = {{ REGISTER_REGULATORS = true,", ") check(c.rpc(\"BK_get\", {\"id\": bank.ccf_id}), result=bank.country) check( c.rpc( \"REG_register\", {\"regulator_id\": bank.ccf_id,", "e[\"code\"] == infra.jsonrpc.ErrorCode.INVALID_PARAMS.value, ) non_flagged_ids.append(tx_id) tx_id += 1 LOG.success(f\"{tx_id} transactions", "c.rpc(\"REG_poll_flagged\", {}) poll_flagged_ids = [] for poll_flagged in resp.result: #", "primary.user_client(format=\"msgpack\", user_id=bank.name) as c: # try to poll flagged but", "name, country, curve): self.name = name self.country = country primary,", "bank.ccf_id, \"country\": bank.country}, ), result=bank.ccf_id, ) check(c.rpc(\"BK_get\", {\"id\": bank.ccf_id}), result=bank.country)", "fail as you are not a regulator check( c.rpc(\"REG_poll_flagged\", {}),", "and {len(banks)} bank(s) successfully setup\") tx_id = 0 # Tracks", "json_tx = { \"src\": row[\"origin\"], \"dst\": row[\"destination\"], \"amt\": row[\"amount\"], \"type\":", "parser.add_argument( \"--lua-script\", help=\"Regulator checker loaded as lua script file\", type=str", "\"country\": bank.country}, ), error=lambda e: e is not None and", "= \"if tonumber(amt) > 200000 then return true else return", "error=lambda e: e is not None and e[\"code\"] == infra.jsonrpc.ErrorCode.INVALID_PARAMS.value,", "correct resp = c.rpc(\"REG_poll_flagged\", {}) poll_flagged_ids = [] for poll_flagged", "args = 
infra.e2e_args.cli_args(add) args.package = args.app_script and \"libluageneric\" or \"liblogging\"", "= transaction[\"amt\"] check(c.rpc(\"TX_record\", transaction), result=tx_id) check( c.rpc(\"TX_get\", {\"tx_id\": tx_id}), result={", "validation flagged_txs = {} revealed_tx_ids = [] flagged_ids = []", "{}) poll_flagged_ids = [] for poll_flagged in resp.result: # poll", "script = \"\".join(data) manager = AppUser(network, \"manager\", \"GB\", args.default_curve) regulator", "bank\") LOG.success(f\"{1} regulator and {len(banks)} bank(s) successfully setup\") tx_id =", "check( c.rpc(\"REG_get_revealed\", {\"tx_id\": tx_id}), error=lambda e: e is not None", "members, which is later read by app to enforce access", "check( c.rpc(\"BK_register\", {}), error=lambda e: e is not None and", "{}).result[\"caller_id\"] def __str__(self): return f\"{self.ccf_id} ({self.name})\" def run(args): hosts =" ]
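The default regulator script embedded in run() flags any transaction whose amount exceeds 200000. For quick reference, a minimal Python sketch of the same predicate follows; it is illustrative only (the deployed check runs as the Lua snippet shown above, and is_flagged is a hypothetical helper, not part of the test):

def is_flagged(amt, threshold=200000):
    # Mirrors the default Lua checker:
    #   "if tonumber(amt) > 200000 then return true else return false end"
    # Amounts arrive as CSV strings, hence the float() conversion.
    return float(amt) > threshold

assert is_flagged("250000")
assert not is_flagged("199999.99")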
import os
import re
import glob
import logging
import textwrap
import fileinput
import numpy as np
from finmag.energies import Zeeman
from finmag.util.helpers import norm

log = logging.getLogger(name="finmag")


def hysteresis(sim, H_ext_list, fun=None, **kwargs):
    """
    Set the applied field to the first value in `H_ext_list` (which should
    be a list of external field vectors) and then call the relax() method.
    When convergence is reached, the field is changed to the next one in
    H_ext_list, and so on until all values in H_ext_list are exhausted.

    Note: The fields in H_ext_list are applied *in addition to* any Zeeman
    interactions that are already present in the simulation. In particular,
    if only one external field should be present then do not add any
    Zeeman interactions before calling this method.

    If you would like to perform a certain action (e.g. save a VTK
    snapshot of the magnetisation) at the end of each relaxation stage,
    use the sim.schedule() command with the directive 'at_end=True' as in
    the following example:

        sim.schedule('save_vtk', at_end=True, ...)
        sim.hysteresis(...)

    *Arguments*

        H_ext_list: list of 3-vectors

            List of external fields, where each field can have any of the
            forms accepted by Zeeman.__init__() (see its docstring for
            more details).

        fun: callable

            The user can pass a function here (which should accept the
            Simulation object as its only argument); this function is
            called after each relaxation and determines the return value
            (see below). For example, if fun = (lambda sim: sim.m_average[0])
            then the return value is a list of values representing the
            average x-component of the magnetisation at the end of each
            relaxation.

    All other keyword arguments are passed on to the relax() method. See
    its documentation for details.

    *Return value*

    If `fun` is not None then the return value is a list containing an
    accumulation of all the return values of `fun` after each stage.
    Otherwise the return value is None.
    """
    if H_ext_list == []:
        return

    # Add a new Zeeman interaction, initialised to zero.
    H = Zeeman((0, 0, 0))
    sim.add(H)

    # We keep track of the current stage of the hysteresis loop.
    cur_stage = 0
    num_stages = len(H_ext_list)

    res = []
    try:
        while True:
            H_cur = H_ext_list[cur_stage]
            log.info(
                "Entering hysteresis stage #{} ({} out of {}). Current field: "
                "{}".format(cur_stage, cur_stage + 1, num_stages, H_cur))
            H.set_value(H_cur)
            sim.relax(**kwargs)
            cur_stage += 1
            if fun is not None:
                retval = fun(sim)
                res.append(retval)
                log.debug("hysteresis callback function '{}' returned "
                          "value: {}".format(fun.__name__, retval))
    except IndexError:
        log.info("Hysteresis is finished.")

    log.info("Removing the applied field used for hysteresis.")
    sim.remove_interaction(H.name)

    return res or None


def hysteresis_loop(sim, H_max, direction, N, **kwargs):
    """
    Compute a hysteresis loop. This is a specialised convenience version
    of the more general `hysteresis` method. It computes a hysteresis
    loop where the external field is applied along a single axis and
    changes magnitude from +H_max to -H_max and back (using N steps in
    each direction).

    The return value is a pair (H_vals, m_vals), where H_vals is the list
    of field strengths at which a relaxation is performed and m_vals is a
    list of scalar values containing, for each field value, the averaged
    value of the magnetisation along the axis `direction` (after
    relaxation has been reached). Thus the command plot(H_vals, m_vals)
    could be used to plot the hysteresis loop.

        direction -- a vector indicating the direction of the external
                     field (will be normalised automatically)

        H_max -- maximum field strength

        N -- number of data points to compute in each direction
             (thus the total number of data points for the entire
             loop will be 2*N-1)

        kwargs -- any keyword argument accepted by the hysteresis() method
    """
    d = np.array(direction)
    H_dir = d / norm(d)
    H_norms = list(np.linspace(H_max, -H_max, N)) + \
        list(np.linspace(-H_max, H_max, N))
    H_vals = [h * H_dir for h in H_norms]
    m_avg = hysteresis(sim, H_vals, fun=lambda sim: sim.m_average, **kwargs)
    # projected lengths of the averaged magnetisation values along the
    # axis `direction`
    m_vals = [np.dot(m, H_dir) for m in m_avg]
    return (H_norms, m_vals)
See its documentation", "a hysteresis loop where the external field is applied along", "N)) + \\ list(np.linspace(-H_max, H_max, N)) H_vals = [h *", "res or None def hysteresis_loop(sim, H_max, direction, N, **kwargs): \"\"\"", "import fileinput import numpy as np from finmag.energies import Zeeman", "magnetisation) at the end of each relaxation stage, use the", "kwargs -- any keyword argument accepted by the hysteresis() method", "of data points for the entire loop will be 2*N-1)", "This is a specialised convenience version of the more general", "field strength N -- number of data points to compute", "log.info(\"Removing the applied field used for hysteresis.\") sim.remove_interaction(H.name) return res", "can have any of the forms accepted by Zeeman.__init__() (see", "field is applied along a single axis and changes magnitude", "external fields, where each field can have any of the", "the external field (will be normalised automatically) H_max -- maximum", "should be present then do not add any Zeeman interactions", "the relax() method. When convergence is reached, the field is", "is finished.\") log.info(\"Removing the applied field used for hysteresis.\") sim.remove_interaction(H.name)", "0)) sim.add(H) # We keep track of the current stage", "its docstring for more details). fun: callable The user can", "of all the return values of `fun` after each stage.", "convenience version of the more general `hysteresis` method. It computes", "fun: callable The user can pass a function here (which", "other keyword arguments are passed on to the relax() method.", "({} out of {}). Current field: \" \"{}\".format(cur_stage, cur_stage +", "value in `H_ext_list` (which should be a list of external", "should accept the Simulation object as its only argument); this", "already present in the simulation. In particular, if only one", "by the hysteresis() method \"\"\" d = np.array(direction) H_dir =", "not None: retval = fun(sim) res.append(retval) log.debug(\"hysteresis callback function '{}'", "average x-component of the magnetisation at the end of each", "zero. H = Zeeman((0, 0, 0)) sim.add(H) # We keep", "reached, the field is changed to the next one in", "values of `fun` after each stage. Otherwise the return value", "+ 1, num_stages, H_cur)) H.set_value(H_cur) sim.relax(**kwargs) cur_stage += 1 if", "as in the following example: sim.schedule('save_vtk', at_end=True, ...) sim.hysteresis(...) *Arguments*", "return value is a list containing an accumulation of all", "a list of external field vectors) and then call the", "the next one in H_ext_list, and so on until all", "hysteresis loop. This is a specialised convenience version of the", "of the hysteresis loop. cur_stage = 0 num_stages = len(H_ext_list)", "initialised to zero. H = Zeeman((0, 0, 0)) sim.add(H) #", "any of the forms accepted by Zeeman.__init__() (see its docstring", "in `H_ext_list` (which should be a list of external field", "a certain action (e.g. save a VTK snapshot of the", "do not add any Zeeman interactions before calling this method.", "representing the average x-component of the magnetisation at the end", "have any of the forms accepted by Zeeman.__init__() (see its", "Add a new Zeeman interaction, initialised to zero. H =", "(thus the total number of data points for the entire", "\"{}\".format(cur_stage, cur_stage + 1, num_stages, H_cur)) H.set_value(H_cur) sim.relax(**kwargs) cur_stage +=", "(see its docstring for more details). 
fun: callable The user", "the field is changed to the next one in H_ext_list,", "fields in H_ext_list are applied *in addition to* any Zeeman", "before calling this method. If you would like to perform", "1, num_stages, H_cur)) H.set_value(H_cur) sim.relax(**kwargs) cur_stage += 1 if fun", "sim.add(H) # We keep track of the current stage of", "return value is a list of values representing the average", "addition to* any Zeeman interactions that are already present in", "the return value (see below). For example, if fun =", "along the axis # `H_dir` m_vals = [np.dot(m, H_dir) for", "strengths at which a relaxation is performed and m_vals is", "d = np.array(direction) H_dir = d / norm(d) H_norms =", "list of field strengths at which a relaxation is performed", "See its documentation for details. *Return value* If `fun` is", "the magnetisation at the end of each relaxation. All other", "the sim.schedule() command with the directive 'at_end=True' as in the", "field is changed to the next one in H_ext_list, and", "more details). fun: callable The user can pass a function", "to zero. H = Zeeman((0, 0, 0)) sim.add(H) # We", "value is None. \"\"\" if H_ext_list == []: return #", "np from finmag.energies import Zeeman from finmag.util.helpers import norm log", "of scalar values containing, for each field value, the averaged", "number of data points for the entire loop will be", "after each relaxation and determines the return value (see below).", "to the next one in H_ext_list, and so on until", "arguments are passed on to the relax() method. See its", "applied along a single axis and changes magnitude from +H_max", "method. If you would like to perform a certain action", "each relaxation. All other keyword arguments are passed on to", "hysteresis.\") sim.remove_interaction(H.name) return res or None def hysteresis_loop(sim, H_max, direction,", "relaxation is performed and m_vals is a list of scalar", "-H_max, N)) + \\ list(np.linspace(-H_max, H_max, N)) H_vals = [h", "**kwargs): \"\"\" Set the applied field to the first value", "averaged value of the magnetisation along the axis `direction` (after", "norm(d) H_norms = list(np.linspace(H_max, -H_max, N)) + \\ list(np.linspace(-H_max, H_max,", "save a VTK snapshot of the magnetisation) at the end", "and determines the return value (see below). For example, if", "computes a hysteresis loop where the external field is applied", "When convergence is reached, the field is changed to the", "of the averaged magnetisation values along the axis # `H_dir`", "If you would like to perform a certain action (e.g.", "(e.g. save a VTK snapshot of the magnetisation) at the", "in H_ext_list, and so on until all values in H_ext_list", "relaxation and determines the return value (see below). For example,", "values representing the average x-component of the magnetisation at the", "in the following example: sim.schedule('save_vtk', at_end=True, ...) sim.hysteresis(...) *Arguments* H_ext_list:", "one in H_ext_list, and so on until all values in", "list of external field vectors) and then call the relax()", "been reached). Thus the command plot(H_vals, m_vals) could be used", "are already present in the simulation. In particular, if only", "a hysteresis loop. This is a specialised convenience version of", "loop. cur_stage = 0 num_stages = len(H_ext_list) res = []", "strength N -- number of data points to compute in", "H_vals, fun=lambda sim: sim.m_average, **kwargs) # projected lengths of the", "external field vectors) and then call the relax() method. 
When", "of the external field (will be normalised automatically) H_max --", "magnetisation along the axis `direction` (after relaxation has been reached).", "axis # `H_dir` m_vals = [np.dot(m, H_dir) for m in", "return res or None def hysteresis_loop(sim, H_max, direction, N, **kwargs):", "function here (which should accept the Simulation object as its", "We keep track of the current stage of the hysteresis", "hysteresis stage #{} ({} out of {}). Current field: \"", "of the magnetisation at the end of each relaxation. All", "Zeeman interaction, initialised to zero. H = Zeeman((0, 0, 0))", "stage #{} ({} out of {}). Current field: \" \"{}\".format(cur_stage,", "in H_ext_list are applied *in addition to* any Zeeman interactions", "retval)) except IndexError: log.info(\"Hysteresis is finished.\") log.info(\"Removing the applied field", "import Zeeman from finmag.util.helpers import norm log = logging.getLogger(name=\"finmag\") def", "H_vals is the list of field strengths at which a", "\"value: {}\".format(fun.__name__, retval)) except IndexError: log.info(\"Hysteresis is finished.\") log.info(\"Removing the", "= [np.dot(m, H_dir) for m in m_avg] return (H_norms, m_vals)", "in each direction (thus the total number of data points", "is changed to the next one in H_ext_list, and so", "is called after each relaxation and determines the return value", "m_vals) could be used to plot the hysteresis loop. direction", "relax() method. When convergence is reached, the field is changed", "maximum field strength N -- number of data points to", "value is a list containing an accumulation of all the", "vectors) and then call the relax() method. When convergence is", "re import glob import logging import textwrap import fileinput import", "has been reached). Thus the command plot(H_vals, m_vals) could be", "h in H_norms] m_avg = hysteresis(sim, H_vals, fun=lambda sim: sim.m_average,", "would like to perform a certain action (e.g. save a", "from +H_max to -H_max and back (using N steps in", "import numpy as np from finmag.energies import Zeeman from finmag.util.helpers", "function '{}' returned \" \"value: {}\".format(fun.__name__, retval)) except IndexError: log.info(\"Hysteresis", "that are already present in the simulation. In particular, if", "points to compute in each direction (thus the total number", "a single axis and changes magnitude from +H_max to -H_max", "N steps in each direction). The return value is a", "an accumulation of all the return values of `fun` after", "sim.hysteresis(...) *Arguments* H_ext_list: list of 3-vectors List of external fields,", "H_ext_list == []: return # Add a new Zeeman interaction,", "the averaged magnetisation values along the axis # `H_dir` m_vals", "lengths of the averaged magnetisation values along the axis #", "the list of field strengths at which a relaxation is", "the following example: sim.schedule('save_vtk', at_end=True, ...) sim.hysteresis(...) *Arguments* H_ext_list: list", "field vectors) and then call the relax() method. When convergence", "stage, use the sim.schedule() command with the directive 'at_end=True' as", "magnetisation at the end of each relaxation. All other keyword", "each relaxation and determines the return value (see below). For", "glob import logging import textwrap import fileinput import numpy as", "next one in H_ext_list, and so on until all values", "= 0 num_stages = len(H_ext_list) res = [] try: while", "of the more general `hysteresis` method. 
It computes a hysteresis", "fun=lambda sim: sim.m_average, **kwargs) # projected lengths of the averaged", "log.info(\"Hysteresis is finished.\") log.info(\"Removing the applied field used for hysteresis.\")", "from finmag.util.helpers import norm log = logging.getLogger(name=\"finmag\") def hysteresis(sim, H_ext_list,", "reached). Thus the command plot(H_vals, m_vals) could be used to", "The fields in H_ext_list are applied *in addition to* any", "num_stages, H_cur)) H.set_value(H_cur) sim.relax(**kwargs) cur_stage += 1 if fun is", "field used for hysteresis.\") sim.remove_interaction(H.name) return res or None def", "be a list of external field vectors) and then call", "values along the axis # `H_dir` m_vals = [np.dot(m, H_dir)", "as np from finmag.energies import Zeeman from finmag.util.helpers import norm", "each field can have any of the forms accepted by", "for the entire loop will be 2*N-1) kwargs -- any", "the return value is a list containing an accumulation of", "a relaxation is performed and m_vals is a list of", "steps in each direction). The return value is a pair", "are exhausted. Note: The fields in H_ext_list are applied *in", "example, if fun = (lambda sim: sim.m_average[0]) then the return", "is reached, the field is changed to the next one", "Otherwise the return value is None. \"\"\" if H_ext_list ==", "{}\".format(fun.__name__, retval)) except IndexError: log.info(\"Hysteresis is finished.\") log.info(\"Removing the applied", "more general `hysteresis` method. It computes a hysteresis loop where", "field (will be normalised automatically) H_max -- maximum field strength", "for h in H_norms] m_avg = hysteresis(sim, H_vals, fun=lambda sim:", "direction). The return value is a pair (H_vals, m_vals), where", "[]: return # Add a new Zeeman interaction, initialised to", "sim: sim.m_average, **kwargs) # projected lengths of the averaged magnetisation", "list of 3-vectors List of external fields, where each field", "+ \\ list(np.linspace(-H_max, H_max, N)) H_vals = [h * H_dir", "if fun is not None: retval = fun(sim) res.append(retval) log.debug(\"hysteresis", "could be used to plot the hysteresis loop. direction --", "list(np.linspace(-H_max, H_max, N)) H_vals = [h * H_dir for h", "interactions before calling this method. If you would like to", "hysteresis(sim, H_vals, fun=lambda sim: sim.m_average, **kwargs) # projected lengths of", "the Simulation object as its only argument); this function is", "returned \" \"value: {}\".format(fun.__name__, retval)) except IndexError: log.info(\"Hysteresis is finished.\")", "`direction` (after relaxation has been reached). Thus the command plot(H_vals,", "def hysteresis_loop(sim, H_max, direction, N, **kwargs): \"\"\" Compute a hysteresis", "'{}' returned \" \"value: {}\".format(fun.__name__, retval)) except IndexError: log.info(\"Hysteresis is", "H_cur)) H.set_value(H_cur) sim.relax(**kwargs) cur_stage += 1 if fun is not", "the current stage of the hysteresis loop. cur_stage = 0", "applied *in addition to* any Zeeman interactions that are already", "\" \"value: {}\".format(fun.__name__, retval)) except IndexError: log.info(\"Hysteresis is finished.\") log.info(\"Removing", "if H_ext_list == []: return # Add a new Zeeman", "H_norms] m_avg = hysteresis(sim, H_vals, fun=lambda sim: sim.m_average, **kwargs) #", "the return values of `fun` after each stage. 
Otherwise the", "hysteresis() method \"\"\" d = np.array(direction) H_dir = d /", "num_stages = len(H_ext_list) res = [] try: while True: H_cur", "= list(np.linspace(H_max, -H_max, N)) + \\ list(np.linspace(-H_max, H_max, N)) H_vals", "and so on until all values in H_ext_list are exhausted.", "a specialised convenience version of the more general `hysteresis` method.", "the forms accepted by Zeeman.__init__() (see its docstring for more", "hysteresis loop. cur_stage = 0 num_stages = len(H_ext_list) res =", "pair (H_vals, m_vals), where H_vals is the list of field", "True: H_cur = H_ext_list[cur_stage] log.info( \"Entering hysteresis stage #{} ({}", "is None. \"\"\" if H_ext_list == []: return # Add", "and changes magnitude from +H_max to -H_max and back (using", "np.array(direction) H_dir = d / norm(d) H_norms = list(np.linspace(H_max, -H_max,", "passed on to the relax() method. See its documentation for", "the command plot(H_vals, m_vals) could be used to plot the", "sim.remove_interaction(H.name) return res or None def hysteresis_loop(sim, H_max, direction, N,", "a list of values representing the average x-component of the", "each relaxation stage, use the sim.schedule() command with the directive", "the hysteresis loop. direction -- a vector indicating the direction", "back (using N steps in each direction). The return value", "each field value, the averaged value of the magnetisation along", "relaxation has been reached). Thus the command plot(H_vals, m_vals) could", "used for hysteresis.\") sim.remove_interaction(H.name) return res or None def hysteresis_loop(sim,", "[h * H_dir for h in H_norms] m_avg = hysteresis(sim,", "IndexError: log.info(\"Hysteresis is finished.\") log.info(\"Removing the applied field used for", "at_end=True, ...) sim.hysteresis(...) *Arguments* H_ext_list: list of 3-vectors List of", "fields, where each field can have any of the forms", "details. *Return value* If `fun` is not None then the", "directive 'at_end=True' as in the following example: sim.schedule('save_vtk', at_end=True, ...)", "-H_max and back (using N steps in each direction). The", "magnetisation values along the axis # `H_dir` m_vals = [np.dot(m,", "averaged magnetisation values along the axis # `H_dir` m_vals =", "log.info( \"Entering hysteresis stage #{} ({} out of {}). Current", "for hysteresis.\") sim.remove_interaction(H.name) return res or None def hysteresis_loop(sim, H_max,", "accepted by Zeeman.__init__() (see its docstring for more details). fun:", "as its only argument); this function is called after each", "try: while True: H_cur = H_ext_list[cur_stage] log.info( \"Entering hysteresis stage", "like to perform a certain action (e.g. save a VTK", "= d / norm(d) H_norms = list(np.linspace(H_max, -H_max, N)) +", "import textwrap import fileinput import numpy as np from finmag.energies", "`fun` after each stage. Otherwise the return value is None.", "is a specialised convenience version of the more general `hysteresis`", "is applied along a single axis and changes magnitude from", "is not None then the return value is a list", "sim.relax(**kwargs) cur_stage += 1 if fun is not None: retval", "(which should be a list of external field vectors) and", "by Zeeman.__init__() (see its docstring for more details). fun: callable", "logging.getLogger(name=\"finmag\") def hysteresis(sim, H_ext_list, fun=None, **kwargs): \"\"\" Set the applied", "you would like to perform a certain action (e.g. 
save", "points for the entire loop will be 2*N-1) kwargs --", "Zeeman interactions before calling this method. If you would like", "out of {}). Current field: \" \"{}\".format(cur_stage, cur_stage + 1,", "0 num_stages = len(H_ext_list) res = [] try: while True:", "where each field can have any of the forms accepted", "# projected lengths of the averaged magnetisation values along the", "log = logging.getLogger(name=\"finmag\") def hysteresis(sim, H_ext_list, fun=None, **kwargs): \"\"\" Set", "external field is applied along a single axis and changes", "interactions that are already present in the simulation. In particular,", "N, **kwargs): \"\"\" Compute a hysteresis loop. This is a", "general `hysteresis` method. It computes a hysteresis loop where the", "to plot the hysteresis loop. direction -- a vector indicating", "then do not add any Zeeman interactions before calling this", "H_max, direction, N, **kwargs): \"\"\" Compute a hysteresis loop. This", "For example, if fun = (lambda sim: sim.m_average[0]) then the", "all the return values of `fun` after each stage. Otherwise", "#{} ({} out of {}). Current field: \" \"{}\".format(cur_stage, cur_stage", "the return value is a list of values representing the", "field: \" \"{}\".format(cur_stage, cur_stage + 1, num_stages, H_cur)) H.set_value(H_cur) sim.relax(**kwargs)", "external field should be present then do not add any", "applied field to the first value in `H_ext_list` (which should", "first value in `H_ext_list` (which should be a list of", "the axis # `H_dir` m_vals = [np.dot(m, H_dir) for m", "log.debug(\"hysteresis callback function '{}' returned \" \"value: {}\".format(fun.__name__, retval)) except", "keyword argument accepted by the hysteresis() method \"\"\" d =", "a list containing an accumulation of all the return values", "this method. If you would like to perform a certain", "or None def hysteresis_loop(sim, H_max, direction, N, **kwargs): \"\"\" Compute", "values in H_ext_list are exhausted. Note: The fields in H_ext_list", "hysteresis loop where the external field is applied along a", "present in the simulation. In particular, if only one external", "accepted by the hysteresis() method \"\"\" d = np.array(direction) H_dir", "is performed and m_vals is a list of scalar values", "relaxation stage, use the sim.schedule() command with the directive 'at_end=True'", "command plot(H_vals, m_vals) could be used to plot the hysteresis", "example: sim.schedule('save_vtk', at_end=True, ...) sim.hysteresis(...) *Arguments* H_ext_list: list of 3-vectors", "2*N-1) kwargs -- any keyword argument accepted by the hysteresis()", "= np.array(direction) H_dir = d / norm(d) H_norms = list(np.linspace(H_max,", "which a relaxation is performed and m_vals is a list", "It computes a hysteresis loop where the external field is", "certain action (e.g. save a VTK snapshot of the magnetisation)", "# `H_dir` m_vals = [np.dot(m, H_dir) for m in m_avg]", "be normalised automatically) H_max -- maximum field strength N --", "cur_stage = 0 num_stages = len(H_ext_list) res = [] try:", "\"\"\" Compute a hysteresis loop. This is a specialised convenience", "documentation for details. *Return value* If `fun` is not None", "end of each relaxation. 
All other keyword arguments are passed", "finished.\") log.info(\"Removing the applied field used for hysteresis.\") sim.remove_interaction(H.name) return", "callback function '{}' returned \" \"value: {}\".format(fun.__name__, retval)) except IndexError:", "== []: return # Add a new Zeeman interaction, initialised", "field to the first value in `H_ext_list` (which should be", "if only one external field should be present then do", "= [] try: while True: H_cur = H_ext_list[cur_stage] log.info( \"Entering", "to compute in each direction (thus the total number of", "(will be normalised automatically) H_max -- maximum field strength N", "containing, for each field value, the averaged value of the", "H_ext_list are exhausted. Note: The fields in H_ext_list are applied", "...) sim.hysteresis(...) *Arguments* H_ext_list: list of 3-vectors List of external", "H_max, N)) H_vals = [h * H_dir for h in", "of {}). Current field: \" \"{}\".format(cur_stage, cur_stage + 1, num_stages,", "any Zeeman interactions that are already present in the simulation.", "{}). Current field: \" \"{}\".format(cur_stage, cur_stage + 1, num_stages, H_cur))", "of external fields, where each field can have any of", "sim.m_average[0]) then the return value is a list of values", "relaxation. All other keyword arguments are passed on to the", "axis `direction` (after relaxation has been reached). Thus the command", "callable The user can pass a function here (which should", "and back (using N steps in each direction). The return", "of the magnetisation along the axis `direction` (after relaxation has", "# Add a new Zeeman interaction, initialised to zero. H", "All other keyword arguments are passed on to the relax()", "the total number of data points for the entire loop", "external field (will be normalised automatically) H_max -- maximum field", "indicating the direction of the external field (will be normalised", "determines the return value (see below). For example, if fun", "m_avg = hysteresis(sim, H_vals, fun=lambda sim: sim.m_average, **kwargs) # projected", "data points to compute in each direction (thus the total", "Simulation object as its only argument); this function is called", "of external field vectors) and then call the relax() method.", "hysteresis_loop(sim, H_max, direction, N, **kwargs): \"\"\" Compute a hysteresis loop.", "finmag.energies import Zeeman from finmag.util.helpers import norm log = logging.getLogger(name=\"finmag\")", "are applied *in addition to* any Zeeman interactions that are", "field can have any of the forms accepted by Zeeman.__init__()", "return values of `fun` after each stage. Otherwise the return", "loop where the external field is applied along a single", "'at_end=True' as in the following example: sim.schedule('save_vtk', at_end=True, ...) sim.hysteresis(...)", "value* If `fun` is not None then the return value", "end of each relaxation stage, use the sim.schedule() command with", "is not None: retval = fun(sim) res.append(retval) log.debug(\"hysteresis callback function", "calling this method. If you would like to perform a", "entire loop will be 2*N-1) kwargs -- any keyword argument", "compute in each direction (thus the total number of data", "H_dir for h in H_norms] m_avg = hysteresis(sim, H_vals, fun=lambda", "the first value in `H_ext_list` (which should be a list", "fun is not None: retval = fun(sim) res.append(retval) log.debug(\"hysteresis callback", "each direction). 
The return value is a pair (H_vals, m_vals),", "of the magnetisation) at the end of each relaxation stage,", "d / norm(d) H_norms = list(np.linspace(H_max, -H_max, N)) + \\", "the direction of the external field (will be normalised automatically)", "argument); this function is called after each relaxation and determines", "`H_dir` m_vals = [np.dot(m, H_dir) for m in m_avg] return", "sim.schedule('save_vtk', at_end=True, ...) sim.hysteresis(...) *Arguments* H_ext_list: list of 3-vectors List", "the external field is applied along a single axis and", "**kwargs) # projected lengths of the averaged magnetisation values along", "track of the current stage of the hysteresis loop. cur_stage", "details). fun: callable The user can pass a function here", "field value, the averaged value of the magnetisation along the", "any keyword argument accepted by the hysteresis() method \"\"\" d", "in H_norms] m_avg = hysteresis(sim, H_vals, fun=lambda sim: sim.m_average, **kwargs)", "H_ext_list, and so on until all values in H_ext_list are", "single axis and changes magnitude from +H_max to -H_max and", "function is called after each relaxation and determines the return", "# We keep track of the current stage of the", "of data points to compute in each direction (thus the", "command with the directive 'at_end=True' as in the following example:", "fun=None, **kwargs): \"\"\" Set the applied field to the first", "-- maximum field strength N -- number of data points", "where the external field is applied along a single axis", "N -- number of data points to compute in each", "of the forms accepted by Zeeman.__init__() (see its docstring for", "= [h * H_dir for h in H_norms] m_avg =", "= (lambda sim: sim.m_average[0]) then the return value is a", "and then call the relax() method. When convergence is reached,", "the simulation. In particular, if only one external field should", "the relax() method. See its documentation for details. *Return value*", "<reponame>davidcortesortuno/finmag import os import re import glob import logging import", "all values in H_ext_list are exhausted. Note: The fields in", "field should be present then do not add any Zeeman", "in each direction). The return value is a pair (H_vals,", "\"\"\" d = np.array(direction) H_dir = d / norm(d) H_norms", "3-vectors List of external fields, where each field can have", "plot(H_vals, m_vals) could be used to plot the hysteresis loop.", "is a pair (H_vals, m_vals), where H_vals is the list", "after each stage. Otherwise the return value is None. \"\"\"", "total number of data points for the entire loop will", "1 if fun is not None: retval = fun(sim) res.append(retval)", "is a list containing an accumulation of all the return", "(H_vals, m_vals), where H_vals is the list of field strengths", "normalised automatically) H_max -- maximum field strength N -- number", "`hysteresis` method. It computes a hysteresis loop where the external", "list(np.linspace(H_max, -H_max, N)) + \\ list(np.linspace(-H_max, H_max, N)) H_vals =", "-- a vector indicating the direction of the external field" ]
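# Usage sketch (an editor's illustration, not part of the original module):
# how the two functions above compose. It assumes a finmag Simulation-like
# object `sim` providing relax(), add(), remove_interaction() and m_average
# as used above; the field strength of 1e6 A/m is illustrative only.
#
#     H_vals, m_vals = hysteresis_loop(sim, H_max=1e6,
#                                      direction=[1, 0, 0], N=21)
#
#     # H_vals are the 2*N-1 scalar field strengths, m_vals the projected
#     # average magnetisation, so the loop can be plotted directly:
#     import matplotlib.pyplot as plt
#     plt.plot(H_vals, m_vals, "o-")
#     plt.xlabel("H (A/m)")
#     plt.ylabel("<m> along H")
#     plt.show()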
[ "gradient.setColorAt(1.0, QtGui.QColor(136, 136, 136)) brush = QtGui.QBrush(gradient) palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Window, brush)", "0.3, fy: -0.4,\\n\" \" radius: 1.35, stop: 0 #fff, stop:", "_translate(\"MainWindow\", \"md5\")) self.Algorithms.setItemText(3, _translate(\"MainWindow\", \"sha224\")) self.Algorithms.setItemText(4, _translate(\"MainWindow\", \"sha1\")) self.Algorithms.setItemText(5, _translate(\"MainWindow\",", "QtGui.QBrush(QtGui.QColor(255, 255, 255)) brush.setStyle(QtCore.Qt.SolidPattern) palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.WindowText, brush) brush = QtGui.QBrush(QtGui.QColor(84,", "brush) brush = QtGui.QBrush(QtGui.QColor(255, 255, 255)) brush.setStyle(QtCore.Qt.SolidPattern) palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.Text, brush)", "self.Algorithms.addItem(\"\") self.Generate = QtWidgets.QPushButton(self.centralwidget) self.Generate.setGeometry(QtCore.QRect(190, 120, 191, 41)) palette =", "60, 191, 41)) palette = QtGui.QPalette() brush = QtGui.QBrush(QtGui.QColor(51, 51,", "self.hexify.setObjectName(\"hexify\") MainWindow.setCentralWidget(self.centralwidget) self.menubar = QtWidgets.QMenuBar(MainWindow) self.menubar.setGeometry(QtCore.QRect(0, 0, 577, 21)) self.menubar.setObjectName(\"menubar\")", "\"\\n\" \"\\n\" \"\\n\" \"\") self.Algorithms.setObjectName(\"Algorithms\") self.Algorithms.addItem(\"\") self.Algorithms.addItem(\"\") self.Algorithms.addItem(\"\") self.Algorithms.addItem(\"\") self.Algorithms.addItem(\"\")", "1 #bbb\\n\" \" );\\n\" \" }\\n\" \"\\n\" \"QComboBox:pressed {\\n\" \"", "QtGui.QBrush(QtGui.QColor(84, 84, 84)) brush.setStyle(QtCore.Qt.SolidPattern) palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.Window, brush) brush = QtGui.QBrush(QtGui.QColor(120,", "brush) brush = QtGui.QBrush(QtGui.QColor(84, 84, 84)) brush.setStyle(QtCore.Qt.SolidPattern) palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.Window, brush)", "brush = QtGui.QBrush(QtGui.QColor(51, 51, 51)) brush.setStyle(QtCore.Qt.SolidPattern) palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.Text, brush) brush", "201, 31)) palette = QtGui.QPalette() brush = QtGui.QBrush(QtGui.QColor(51, 51, 51))", "0, 128)) brush.setStyle(QtCore.Qt.SolidPattern) palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.PlaceholderText, brush) self.hexify.setPalette(palette) font = QtGui.QFont()", "self.Generate.setText(_translate(\"MainWindow\", \"GENERATE\")) self.Copy.setText(_translate(\"MainWindow\", \"COPY TO CLIPBOARD\")) self.hexify.setText(_translate(\"MainWindow\", \"Hexify?\")) self.HideShow.setIcon(QtGui.QIcon(\"Assets//EYECLOSE.png\")) if", "stop: 0 #fff, stop: 1 #ddd\\n\" \" );\\n\" \" }\\n\"", "136)) brush = QtGui.QBrush(gradient) palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.Window, brush) brush = QtGui.QBrush(QtGui.QColor(51,", "self.Password.setGeometry(QtCore.QRect(200, 210, 141, 31)) palette = QtGui.QPalette() brush = QtGui.QBrush(QtGui.QColor(255,", "font = QtGui.QFont() font.setBold(True) font.setWeight(75) self.Password.setFont(font) self.Password.setText(\"\") self.Password.setEchoMode(QtWidgets.QLineEdit.Password) self.Password.setReadOnly(True) self.Password.setObjectName(\"Password\")", "\" color: #333;\\n\" \" \\n\" \" border-radius: 13px;\\n\" \" border-style:", "0, 577, 21)) self.menubar.setObjectName(\"menubar\") MainWindow.setMenuBar(self.menubar) self.statusbar = QtWidgets.QStatusBar(MainWindow) self.statusbar.setObjectName(\"statusbar\") MainWindow.setStatusBar(self.statusbar)", 
"QtGui.QPalette.PlaceholderText, brush) self.Password.setPalette(palette) font = QtGui.QFont() font.setBold(True) font.setWeight(75) self.Password.setFont(font) self.Password.setText(\"\")", "background: qradialgradient(\\n\" \" cx: 0.3, cy: -0.4, fx: 0.3, fy:", "136, 136)) brush = QtGui.QBrush(gradient) palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Base, brush) gradient =", "QtGui.QBrush(gradient) palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Base, brush) gradient = QtGui.QRadialGradient(0.3, -0.4, 1.35, 0.3,", "brush.setStyle(QtCore.Qt.SolidPattern) palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.WindowText, brush) gradient = QtGui.QRadialGradient(0.3, -0.4, 1.35, 0.3,", "brush) brush = QtGui.QBrush(QtGui.QColor(51, 51, 51, 128)) brush.setStyle(QtCore.Qt.NoBrush) palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.PlaceholderText,", "128)) brush.setStyle(QtCore.Qt.SolidPattern) palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.PlaceholderText, brush) brush = QtGui.QBrush(QtGui.QColor(120, 120, 120))", "1.35, stop: 0 #fff, stop: 1 #bbb\\n\" \" );\\n\" \"", "255, 255)) brush.setStyle(QtCore.Qt.SolidPattern) palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.Text, brush) brush = QtGui.QBrush(QtGui.QColor(84, 84,", "255, 255, 128)) brush.setStyle(QtCore.Qt.SolidPattern) palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.PlaceholderText, brush) brush = QtGui.QBrush(QtGui.QColor(51,", "51)) brush.setStyle(QtCore.Qt.SolidPattern) palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.ButtonText, brush) gradient = QtGui.QRadialGradient(0.3, -0.4, 1.35,", "fy: -0.4,\\n\" \" radius: 1.35, stop: 0 #fff, stop: 1", ");\\n\" \" }\") self.HideShow.setText(\"\") icon = QtGui.QIcon() icon.addPixmap(QtGui.QPixmap(\"../../Desktop/EYECLOSE.png\"), QtGui.QIcon.Normal, QtGui.QIcon.Off)", "QtGui.QBrush(QtGui.QColor(84, 84, 84)) brush.setStyle(QtCore.Qt.SolidPattern) palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.Button, brush) brush = QtGui.QBrush(QtGui.QColor(255,", "\"\\n\" \"QComboBox:pressed {\\n\" \" border-style: inset;\\n\" \" background: qradialgradient(\\n\" \"", "stop: 1 #888\\n\" \" );\\n\" \" padding: 5px;\\n\" \" \\n\"", "255)) gradient.setColorAt(1.0, QtGui.QColor(136, 136, 136)) brush = QtGui.QBrush(gradient) palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.Base,", "palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.Window, brush) brush = QtGui.QBrush(QtGui.QColor(255, 255, 255, 128)) brush.setStyle(QtCore.Qt.SolidPattern)", "128)) brush.setStyle(QtCore.Qt.SolidPattern) palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.PlaceholderText, brush) brush = QtGui.QBrush(QtGui.QColor(51, 51, 51))", "brush.setStyle(QtCore.Qt.SolidPattern) palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Base, brush) brush = QtGui.QBrush(QtGui.QColor(84, 84, 84)) brush.setStyle(QtCore.Qt.SolidPattern)", "palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.Button, brush) brush = QtGui.QBrush(QtGui.QColor(51, 51, 51)) brush.setStyle(QtCore.Qt.SolidPattern) palette.setBrush(QtGui.QPalette.Inactive,", "self.Generate.setObjectName(\"Generate\") self.UserInput = QtWidgets.QLineEdit(self.centralwidget) self.UserInput.setGeometry(QtCore.QRect(190, 20, 191, 31)) palette =", "QtGui, QtWidgets from PyQt5 import QtGui, QtCore class Ui_MainWindow(object): def", "84, 84)) brush.setStyle(QtCore.Qt.SolidPattern) palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Window, brush) brush = QtGui.QBrush(QtGui.QColor(0, 0,", 
"self.Password.setReadOnly(True) self.Password.setObjectName(\"Password\") self.HideShow = QtWidgets.QPushButton(self.centralwidget) self.HideShow.setGeometry(QtCore.QRect(350, 210, 31, 31)) self.HideShow.setStyleSheet(\"QPushButton", "51)) brush.setStyle(QtCore.Qt.SolidPattern) palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.WindowText, brush) gradient = QtGui.QRadialGradient(0.3, -0.4, 1.35,", "self.Generate = QtWidgets.QPushButton(self.centralwidget) self.Generate.setGeometry(QtCore.QRect(190, 120, 191, 41)) palette = QtGui.QPalette()", "palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Button, brush) brush = QtGui.QBrush(QtGui.QColor(51, 51, 51)) brush.setStyle(QtCore.Qt.SolidPattern) palette.setBrush(QtGui.QPalette.Disabled,", "QtWidgets.QPushButton(self.centralwidget) self.Generate.setGeometry(QtCore.QRect(190, 120, 191, 41)) palette = QtGui.QPalette() brush =", "QtGui.QPalette.Text, brush) brush = QtGui.QBrush(QtGui.QColor(84, 84, 84)) brush.setStyle(QtCore.Qt.SolidPattern) palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.Base,", "= QtGui.QBrush(QtGui.QColor(84, 84, 84)) brush.setStyle(QtCore.Qt.SolidPattern) palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.Button, brush) brush =", "51, 128)) brush.setStyle(QtCore.Qt.NoBrush) palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.PlaceholderText, brush) self.Copy.setPalette(palette) font = QtGui.QFont()", "136)) brush = QtGui.QBrush(gradient) palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.Button, brush) brush = QtGui.QBrush(QtGui.QColor(51,", "#ddd\\n\" \" );\\n\" \" }\") self.Generate.setObjectName(\"Generate\") self.UserInput = QtWidgets.QLineEdit(self.centralwidget) self.UserInput.setGeometry(QtCore.QRect(190,", "= QtGui.QBrush(gradient) palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.Base, brush) gradient = QtGui.QRadialGradient(0.3, -0.4, 1.35,", "QtWidgets.QCheckBox(self.centralwidget) self.hexify.setGeometry(QtCore.QRect(250, 180, 81, 21)) palette = QtGui.QPalette() brush =", "stop: 0 #fff, stop: 1 #ddd\\n\" \" );\\n\" \" }\")", "1 #ddd\\n\" \" );\\n\" \" }\") self.HideShow.setText(\"\") icon = QtGui.QIcon()", "encryption algorithm\")) self.Algorithms.setItemText(1, _translate(\"MainWindow\", \"sha256\")) self.Algorithms.setItemText(2, _translate(\"MainWindow\", \"md5\")) self.Algorithms.setItemText(3, _translate(\"MainWindow\",", "\" padding: 5px;\\n\" \" }\\n\" \"\\n\" \"QPushButton:hover {\\n\" \" background:", "brush = QtGui.QBrush(QtGui.QColor(84, 84, 84)) brush.setStyle(QtCore.Qt.SolidPattern) palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.Button, brush) brush", "MainWindow.setStyleSheet(\"background-color: rgb(84, 84, 84);\") self.centralwidget = QtWidgets.QWidget(MainWindow) self.centralwidget.setObjectName(\"centralwidget\") self.Algorithms =", "border-radius: 20px;\\n\" \" border-style: outset;\\n\" \" background: qradialgradient(\\n\" \" cx:", "\"QPushButton:pressed {\\n\" \" border-style: inset;\\n\" \" background: qradialgradient(\\n\" \" cx:", "QtGui.QColor(255, 255, 255)) gradient.setColorAt(1.0, QtGui.QColor(136, 136, 136)) brush = QtGui.QBrush(gradient)", "0 #fff, stop: 1 #ddd\\n\" \" );\\n\" \" }\") self.Copy.setObjectName(\"Copy\")", "QtGui.QPalette.PlaceholderText, brush) self.UserInput.setPalette(palette) font = QtGui.QFont() font.setBold(True) font.setWeight(75) self.UserInput.setFont(font) self.UserInput.setObjectName(\"UserInput\")", "5px;\\n\" \" }\\n\" \"\\n\" \"QPushButton:hover {\\n\" \" background: qradialgradient(\\n\" \"", "0.3, -0.4) 
gradient.setSpread(QtGui.QGradient.PadSpread) gradient.setCoordinateMode(QtGui.QGradient.ObjectBoundingMode) gradient.setColorAt(0.0, QtGui.QColor(255, 255, 255)) gradient.setColorAt(1.0, QtGui.QColor(136,", "font.setBold(True) font.setWeight(75) self.Generate.setFont(font) self.Generate.setStyleSheet(\"QPushButton {\\n\" \" color: #333;\\n\" \"\\n\" \"", "QtGui.QBrush(QtGui.QColor(255, 255, 255)) brush.setStyle(QtCore.Qt.SolidPattern) palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.WindowText, brush) brush = QtGui.QBrush(QtGui.QColor(84,", "#fff, stop: 1 #bbb\\n\" \" );\\n\" \" }\\n\" \"\\n\" \"QPushButton:pressed", "brush.setStyle(QtCore.Qt.SolidPattern) palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.ButtonText, brush) gradient = QtGui.QRadialGradient(0.3, -0.4, 1.35, 0.3,", "#ddd\\n\" \" );\\n\" \" }\") self.HideShow.setText(\"\") icon = QtGui.QIcon() icon.addPixmap(QtGui.QPixmap(\"../../Desktop/EYECLOSE.png\"),", "136, 136)) brush = QtGui.QBrush(gradient) palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.Base, brush) gradient =", "#fff, stop: 1 #ddd\\n\" \" );\\n\" \" }\\n\" \"\\n\" \"\\n\"", "= QtGui.QBrush(QtGui.QColor(51, 51, 51, 128)) brush.setStyle(QtCore.Qt.NoBrush) palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.PlaceholderText, brush) self.Generate.setPalette(palette)", "palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.PlaceholderText, brush) brush = QtGui.QBrush(QtGui.QColor(255, 255, 255)) brush.setStyle(QtCore.Qt.SolidPattern) palette.setBrush(QtGui.QPalette.Inactive,", "QtWidgets.QLineEdit(self.centralwidget) self.Password.setGeometry(QtCore.QRect(200, 210, 141, 31)) palette = QtGui.QPalette() brush =", "QtGui.QPalette.Window, brush) brush = QtGui.QBrush(QtGui.QColor(51, 51, 51, 128)) brush.setStyle(QtCore.Qt.NoBrush) palette.setBrush(QtGui.QPalette.Inactive,", "brush.setStyle(QtCore.Qt.SolidPattern) palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.Text, brush) brush = QtGui.QBrush(QtGui.QColor(84, 84, 84)) brush.setStyle(QtCore.Qt.SolidPattern)", "Ui_MainWindow(object): def setupUi(self, MainWindow): MainWindow.setObjectName(\"MainWindow\") MainWindow.resize(577, 341) palette = QtGui.QPalette()", "QtGui.QPalette.WindowText, brush) brush = QtGui.QBrush(QtGui.QColor(84, 84, 84)) brush.setStyle(QtCore.Qt.SolidPattern) palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.Button,", "84, 84);\") self.centralwidget = QtWidgets.QWidget(MainWindow) self.centralwidget.setObjectName(\"centralwidget\") self.Algorithms = QtWidgets.QComboBox(self.centralwidget) self.Algorithms.setGeometry(QtCore.QRect(190,", "palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.WindowText, brush) brush = QtGui.QBrush(QtGui.QColor(84, 84, 84)) brush.setStyle(QtCore.Qt.SolidPattern) palette.setBrush(QtGui.QPalette.Disabled,", "QtGui.QColor(136, 136, 136)) brush = QtGui.QBrush(gradient) palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.Button, brush) brush", "QtGui.QFont() font.setPointSize(12) font.setBold(True) font.setWeight(75) self.hexify.setFont(font) self.hexify.setObjectName(\"hexify\") MainWindow.setCentralWidget(self.centralwidget) self.menubar = QtWidgets.QMenuBar(MainWindow)", "84, 84)) brush.setStyle(QtCore.Qt.SolidPattern) palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.Base, brush) brush = QtGui.QBrush(QtGui.QColor(84, 84,", "\"sha512\")) self.Generate.setText(_translate(\"MainWindow\", \"GENERATE\")) self.Copy.setText(_translate(\"MainWindow\", \"COPY TO CLIPBOARD\")) self.hexify.setText(_translate(\"MainWindow\", 
\"Hexify?\")) self.HideShow.setIcon(QtGui.QIcon(\"Assets//EYECLOSE.png\"))", "= QtGui.QBrush(QtGui.QColor(255, 255, 255)) brush.setStyle(QtCore.Qt.SolidPattern) palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.Text, brush) brush =", "84)) brush.setStyle(QtCore.Qt.SolidPattern) palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Button, brush) brush = QtGui.QBrush(QtGui.QColor(84, 84, 84))", "brush.setStyle(QtCore.Qt.SolidPattern) palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.Window, brush) brush = QtGui.QBrush(QtGui.QColor(120, 120, 120)) brush.setStyle(QtCore.Qt.SolidPattern)", "QtWidgets.QMenuBar(MainWindow) self.menubar.setGeometry(QtCore.QRect(0, 0, 577, 21)) self.menubar.setObjectName(\"menubar\") MainWindow.setMenuBar(self.menubar) self.statusbar = QtWidgets.QStatusBar(MainWindow)", "QtGui.QPalette.Text, brush) brush = QtGui.QBrush(QtGui.QColor(51, 51, 51)) brush.setStyle(QtCore.Qt.SolidPattern) palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.ButtonText,", "128)) brush.setStyle(QtCore.Qt.SolidPattern) palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.PlaceholderText, brush) self.UserInput.setPalette(palette) font = QtGui.QFont() font.setBold(True)", "0 #fff, stop: 1 #ddd\\n\" \" );\\n\" \" }\") self.HideShow.setText(\"\")", "brush = QtGui.QBrush(QtGui.QColor(84, 84, 84)) brush.setStyle(QtCore.Qt.SolidPattern) palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.Base, brush) brush", "= QtWidgets.QPushButton(self.centralwidget) self.HideShow.setGeometry(QtCore.QRect(350, 210, 31, 31)) self.HideShow.setStyleSheet(\"QPushButton {\\n\" \" color:", "210, 31, 31)) self.HideShow.setStyleSheet(\"QPushButton {\\n\" \" color: #333;\\n\" \"\\n\" \"", "QtCore class Ui_MainWindow(object): def setupUi(self, MainWindow): MainWindow.setObjectName(\"MainWindow\") MainWindow.resize(577, 341) palette", "sys app = QtWidgets.QApplication(sys.argv) MainWindow = QtWidgets.QMainWindow() ui = Ui_MainWindow()", "= QtGui.QFont() font.setBold(True) font.setWeight(75) self.UserInput.setFont(font) self.UserInput.setObjectName(\"UserInput\") self.Password = QtWidgets.QLineEdit(self.centralwidget) self.Password.setGeometry(QtCore.QRect(200,", "QtGui.QBrush(gradient) palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.Button, brush) brush = QtGui.QBrush(QtGui.QColor(51, 51, 51)) brush.setStyle(QtCore.Qt.SolidPattern)", "\"Select encryption algorithm\")) self.Algorithms.setItemText(1, _translate(\"MainWindow\", \"sha256\")) self.Algorithms.setItemText(2, _translate(\"MainWindow\", \"md5\")) self.Algorithms.setItemText(3,", "inset;\\n\" \" background: qradialgradient(\\n\" \" cx: 0.4, cy: -0.1, fx:", "255)) gradient.setColorAt(1.0, QtGui.QColor(136, 136, 136)) brush = QtGui.QBrush(gradient) palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.Button,", "#333;\\n\" \"\\n\" \" border-radius: 7px;\\n\" \" border-style: outset;\\n\" \" background:", "51)) brush.setStyle(QtCore.Qt.SolidPattern) palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.Text, brush) brush = QtGui.QBrush(QtGui.QColor(51, 51, 51))", "outset;\\n\" \" background: qradialgradient(\\n\" \" cx: 0.3, cy: -0.4, fx:", "= QtWidgets.QPushButton(self.centralwidget) self.Copy.setGeometry(QtCore.QRect(190, 250, 201, 31)) palette = QtGui.QPalette() brush", "255)) gradient.setColorAt(1.0, QtGui.QColor(136, 136, 136)) brush = QtGui.QBrush(gradient) palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.Button,", "self.UserInput.setPalette(palette) font = QtGui.QFont() font.setBold(True) font.setWeight(75) 
self.UserInput.setFont(font) self.UserInput.setObjectName(\"UserInput\") self.Password =", "QtWidgets.QStatusBar(MainWindow) self.statusbar.setObjectName(\"statusbar\") MainWindow.setStatusBar(self.statusbar) self.retranslateUi(MainWindow) QtCore.QMetaObject.connectSlotsByName(MainWindow) def retranslateUi(self, MainWindow): _translate =", ");\\n\" \" }\") self.Copy.setObjectName(\"Copy\") self.hexify = QtWidgets.QCheckBox(self.centralwidget) self.hexify.setGeometry(QtCore.QRect(250, 180, 81,", "brush) brush = QtGui.QBrush(QtGui.QColor(84, 84, 84)) brush.setStyle(QtCore.Qt.SolidPattern) palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Window, brush)", "palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.Button, brush) brush = QtGui.QBrush(QtGui.QColor(84, 84, 84)) brush.setStyle(QtCore.Qt.SolidPattern) palette.setBrush(QtGui.QPalette.Inactive,", "QtGui.QBrush(QtGui.QColor(255, 255, 255, 128)) brush.setStyle(QtCore.Qt.SolidPattern) palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.PlaceholderText, brush) brush =", "84)) brush.setStyle(QtCore.Qt.SolidPattern) palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.Window, brush) brush = QtGui.QBrush(QtGui.QColor(255, 255, 255,", "\" color: #333;\\n\" \"\\n\" \" \\n\" \" border-style: outset;\\n\" \"", "341) palette = QtGui.QPalette() brush = QtGui.QBrush(QtGui.QColor(255, 255, 255)) brush.setStyle(QtCore.Qt.SolidPattern)", "128)) brush.setStyle(QtCore.Qt.SolidPattern) palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.PlaceholderText, brush) self.hexify.setPalette(palette) font = QtGui.QFont() font.setPointSize(12)", "0 #fff, stop: 1 #ddd\\n\" \" );\\n\" \" }\") self.Generate.setObjectName(\"Generate\")", "brush) brush = QtGui.QBrush(QtGui.QColor(84, 84, 84)) brush.setStyle(QtCore.Qt.SolidPattern) palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.Base, brush)", "255, 255)) brush.setStyle(QtCore.Qt.SolidPattern) palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.Text, brush) brush = QtGui.QBrush(QtGui.QColor(84, 84,", ");\\n\" \" }\") self.Generate.setObjectName(\"Generate\") self.UserInput = QtWidgets.QLineEdit(self.centralwidget) self.UserInput.setGeometry(QtCore.QRect(190, 20, 191,", "= QtGui.QBrush(QtGui.QColor(120, 120, 120)) brush.setStyle(QtCore.Qt.SolidPattern) palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Text, brush) brush =", "QtGui.QRadialGradient(0.3, -0.4, 1.35, 0.3, -0.4) gradient.setSpread(QtGui.QGradient.PadSpread) gradient.setCoordinateMode(QtGui.QGradient.ObjectBoundingMode) gradient.setColorAt(0.0, QtGui.QColor(255, 255,", "self.hexify.setPalette(palette) font = QtGui.QFont() font.setPointSize(12) font.setBold(True) font.setWeight(75) self.hexify.setFont(font) self.hexify.setObjectName(\"hexify\") MainWindow.setCentralWidget(self.centralwidget)", "= QtWidgets.QComboBox(self.centralwidget) self.Algorithms.setGeometry(QtCore.QRect(190, 60, 191, 41)) palette = QtGui.QPalette() brush", "30)) self.HideShow.setObjectName(\"HideShow\") self.Copy = QtWidgets.QPushButton(self.centralwidget) self.Copy.setGeometry(QtCore.QRect(190, 250, 201, 31)) palette", "brush.setStyle(QtCore.Qt.SolidPattern) palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.Text, brush) brush = QtGui.QBrush(QtGui.QColor(51, 51, 51)) brush.setStyle(QtCore.Qt.SolidPattern)", "CLIPBOARD\")) self.hexify.setText(_translate(\"MainWindow\", \"Hexify?\")) self.HideShow.setIcon(QtGui.QIcon(\"Assets//EYECLOSE.png\")) if __name__ == \"__main__\": import sys", "}\") self.Generate.setObjectName(\"Generate\") self.UserInput = 
QtWidgets.QLineEdit(self.centralwidget) self.UserInput.setGeometry(QtCore.QRect(190, 20, 191, 31)) palette", "255)) brush.setStyle(QtCore.Qt.SolidPattern) palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.Text, brush) brush = QtGui.QBrush(QtGui.QColor(84, 84, 84))", "self.retranslateUi(MainWindow) QtCore.QMetaObject.connectSlotsByName(MainWindow) def retranslateUi(self, MainWindow): _translate = QtCore.QCoreApplication.translate MainWindow.setWindowTitle(_translate(\"MainWindow\", \"MainWindow\"))", "cy: -0.4, fx: 0.3, fy: -0.4,\\n\" \" radius: 1.35, stop:", "self.Algorithms.addItem(\"\") self.Algorithms.addItem(\"\") self.Generate = QtWidgets.QPushButton(self.centralwidget) self.Generate.setGeometry(QtCore.QRect(190, 120, 191, 41)) palette", "128)) brush.setStyle(QtCore.Qt.NoBrush) palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.PlaceholderText, brush) self.Copy.setPalette(palette) font = QtGui.QFont() font.setBold(True)", "_translate(\"MainWindow\", \"sha512\")) self.Generate.setText(_translate(\"MainWindow\", \"GENERATE\")) self.Copy.setText(_translate(\"MainWindow\", \"COPY TO CLIPBOARD\")) self.hexify.setText(_translate(\"MainWindow\", \"Hexify?\"))", "136)) brush = QtGui.QBrush(gradient) palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.Window, brush) brush = QtGui.QBrush(QtGui.QColor(255,", "self.Algorithms = QtWidgets.QComboBox(self.centralwidget) self.Algorithms.setGeometry(QtCore.QRect(190, 60, 191, 41)) palette = QtGui.QPalette()", "brush = QtGui.QBrush(gradient) palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Button, brush) brush = QtGui.QBrush(QtGui.QColor(51, 51,", "QtGui.QBrush(gradient) palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Window, brush) brush = QtGui.QBrush(QtGui.QColor(51, 51, 51, 128))", "\" border-radius: 13px;\\n\" \" border-style: outset;\\n\" \" background: qradialgradient(\\n\" \"", "palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.Text, brush) brush = QtGui.QBrush(QtGui.QColor(51, 51, 51)) brush.setStyle(QtCore.Qt.SolidPattern) palette.setBrush(QtGui.QPalette.Inactive,", "\\n\" \" border-style: outset;\\n\" \" background: qradialgradient(\\n\" \" cx: 0.3,", "brush.setStyle(QtCore.Qt.SolidPattern) palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.Button, brush) brush = QtGui.QBrush(QtGui.QColor(84, 84, 84)) brush.setStyle(QtCore.Qt.SolidPattern)", "51, 51, 128)) brush.setStyle(QtCore.Qt.NoBrush) palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.PlaceholderText, brush) brush = QtGui.QBrush(QtGui.QColor(51,", "= QtWidgets.QLineEdit(self.centralwidget) self.Password.setGeometry(QtCore.QRect(200, 210, 141, 31)) palette = QtGui.QPalette() brush", "81, 21)) palette = QtGui.QPalette() brush = QtGui.QBrush(QtGui.QColor(255, 255, 255))", "= QtGui.QFont() font.setBold(True) font.setWeight(75) self.Copy.setFont(font) self.Copy.setStyleSheet(\"QPushButton {\\n\" \" color: #333;\\n\"", "136)) brush = QtGui.QBrush(gradient) palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.Window, brush) brush = QtGui.QBrush(QtGui.QColor(51,", "font.setWeight(75) self.hexify.setFont(font) self.hexify.setObjectName(\"hexify\") MainWindow.setCentralWidget(self.centralwidget) self.menubar = QtWidgets.QMenuBar(MainWindow) self.menubar.setGeometry(QtCore.QRect(0, 0, 577,", "51, 128)) brush.setStyle(QtCore.Qt.NoBrush) palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.PlaceholderText, brush) brush = QtGui.QBrush(QtGui.QColor(51, 51,", "brush.setStyle(QtCore.Qt.SolidPattern) palette.setBrush(QtGui.QPalette.Disabled, 
QtGui.QPalette.ButtonText, brush) gradient = QtGui.QRadialGradient(0.3, -0.4, 1.35, 0.3,", "icon = QtGui.QIcon() icon.addPixmap(QtGui.QPixmap(\"../../Desktop/EYECLOSE.png\"), QtGui.QIcon.Normal, QtGui.QIcon.Off) self.HideShow.setIcon(icon) self.HideShow.setIconSize(QtCore.QSize(30, 30)) self.HideShow.setObjectName(\"HideShow\")", "\"sha256\")) self.Algorithms.setItemText(2, _translate(\"MainWindow\", \"md5\")) self.Algorithms.setItemText(3, _translate(\"MainWindow\", \"sha224\")) self.Algorithms.setItemText(4, _translate(\"MainWindow\", \"sha1\"))", "128)) brush.setStyle(QtCore.Qt.SolidPattern) palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.PlaceholderText, brush) self.Password.setPalette(palette) font = QtGui.QFont() font.setBold(True)", "= QtGui.QPalette() brush = QtGui.QBrush(QtGui.QColor(255, 255, 255)) brush.setStyle(QtCore.Qt.SolidPattern) palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.WindowText,", "brush.setStyle(QtCore.Qt.SolidPattern) palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Window, brush) brush = QtGui.QBrush(QtGui.QColor(0, 0, 0, 128))", "QtGui.QFont() font.setBold(True) font.setWeight(75) self.Password.setFont(font) self.Password.setText(\"\") self.Password.setEchoMode(QtWidgets.QLineEdit.Password) self.Password.setReadOnly(True) self.Password.setObjectName(\"Password\") self.HideShow =", "51, 51)) brush.setStyle(QtCore.Qt.SolidPattern) palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Text, brush) brush = QtGui.QBrush(QtGui.QColor(51, 51,", "brush = QtGui.QBrush(QtGui.QColor(51, 51, 51)) brush.setStyle(QtCore.Qt.SolidPattern) palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.Text, brush) brush", "self.Password.setObjectName(\"Password\") self.HideShow = QtWidgets.QPushButton(self.centralwidget) self.HideShow.setGeometry(QtCore.QRect(350, 210, 31, 31)) self.HideShow.setStyleSheet(\"QPushButton {\\n\"", "51)) brush.setStyle(QtCore.Qt.SolidPattern) palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.WindowText, brush) gradient = QtGui.QRadialGradient(0.3, -0.4, 1.35,", "qradialgradient(\\n\" \" cx: 0.3, cy: -0.4, fx: 0.3, fy: -0.4,\\n\"", "= QtWidgets.QStatusBar(MainWindow) self.statusbar.setObjectName(\"statusbar\") MainWindow.setStatusBar(self.statusbar) self.retranslateUi(MainWindow) QtCore.QMetaObject.connectSlotsByName(MainWindow) def retranslateUi(self, MainWindow): _translate", "QtGui.QPalette.Text, brush) brush = QtGui.QBrush(QtGui.QColor(84, 84, 84)) brush.setStyle(QtCore.Qt.SolidPattern) palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Base,", "brush) brush = QtGui.QBrush(QtGui.QColor(255, 255, 255)) brush.setStyle(QtCore.Qt.SolidPattern) palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.Text, brush)", "QtGui.QPalette.PlaceholderText, brush) self.hexify.setPalette(palette) font = QtGui.QFont() font.setPointSize(12) font.setBold(True) font.setWeight(75) self.hexify.setFont(font)", "QtGui.QBrush(QtGui.QColor(51, 51, 51, 128)) brush.setStyle(QtCore.Qt.NoBrush) palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.PlaceholderText, brush) brush =", "brush = QtGui.QBrush(gradient) palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.Window, brush) brush = QtGui.QBrush(QtGui.QColor(255, 255,", "QtGui.QColor(136, 136, 136)) brush = QtGui.QBrush(gradient) palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.Base, brush) gradient", "gradient.setColorAt(0.0, QtGui.QColor(255, 255, 255)) gradient.setColorAt(1.0, QtGui.QColor(136, 136, 136)) brush =", "577, 21)) self.menubar.setObjectName(\"menubar\") 
MainWindow.setMenuBar(self.menubar) self.statusbar = QtWidgets.QStatusBar(MainWindow) self.statusbar.setObjectName(\"statusbar\") MainWindow.setStatusBar(self.statusbar) self.retranslateUi(MainWindow)", "#ddd\\n\" \" );\\n\" \" }\\n\" \"\\n\" \"\\n\" \"\\n\" \"\\n\" \"\\n\"", "QtWidgets.QComboBox(self.centralwidget) self.Algorithms.setGeometry(QtCore.QRect(190, 60, 191, 41)) palette = QtGui.QPalette() brush =", "brush = QtGui.QBrush(QtGui.QColor(255, 255, 255, 128)) brush.setStyle(QtCore.Qt.SolidPattern) palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.PlaceholderText, brush)", "gradient.setColorAt(1.0, QtGui.QColor(136, 136, 136)) brush = QtGui.QBrush(gradient) palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.Button, brush)", "self.Copy.setGeometry(QtCore.QRect(190, 250, 201, 31)) palette = QtGui.QPalette() brush = QtGui.QBrush(QtGui.QColor(51,", "0.3, cy: -0.4, fx: 0.3, fy: -0.4,\\n\" \" radius: 1.35,", "palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.Base, brush) gradient = QtGui.QRadialGradient(0.3, -0.4, 1.35, 0.3, -0.4)", "self.Algorithms.setCurrentText(_translate(\"MainWindow\", \"Select encryption algorithm\")) self.Algorithms.setItemText(0, _translate(\"MainWindow\", \"Select encryption algorithm\")) self.Algorithms.setItemText(1,", "brush.setStyle(QtCore.Qt.SolidPattern) palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Text, brush) brush = QtGui.QBrush(QtGui.QColor(51, 51, 51)) brush.setStyle(QtCore.Qt.SolidPattern)", "_translate(\"MainWindow\", \"sha256\")) self.Algorithms.setItemText(2, _translate(\"MainWindow\", \"md5\")) self.Algorithms.setItemText(3, _translate(\"MainWindow\", \"sha224\")) self.Algorithms.setItemText(4, _translate(\"MainWindow\",", "brush.setStyle(QtCore.Qt.NoBrush) palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.PlaceholderText, brush) brush = QtGui.QBrush(QtGui.QColor(51, 51, 51)) brush.setStyle(QtCore.Qt.SolidPattern)", "QtGui.QBrush(QtGui.QColor(120, 120, 120)) brush.setStyle(QtCore.Qt.SolidPattern) palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Text, brush) brush = QtGui.QBrush(QtGui.QColor(84,", "QtGui.QFont() font.setBold(True) font.setWeight(75) self.Generate.setFont(font) self.Generate.setStyleSheet(\"QPushButton {\\n\" \" color: #333;\\n\" \"\\n\"", "QtGui.QPalette.WindowText, brush) brush = QtGui.QBrush(QtGui.QColor(84, 84, 84)) brush.setStyle(QtCore.Qt.SolidPattern) palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Button,", "gradient.setColorAt(1.0, QtGui.QColor(136, 136, 136)) brush = QtGui.QBrush(gradient) palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.Base, brush)", "\" \\n\" \" }\\n\" \"\\n\" \"\\n\" \"QComboBox:hover {\\n\" \" background:", "self.Algorithms.addItem(\"\") self.Algorithms.addItem(\"\") self.Algorithms.addItem(\"\") self.Generate = QtWidgets.QPushButton(self.centralwidget) self.Generate.setGeometry(QtCore.QRect(190, 120, 191, 41))", "self.Generate.setFont(font) self.Generate.setStyleSheet(\"QPushButton {\\n\" \" color: #333;\\n\" \"\\n\" \" border-radius: 20px;\\n\"", "brush = QtGui.QBrush(QtGui.QColor(255, 255, 255, 128)) brush.setStyle(QtCore.Qt.SolidPattern) palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.PlaceholderText, brush)", "= QtGui.QBrush(QtGui.QColor(51, 51, 51)) brush.setStyle(QtCore.Qt.SolidPattern) palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.ButtonText, brush) gradient =", "stop: 1 #bbb\\n\" \" );\\n\" \" }\\n\" \"\\n\" \"QComboBox:pressed {\\n\"", "84)) brush.setStyle(QtCore.Qt.SolidPattern) 
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Window, brush) brush = QtGui.QBrush(QtGui.QColor(0, 0, 0,", "136)) brush = QtGui.QBrush(gradient) palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Base, brush) gradient = QtGui.QRadialGradient(0.3,", "20px;\\n\" \" border-style: outset;\\n\" \" background: qradialgradient(\\n\" \" cx: 0.3,", "self.menubar.setObjectName(\"menubar\") MainWindow.setMenuBar(self.menubar) self.statusbar = QtWidgets.QStatusBar(MainWindow) self.statusbar.setObjectName(\"statusbar\") MainWindow.setStatusBar(self.statusbar) self.retranslateUi(MainWindow) QtCore.QMetaObject.connectSlotsByName(MainWindow) def", "palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Button, brush) brush = QtGui.QBrush(QtGui.QColor(120, 120, 120)) brush.setStyle(QtCore.Qt.SolidPattern) palette.setBrush(QtGui.QPalette.Disabled,", "QtGui.QBrush(QtGui.QColor(84, 84, 84)) brush.setStyle(QtCore.Qt.SolidPattern) palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Window, brush) MainWindow.setPalette(palette) MainWindow.setAutoFillBackground(False) MainWindow.setStyleSheet(\"background-color:", "QtGui.QBrush(QtGui.QColor(84, 84, 84)) brush.setStyle(QtCore.Qt.SolidPattern) palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.Button, brush) brush = QtGui.QBrush(QtGui.QColor(255,", "brush = QtGui.QBrush(QtGui.QColor(120, 120, 120)) brush.setStyle(QtCore.Qt.SolidPattern) palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.WindowText, brush) brush", "radius: 1.35, stop: 0 #fff, stop: 1 #ddd\\n\" \" );\\n\"", "rgb(84, 84, 84);\") self.centralwidget = QtWidgets.QWidget(MainWindow) self.centralwidget.setObjectName(\"centralwidget\") self.Algorithms = QtWidgets.QComboBox(self.centralwidget)", "brush = QtGui.QBrush(QtGui.QColor(51, 51, 51, 128)) brush.setStyle(QtCore.Qt.NoBrush) palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.PlaceholderText, brush)", "self.Copy.setText(_translate(\"MainWindow\", \"COPY TO CLIPBOARD\")) self.hexify.setText(_translate(\"MainWindow\", \"Hexify?\")) self.HideShow.setIcon(QtGui.QIcon(\"Assets//EYECLOSE.png\")) if __name__ ==", "= QtGui.QBrush(QtGui.QColor(51, 51, 51, 128)) brush.setStyle(QtCore.Qt.NoBrush) palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.PlaceholderText, brush) brush", "brush.setStyle(QtCore.Qt.SolidPattern) palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.PlaceholderText, brush) brush = QtGui.QBrush(QtGui.QColor(51, 51, 51)) brush.setStyle(QtCore.Qt.SolidPattern)", "136, 136)) brush = QtGui.QBrush(gradient) palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.Window, brush) brush =", "255)) brush.setStyle(QtCore.Qt.SolidPattern) palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.WindowText, brush) brush = QtGui.QBrush(QtGui.QColor(84, 84, 84))", "136)) brush = QtGui.QBrush(gradient) palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.Base, brush) gradient = QtGui.QRadialGradient(0.3,", "self.Algorithms.setStyleSheet(\"QComboBox {\\n\" \" color: #333;\\n\" \"\\n\" \" \\n\" \" border-style:", "QtGui.QPalette.Button, brush) brush = QtGui.QBrush(QtGui.QColor(120, 120, 120)) brush.setStyle(QtCore.Qt.SolidPattern) palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Text,", "QtGui.QPalette() brush = QtGui.QBrush(QtGui.QColor(255, 255, 255)) brush.setStyle(QtCore.Qt.SolidPattern) palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.WindowText, brush)", "= QtGui.QBrush(QtGui.QColor(84, 84, 84)) brush.setStyle(QtCore.Qt.SolidPattern) palette.setBrush(QtGui.QPalette.Active, 
QtGui.QPalette.Window, brush) brush =", "QtGui.QBrush(QtGui.QColor(51, 51, 51, 128)) brush.setStyle(QtCore.Qt.NoBrush) palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.PlaceholderText, brush) self.Generate.setPalette(palette) font", "\"QPushButton:hover {\\n\" \" background: qradialgradient(\\n\" \" cx: 0.3, cy: -0.4,", "84);\") self.centralwidget = QtWidgets.QWidget(MainWindow) self.centralwidget.setObjectName(\"centralwidget\") self.Algorithms = QtWidgets.QComboBox(self.centralwidget) self.Algorithms.setGeometry(QtCore.QRect(190, 60,", "\"GENERATE\")) self.Copy.setText(_translate(\"MainWindow\", \"COPY TO CLIPBOARD\")) self.hexify.setText(_translate(\"MainWindow\", \"Hexify?\")) self.HideShow.setIcon(QtGui.QIcon(\"Assets//EYECLOSE.png\")) if __name__", "QtGui.QBrush(QtGui.QColor(51, 51, 51)) brush.setStyle(QtCore.Qt.SolidPattern) palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.WindowText, brush) gradient = QtGui.QRadialGradient(0.3,", "brush) brush = QtGui.QBrush(QtGui.QColor(51, 51, 51, 128)) brush.setStyle(QtCore.Qt.SolidPattern) palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.PlaceholderText,", "MainWindow.setAutoFillBackground(False) MainWindow.setStyleSheet(\"background-color: rgb(84, 84, 84);\") self.centralwidget = QtWidgets.QWidget(MainWindow) self.centralwidget.setObjectName(\"centralwidget\") self.Algorithms", "\"QComboBox:hover {\\n\" \" background: qradialgradient(\\n\" \" cx: 0.3, cy: -0.4,", "brush) brush = QtGui.QBrush(QtGui.QColor(120, 120, 120)) brush.setStyle(QtCore.Qt.SolidPattern) palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.WindowText, brush)", "__name__ == \"__main__\": import sys app = QtWidgets.QApplication(sys.argv) MainWindow =", "QtCore, QtGui, QtWidgets from PyQt5 import QtGui, QtCore class Ui_MainWindow(object):", "palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Window, brush) MainWindow.setPalette(palette) MainWindow.setAutoFillBackground(False) MainWindow.setStyleSheet(\"background-color: rgb(84, 84, 84);\") self.centralwidget", "= QtGui.QBrush(gradient) palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.Base, brush) gradient = QtGui.QRadialGradient(0.3, -0.4, 1.35,", "padding: 5px;\\n\" \" \\n\" \" }\\n\" \"\\n\" \"\\n\" \"QComboBox:hover {\\n\"", "#333;\\n\" \"\\n\" \" border-radius: 20px;\\n\" \" border-style: outset;\\n\" \" background:", "255, 255)) gradient.setColorAt(1.0, QtGui.QColor(136, 136, 136)) brush = QtGui.QBrush(gradient) palette.setBrush(QtGui.QPalette.Active,", "color: #333;\\n\" \"\\n\" \" border-radius: 20px;\\n\" \" border-style: outset;\\n\" \"", "QtGui.QColor(136, 136, 136)) brush = QtGui.QBrush(gradient) palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Window, brush) brush", "stop: 1 #888\\n\" \" );\\n\" \" padding: 5px;\\n\" \" }\\n\"", "\" }\") self.Generate.setObjectName(\"Generate\") self.UserInput = QtWidgets.QLineEdit(self.centralwidget) self.UserInput.setGeometry(QtCore.QRect(190, 20, 191, 31))", "QtGui.QPalette.Base, brush) brush = QtGui.QBrush(QtGui.QColor(84, 84, 84)) brush.setStyle(QtCore.Qt.SolidPattern) palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Window,", "31)) self.HideShow.setStyleSheet(\"QPushButton {\\n\" \" color: #333;\\n\" \"\\n\" \" border-radius: 7px;\\n\"", "brush) self.Algorithms.setPalette(palette) font = QtGui.QFont() font.setBold(True) font.setWeight(75) self.Algorithms.setFont(font) self.Algorithms.setStyleSheet(\"QComboBox {\\n\"", "palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.WindowText, brush) brush = 
QtGui.QBrush(QtGui.QColor(84, 84, 84)) brush.setStyle(QtCore.Qt.SolidPattern) palette.setBrush(QtGui.QPalette.Active,", "brush = QtGui.QBrush(QtGui.QColor(84, 84, 84)) brush.setStyle(QtCore.Qt.SolidPattern) palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Base, brush) brush", "51)) brush.setStyle(QtCore.Qt.SolidPattern) palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.ButtonText, brush) gradient = QtGui.QRadialGradient(0.3, -0.4, 1.35,", "QtGui.QPalette.Button, brush) brush = QtGui.QBrush(QtGui.QColor(255, 255, 255)) brush.setStyle(QtCore.Qt.SolidPattern) palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.Text,", "84)) brush.setStyle(QtCore.Qt.SolidPattern) palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Button, brush) brush = QtGui.QBrush(QtGui.QColor(120, 120, 120))", "136)) brush = QtGui.QBrush(gradient) palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.Base, brush) gradient = QtGui.QRadialGradient(0.3,", "51, 51)) brush.setStyle(QtCore.Qt.SolidPattern) palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.WindowText, brush) gradient = QtGui.QRadialGradient(0.3, -0.4,", "\" background: qradialgradient(\\n\" \" cx: 0.4, cy: -0.1, fx: 0.4,", "QtGui.QPalette.PlaceholderText, brush) brush = QtGui.QBrush(QtGui.QColor(51, 51, 51)) brush.setStyle(QtCore.Qt.SolidPattern) palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.WindowText,", "palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.Text, brush) brush = QtGui.QBrush(QtGui.QColor(84, 84, 84)) brush.setStyle(QtCore.Qt.SolidPattern) palette.setBrush(QtGui.QPalette.Inactive,", "self.HideShow.setIconSize(QtCore.QSize(30, 30)) self.HideShow.setObjectName(\"HideShow\") self.Copy = QtWidgets.QPushButton(self.centralwidget) self.Copy.setGeometry(QtCore.QRect(190, 250, 201, 31))", "QtGui.QBrush(QtGui.QColor(51, 51, 51, 128)) brush.setStyle(QtCore.Qt.NoBrush) palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.PlaceholderText, brush) self.Copy.setPalette(palette) font", "brush.setStyle(QtCore.Qt.SolidPattern) palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.PlaceholderText, brush) brush = QtGui.QBrush(QtGui.QColor(255, 255, 255)) brush.setStyle(QtCore.Qt.SolidPattern)", "\" );\\n\" \" }\") self.HideShow.setText(\"\") icon = QtGui.QIcon() icon.addPixmap(QtGui.QPixmap(\"../../Desktop/EYECLOSE.png\"), QtGui.QIcon.Normal,", "brush = QtGui.QBrush(QtGui.QColor(51, 51, 51)) brush.setStyle(QtCore.Qt.SolidPattern) palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.WindowText, brush) gradient", "= QtGui.QBrush(gradient) palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.Window, brush) brush = QtGui.QBrush(QtGui.QColor(51, 51, 51,", "_translate(\"MainWindow\", \"Select encryption algorithm\")) self.Algorithms.setItemText(1, _translate(\"MainWindow\", \"sha256\")) self.Algorithms.setItemText(2, _translate(\"MainWindow\", \"md5\"))", "#fff, stop: 1 #ddd\\n\" \" );\\n\" \" }\") self.HideShow.setText(\"\") icon", ");\\n\" \" }\\n\" \"\\n\" \"QPushButton:pressed {\\n\" \" border-style: inset;\\n\" \"", "brush.setStyle(QtCore.Qt.SolidPattern) palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Button, brush) brush = QtGui.QBrush(QtGui.QColor(120, 120, 120)) brush.setStyle(QtCore.Qt.SolidPattern)", "brush = QtGui.QBrush(gradient) palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.Base, brush) gradient = QtGui.QRadialGradient(0.3, -0.4,", "\" }\\n\" \"\\n\" \"\\n\" \"QComboBox:hover {\\n\" \" background: qradialgradient(\\n\" \"", "palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Text, 
brush) brush = QtGui.QBrush(QtGui.QColor(84, 84, 84)) brush.setStyle(QtCore.Qt.SolidPattern) palette.setBrush(QtGui.QPalette.Disabled,", "QtGui.QBrush(QtGui.QColor(84, 84, 84)) brush.setStyle(QtCore.Qt.SolidPattern) palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Button, brush) brush = QtGui.QBrush(QtGui.QColor(84,", "stop: 1 #bbb\\n\" \" );\\n\" \" }\\n\" \"\\n\" \"QPushButton:pressed {\\n\"", "brush) brush = QtGui.QBrush(QtGui.QColor(51, 51, 51)) brush.setStyle(QtCore.Qt.SolidPattern) palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.Text, brush)", "brush.setStyle(QtCore.Qt.NoBrush) palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.PlaceholderText, brush) brush = QtGui.QBrush(QtGui.QColor(51, 51, 51)) brush.setStyle(QtCore.Qt.SolidPattern)", "QtGui.QPalette.Button, brush) brush = QtGui.QBrush(QtGui.QColor(84, 84, 84)) brush.setStyle(QtCore.Qt.SolidPattern) palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.Base,", "51, 51, 128)) brush.setStyle(QtCore.Qt.NoBrush) palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.PlaceholderText, brush) brush = QtGui.QBrush(QtGui.QColor(51,", "= QtGui.QBrush(QtGui.QColor(0, 0, 0, 128)) brush.setStyle(QtCore.Qt.SolidPattern) palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.PlaceholderText, brush) self.Password.setPalette(palette)", "QtCore.QCoreApplication.translate MainWindow.setWindowTitle(_translate(\"MainWindow\", \"MainWindow\")) self.Algorithms.setCurrentText(_translate(\"MainWindow\", \"Select encryption algorithm\")) self.Algorithms.setItemText(0, _translate(\"MainWindow\", \"Select", "120, 120)) brush.setStyle(QtCore.Qt.SolidPattern) palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.WindowText, brush) brush = QtGui.QBrush(QtGui.QColor(84, 84,", "191, 31)) palette = QtGui.QPalette() brush = QtGui.QBrush(QtGui.QColor(255, 255, 255))", "brush.setStyle(QtCore.Qt.SolidPattern) palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.Window, brush) brush = QtGui.QBrush(QtGui.QColor(255, 255, 255, 128))", "stop: 1 #ddd\\n\" \" );\\n\" \" }\") self.Generate.setObjectName(\"Generate\") self.UserInput =", "QtGui.QBrush(QtGui.QColor(51, 51, 51)) brush.setStyle(QtCore.Qt.SolidPattern) palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.ButtonText, brush) gradient = QtGui.QRadialGradient(0.3,", "import sys app = QtWidgets.QApplication(sys.argv) MainWindow = QtWidgets.QMainWindow() ui =", "brush) brush = QtGui.QBrush(QtGui.QColor(51, 51, 51)) brush.setStyle(QtCore.Qt.SolidPattern) palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.ButtonText, brush)", "-0.1, fx: 0.4, fy: -0.1,\\n\" \" radius: 1.35, stop: 0", "palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.Button, brush) brush = QtGui.QBrush(QtGui.QColor(84, 84, 84)) brush.setStyle(QtCore.Qt.SolidPattern) palette.setBrush(QtGui.QPalette.Active,", "255)) gradient.setColorAt(1.0, QtGui.QColor(136, 136, 136)) brush = QtGui.QBrush(gradient) palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Button,", "\" }\\n\" \"\\n\" \"\\n\" \"\\n\" \"\\n\" \"\\n\" \"\\n\" \"\") self.Algorithms.setObjectName(\"Algorithms\")", "encryption algorithm\")) self.Algorithms.setItemText(0, _translate(\"MainWindow\", \"Select encryption algorithm\")) self.Algorithms.setItemText(1, _translate(\"MainWindow\", \"sha256\"))", "\"\\n\" \"\\n\" \"\") self.Algorithms.setObjectName(\"Algorithms\") self.Algorithms.addItem(\"\") self.Algorithms.addItem(\"\") self.Algorithms.addItem(\"\") self.Algorithms.addItem(\"\") self.Algorithms.addItem(\"\") self.Algorithms.addItem(\"\")", 
"255)) gradient.setColorAt(1.0, QtGui.QColor(136, 136, 136)) brush = QtGui.QBrush(gradient) palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.Base,", "= QtGui.QFont() font.setBold(True) font.setWeight(75) self.Password.setFont(font) self.Password.setText(\"\") self.Password.setEchoMode(QtWidgets.QLineEdit.Password) self.Password.setReadOnly(True) self.Password.setObjectName(\"Password\") self.HideShow", "QtGui.QPalette.PlaceholderText, brush) self.Copy.setPalette(palette) font = QtGui.QFont() font.setBold(True) font.setWeight(75) self.Copy.setFont(font) self.Copy.setStyleSheet(\"QPushButton", "QtGui.QPalette.Button, brush) brush = QtGui.QBrush(QtGui.QColor(84, 84, 84)) brush.setStyle(QtCore.Qt.SolidPattern) palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Base,", "stop: 1 #ddd\\n\" \" );\\n\" \" }\\n\" \"\\n\" \"\\n\" \"\\n\"", "255, 128)) brush.setStyle(QtCore.Qt.SolidPattern) palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.PlaceholderText, brush) brush = QtGui.QBrush(QtGui.QColor(51, 51,", "128)) brush.setStyle(QtCore.Qt.SolidPattern) palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.PlaceholderText, brush) brush = QtGui.QBrush(QtGui.QColor(51, 51, 51))", "QtGui.QFont() font.setBold(True) font.setWeight(75) self.Algorithms.setFont(font) self.Algorithms.setStyleSheet(\"QComboBox {\\n\" \" color: #333;\\n\" \"\\n\"", "QtGui.QColor(136, 136, 136)) brush = QtGui.QBrush(gradient) palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Button, brush) brush", "font.setWeight(75) self.Copy.setFont(font) self.Copy.setStyleSheet(\"QPushButton {\\n\" \" color: #333;\\n\" \" \\n\" \"", "_translate = QtCore.QCoreApplication.translate MainWindow.setWindowTitle(_translate(\"MainWindow\", \"MainWindow\")) self.Algorithms.setCurrentText(_translate(\"MainWindow\", \"Select encryption algorithm\")) self.Algorithms.setItemText(0,", "MainWindow.setCentralWidget(self.centralwidget) self.menubar = QtWidgets.QMenuBar(MainWindow) self.menubar.setGeometry(QtCore.QRect(0, 0, 577, 21)) self.menubar.setObjectName(\"menubar\") MainWindow.setMenuBar(self.menubar)", "QtGui.QBrush(gradient) palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Button, brush) brush = QtGui.QBrush(QtGui.QColor(51, 51, 51)) brush.setStyle(QtCore.Qt.SolidPattern)", "stop: 0 #fff, stop: 1 #888\\n\" \" );\\n\" \" padding:", "51, 51, 128)) brush.setStyle(QtCore.Qt.NoBrush) palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.PlaceholderText, brush) self.Generate.setPalette(palette) font =", "136, 136)) brush = QtGui.QBrush(gradient) palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Button, brush) brush =", "self.UserInput = QtWidgets.QLineEdit(self.centralwidget) self.UserInput.setGeometry(QtCore.QRect(190, 20, 191, 31)) palette = QtGui.QPalette()", "QtGui.QIcon.Normal, QtGui.QIcon.Off) self.HideShow.setIcon(icon) self.HideShow.setIconSize(QtCore.QSize(30, 30)) self.HideShow.setObjectName(\"HideShow\") self.Copy = QtWidgets.QPushButton(self.centralwidget) self.Copy.setGeometry(QtCore.QRect(190,", "84)) brush.setStyle(QtCore.Qt.SolidPattern) palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Window, brush) MainWindow.setPalette(palette) MainWindow.setAutoFillBackground(False) MainWindow.setStyleSheet(\"background-color: rgb(84, 84,", "brush = QtGui.QBrush(QtGui.QColor(84, 84, 84)) brush.setStyle(QtCore.Qt.SolidPattern) palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.Window, brush) brush", "fx: 0.3, fy: -0.4,\\n\" \" radius: 1.35, stop: 0 #fff,", "self.Algorithms.setItemText(0, 
_translate(\"MainWindow\", \"Select encryption algorithm\")) self.Algorithms.setItemText(1, _translate(\"MainWindow\", \"sha256\")) self.Algorithms.setItemText(2, _translate(\"MainWindow\",", "gradient = QtGui.QRadialGradient(0.3, -0.4, 1.35, 0.3, -0.4) gradient.setSpread(QtGui.QGradient.PadSpread) gradient.setCoordinateMode(QtGui.QGradient.ObjectBoundingMode) gradient.setColorAt(0.0,", "brush.setStyle(QtCore.Qt.SolidPattern) palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.WindowText, brush) gradient = QtGui.QRadialGradient(0.3, -0.4, 1.35, 0.3,", "= QtWidgets.QWidget(MainWindow) self.centralwidget.setObjectName(\"centralwidget\") self.Algorithms = QtWidgets.QComboBox(self.centralwidget) self.Algorithms.setGeometry(QtCore.QRect(190, 60, 191, 41))", "brush) brush = QtGui.QBrush(QtGui.QColor(84, 84, 84)) brush.setStyle(QtCore.Qt.SolidPattern) palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.Base, brush)", "self.Copy.setObjectName(\"Copy\") self.hexify = QtWidgets.QCheckBox(self.centralwidget) self.hexify.setGeometry(QtCore.QRect(250, 180, 81, 21)) palette =", "MainWindow.setWindowTitle(_translate(\"MainWindow\", \"MainWindow\")) self.Algorithms.setCurrentText(_translate(\"MainWindow\", \"Select encryption algorithm\")) self.Algorithms.setItemText(0, _translate(\"MainWindow\", \"Select encryption", "= QtWidgets.QApplication(sys.argv) MainWindow = QtWidgets.QMainWindow() ui = Ui_MainWindow() ui.setupUi(MainWindow) MainWindow.show()", "retranslateUi(self, MainWindow): _translate = QtCore.QCoreApplication.translate MainWindow.setWindowTitle(_translate(\"MainWindow\", \"MainWindow\")) self.Algorithms.setCurrentText(_translate(\"MainWindow\", \"Select encryption", "brush) gradient = QtGui.QRadialGradient(0.3, -0.4, 1.35, 0.3, -0.4) gradient.setSpread(QtGui.QGradient.PadSpread) gradient.setCoordinateMode(QtGui.QGradient.ObjectBoundingMode)", "QtGui.QBrush(QtGui.QColor(51, 51, 51)) brush.setStyle(QtCore.Qt.SolidPattern) palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.ButtonText, brush) gradient = QtGui.QRadialGradient(0.3,", "QtGui.QBrush(QtGui.QColor(84, 84, 84)) brush.setStyle(QtCore.Qt.SolidPattern) palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.Window, brush) brush = QtGui.QBrush(QtGui.QColor(255,", "brush = QtGui.QBrush(gradient) palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.Button, brush) brush = QtGui.QBrush(QtGui.QColor(51, 51,", "palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.PlaceholderText, brush) brush = QtGui.QBrush(QtGui.QColor(120, 120, 120)) brush.setStyle(QtCore.Qt.SolidPattern) palette.setBrush(QtGui.QPalette.Disabled,", "palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.Window, brush) brush = QtGui.QBrush(QtGui.QColor(255, 255, 255)) brush.setStyle(QtCore.Qt.SolidPattern) palette.setBrush(QtGui.QPalette.Inactive,", "app = QtWidgets.QApplication(sys.argv) MainWindow = QtWidgets.QMainWindow() ui = Ui_MainWindow() ui.setupUi(MainWindow)", "QtGui.QBrush(QtGui.QColor(84, 84, 84)) brush.setStyle(QtCore.Qt.SolidPattern) palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.Button, brush) brush = QtGui.QBrush(QtGui.QColor(84,", "gradient.setSpread(QtGui.QGradient.PadSpread) gradient.setCoordinateMode(QtGui.QGradient.ObjectBoundingMode) gradient.setColorAt(0.0, QtGui.QColor(255, 255, 255)) gradient.setColorAt(1.0, QtGui.QColor(136, 136, 136))", "13px;\\n\" \" border-style: outset;\\n\" \" background: qradialgradient(\\n\" \" cx: 0.3,", "= QtGui.QIcon() icon.addPixmap(QtGui.QPixmap(\"../../Desktop/EYECLOSE.png\"), 
QtGui.QIcon.Normal, QtGui.QIcon.Off) self.HideShow.setIcon(icon) self.HideShow.setIconSize(QtCore.QSize(30, 30)) self.HideShow.setObjectName(\"HideShow\") self.Copy", "QtGui.QBrush(QtGui.QColor(255, 255, 255)) brush.setStyle(QtCore.Qt.SolidPattern) palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.Text, brush) brush = QtGui.QBrush(QtGui.QColor(84,", "136, 136)) brush = QtGui.QBrush(gradient) palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.Window, brush) brush =", "font = QtGui.QFont() font.setBold(True) font.setWeight(75) self.Algorithms.setFont(font) self.Algorithms.setStyleSheet(\"QComboBox {\\n\" \" color:", "brush.setStyle(QtCore.Qt.SolidPattern) palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.ButtonText, brush) gradient = QtGui.QRadialGradient(0.3, -0.4, 1.35, 0.3,", "brush.setStyle(QtCore.Qt.SolidPattern) palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.PlaceholderText, brush) self.UserInput.setPalette(palette) font = QtGui.QFont() font.setBold(True) font.setWeight(75)", "= QtGui.QFont() font.setBold(True) font.setWeight(75) self.Generate.setFont(font) self.Generate.setStyleSheet(\"QPushButton {\\n\" \" color: #333;\\n\"", "QtGui.QPalette.Window, brush) brush = QtGui.QBrush(QtGui.QColor(51, 51, 51, 128)) brush.setStyle(QtCore.Qt.SolidPattern) palette.setBrush(QtGui.QPalette.Disabled,", "#bbb\\n\" \" );\\n\" \" }\\n\" \"\\n\" \"QComboBox:pressed {\\n\" \" border-style:", "QtGui.QBrush(QtGui.QColor(0, 0, 0, 128)) brush.setStyle(QtCore.Qt.SolidPattern) palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.PlaceholderText, brush) self.UserInput.setPalette(palette) font", "QtGui.QBrush(QtGui.QColor(84, 84, 84)) brush.setStyle(QtCore.Qt.SolidPattern) palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Window, brush) brush = QtGui.QBrush(QtGui.QColor(0,", "{\\n\" \" color: #333;\\n\" \"\\n\" \" border-radius: 7px;\\n\" \" border-style:", "self.menubar.setGeometry(QtCore.QRect(0, 0, 577, 21)) self.menubar.setObjectName(\"menubar\") MainWindow.setMenuBar(self.menubar) self.statusbar = QtWidgets.QStatusBar(MainWindow) self.statusbar.setObjectName(\"statusbar\")", "font.setWeight(75) self.Password.setFont(font) self.Password.setText(\"\") self.Password.setEchoMode(QtWidgets.QLineEdit.Password) self.Password.setReadOnly(True) self.Password.setObjectName(\"Password\") self.HideShow = QtWidgets.QPushButton(self.centralwidget) self.HideShow.setGeometry(QtCore.QRect(350,", "= QtGui.QBrush(QtGui.QColor(51, 51, 51)) brush.setStyle(QtCore.Qt.SolidPattern) palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.WindowText, brush) gradient =", "brush) brush = QtGui.QBrush(QtGui.QColor(51, 51, 51, 128)) brush.setStyle(QtCore.Qt.NoBrush) palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.PlaceholderText,", "gradient.setColorAt(1.0, QtGui.QColor(136, 136, 136)) brush = QtGui.QBrush(gradient) palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.Window, brush)", "\" );\\n\" \" }\") self.Generate.setObjectName(\"Generate\") self.UserInput = QtWidgets.QLineEdit(self.centralwidget) self.UserInput.setGeometry(QtCore.QRect(190, 20,", "84, 84)) brush.setStyle(QtCore.Qt.SolidPattern) palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.Button, brush) brush = QtGui.QBrush(QtGui.QColor(84, 84,", "radius: 1.35, stop: 0 #fff, stop: 1 #888\\n\" \" );\\n\"", "= QtGui.QBrush(QtGui.QColor(84, 84, 84)) brush.setStyle(QtCore.Qt.SolidPattern) palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Base, brush) brush =", "255, 255, 128)) brush.setStyle(QtCore.Qt.SolidPattern) 
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.PlaceholderText, brush) brush = QtGui.QBrush(QtGui.QColor(51,", "color: #333;\\n\" \" \\n\" \" border-radius: 13px;\\n\" \" border-style: outset;\\n\"", "from PyQt5 import QtCore, QtGui, QtWidgets from PyQt5 import QtGui,", "\"\\n\" \"\\n\" \"QComboBox:hover {\\n\" \" background: qradialgradient(\\n\" \" cx: 0.3,", "84, 84)) brush.setStyle(QtCore.Qt.SolidPattern) palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.Base, brush) brush = QtGui.QBrush(QtGui.QColor(84, 84,", "QtGui.QBrush(QtGui.QColor(120, 120, 120)) brush.setStyle(QtCore.Qt.SolidPattern) palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.WindowText, brush) brush = QtGui.QBrush(QtGui.QColor(84,", "brush) MainWindow.setPalette(palette) MainWindow.setAutoFillBackground(False) MainWindow.setStyleSheet(\"background-color: rgb(84, 84, 84);\") self.centralwidget = QtWidgets.QWidget(MainWindow)", "\"QComboBox:pressed {\\n\" \" border-style: inset;\\n\" \" background: qradialgradient(\\n\" \" cx:", "palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.PlaceholderText, brush) self.Copy.setPalette(palette) font = QtGui.QFont() font.setBold(True) font.setWeight(75) self.Copy.setFont(font)", "QtGui.QColor(136, 136, 136)) brush = QtGui.QBrush(gradient) palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.Window, brush) brush", "self.hexify.setText(_translate(\"MainWindow\", \"Hexify?\")) self.HideShow.setIcon(QtGui.QIcon(\"Assets//EYECLOSE.png\")) if __name__ == \"__main__\": import sys app", "136, 136)) brush = QtGui.QBrush(gradient) palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.Button, brush) brush =", "{\\n\" \" border-style: inset;\\n\" \" background: qradialgradient(\\n\" \" cx: 0.4,", "gradient.setColorAt(1.0, QtGui.QColor(136, 136, 136)) brush = QtGui.QBrush(gradient) palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Base, brush)", "120, 120)) brush.setStyle(QtCore.Qt.SolidPattern) palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Text, brush) brush = QtGui.QBrush(QtGui.QColor(84, 84,", "self.Password.setPalette(palette) font = QtGui.QFont() font.setBold(True) font.setWeight(75) self.Password.setFont(font) self.Password.setText(\"\") self.Password.setEchoMode(QtWidgets.QLineEdit.Password) self.Password.setReadOnly(True)", "self.Copy.setStyleSheet(\"QPushButton {\\n\" \" color: #333;\\n\" \" \\n\" \" border-radius: 13px;\\n\"", "\"\\n\" \"\") self.Algorithms.setObjectName(\"Algorithms\") self.Algorithms.addItem(\"\") self.Algorithms.addItem(\"\") self.Algorithms.addItem(\"\") self.Algorithms.addItem(\"\") self.Algorithms.addItem(\"\") self.Algorithms.addItem(\"\") self.Generate", "= QtGui.QBrush(gradient) palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Base, brush) gradient = QtGui.QRadialGradient(0.3, -0.4, 1.35,", "QtGui.QIcon.Off) self.HideShow.setIcon(icon) self.HideShow.setIconSize(QtCore.QSize(30, 30)) self.HideShow.setObjectName(\"HideShow\") self.Copy = QtWidgets.QPushButton(self.centralwidget) self.Copy.setGeometry(QtCore.QRect(190, 250,", "QtGui.QBrush(QtGui.QColor(255, 255, 255)) brush.setStyle(QtCore.Qt.SolidPattern) palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.Text, brush) brush = QtGui.QBrush(QtGui.QColor(84,", "self.centralwidget.setObjectName(\"centralwidget\") self.Algorithms = QtWidgets.QComboBox(self.centralwidget) self.Algorithms.setGeometry(QtCore.QRect(190, 60, 191, 41)) palette =", "brush = QtGui.QBrush(QtGui.QColor(255, 255, 255)) brush.setStyle(QtCore.Qt.SolidPattern) 
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.WindowText, brush) brush", "brush = QtGui.QBrush(QtGui.QColor(84, 84, 84)) brush.setStyle(QtCore.Qt.SolidPattern) palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.Window, brush) brush", "84, 84)) brush.setStyle(QtCore.Qt.SolidPattern) palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.Window, brush) brush = QtGui.QBrush(QtGui.QColor(120, 120,", "128)) brush.setStyle(QtCore.Qt.NoBrush) palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.PlaceholderText, brush) brush = QtGui.QBrush(QtGui.QColor(51, 51, 51))", "brush = QtGui.QBrush(QtGui.QColor(51, 51, 51)) brush.setStyle(QtCore.Qt.SolidPattern) palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.WindowText, brush) gradient", "brush = QtGui.QBrush(QtGui.QColor(0, 0, 0, 128)) brush.setStyle(QtCore.Qt.SolidPattern) palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.PlaceholderText, brush)", "QtGui.QPalette.Window, brush) MainWindow.setPalette(palette) MainWindow.setAutoFillBackground(False) MainWindow.setStyleSheet(\"background-color: rgb(84, 84, 84);\") self.centralwidget =", "QtGui.QPalette.Button, brush) brush = QtGui.QBrush(QtGui.QColor(84, 84, 84)) brush.setStyle(QtCore.Qt.SolidPattern) palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.Base,", "== \"__main__\": import sys app = QtWidgets.QApplication(sys.argv) MainWindow = QtWidgets.QMainWindow()", "QtGui.QBrush(gradient) palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.Button, brush) brush = QtGui.QBrush(QtGui.QColor(51, 51, 51)) brush.setStyle(QtCore.Qt.SolidPattern)", "QtGui.QBrush(QtGui.QColor(51, 51, 51)) brush.setStyle(QtCore.Qt.SolidPattern) palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.WindowText, brush) gradient = QtGui.QRadialGradient(0.3,", "brush) brush = QtGui.QBrush(QtGui.QColor(0, 0, 0, 128)) brush.setStyle(QtCore.Qt.SolidPattern) palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.PlaceholderText,", "\" radius: 1.35, stop: 0 #fff, stop: 1 #888\\n\" \"", "brush = QtGui.QBrush(QtGui.QColor(51, 51, 51, 128)) brush.setStyle(QtCore.Qt.NoBrush) palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.PlaceholderText, brush)", "brush.setStyle(QtCore.Qt.SolidPattern) palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.Button, brush) brush = QtGui.QBrush(QtGui.QColor(255, 255, 255)) brush.setStyle(QtCore.Qt.SolidPattern)", "84)) brush.setStyle(QtCore.Qt.SolidPattern) palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.Base, brush) brush = QtGui.QBrush(QtGui.QColor(84, 84, 84))", "0 #fff, stop: 1 #ddd\\n\" \" );\\n\" \" }\\n\" \"\\n\"", "brush = QtGui.QBrush(gradient) palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.Window, brush) brush = QtGui.QBrush(QtGui.QColor(51, 51,", "brush) brush = QtGui.QBrush(QtGui.QColor(51, 51, 51)) brush.setStyle(QtCore.Qt.SolidPattern) palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.WindowText, brush)", "QtGui.QBrush(gradient) palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.Window, brush) brush = QtGui.QBrush(QtGui.QColor(255, 255, 255, 128))", "brush) brush = QtGui.QBrush(QtGui.QColor(84, 84, 84)) brush.setStyle(QtCore.Qt.SolidPattern) palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Base, brush)", "136, 136)) brush = QtGui.QBrush(gradient) palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.Button, brush) brush =", "QtGui.QPalette.Button, brush) brush = QtGui.QBrush(QtGui.QColor(51, 51, 51)) brush.setStyle(QtCore.Qt.SolidPattern) palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Text,", "}\\n\" 
\"\\n\" \"\\n\" \"\\n\" \"\\n\" \"\\n\" \"\\n\" \"\") self.Algorithms.setObjectName(\"Algorithms\") self.Algorithms.addItem(\"\")", "font = QtGui.QFont() font.setBold(True) font.setWeight(75) self.Generate.setFont(font) self.Generate.setStyleSheet(\"QPushButton {\\n\" \" color:", "QtGui.QPalette.PlaceholderText, brush) brush = QtGui.QBrush(QtGui.QColor(120, 120, 120)) brush.setStyle(QtCore.Qt.SolidPattern) palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.WindowText,", "QtGui.QBrush(QtGui.QColor(51, 51, 51)) brush.setStyle(QtCore.Qt.SolidPattern) palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.Text, brush) brush = QtGui.QBrush(QtGui.QColor(51,", "QtGui.QPalette.Base, brush) gradient = QtGui.QRadialGradient(0.3, -0.4, 1.35, 0.3, -0.4) gradient.setSpread(QtGui.QGradient.PadSpread)", "255, 255)) gradient.setColorAt(1.0, QtGui.QColor(136, 136, 136)) brush = QtGui.QBrush(gradient) palette.setBrush(QtGui.QPalette.Disabled,", "= QtGui.QBrush(gradient) palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Button, brush) brush = QtGui.QBrush(QtGui.QColor(51, 51, 51))", "QtGui.QPalette.PlaceholderText, brush) self.Algorithms.setPalette(palette) font = QtGui.QFont() font.setBold(True) font.setWeight(75) self.Algorithms.setFont(font) self.Algorithms.setStyleSheet(\"QComboBox", "TO CLIPBOARD\")) self.hexify.setText(_translate(\"MainWindow\", \"Hexify?\")) self.HideShow.setIcon(QtGui.QIcon(\"Assets//EYECLOSE.png\")) if __name__ == \"__main__\": import", "brush.setStyle(QtCore.Qt.SolidPattern) palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.Text, brush) brush = QtGui.QBrush(QtGui.QColor(84, 84, 84)) brush.setStyle(QtCore.Qt.SolidPattern)", "QtGui.QFont() font.setBold(True) font.setWeight(75) self.Copy.setFont(font) self.Copy.setStyleSheet(\"QPushButton {\\n\" \" color: #333;\\n\" \"", "QtGui.QBrush(QtGui.QColor(84, 84, 84)) brush.setStyle(QtCore.Qt.SolidPattern) palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.Button, brush) brush = QtGui.QBrush(QtGui.QColor(84,", "palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.Window, brush) brush = QtGui.QBrush(QtGui.QColor(255, 255, 255, 128)) brush.setStyle(QtCore.Qt.SolidPattern)", "}\\n\" \"\\n\" \"\\n\" \"QComboBox:hover {\\n\" \" background: qradialgradient(\\n\" \" cx:", "brush = QtGui.QBrush(gradient) palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Window, brush) brush = QtGui.QBrush(QtGui.QColor(51, 51,", "\"\\n\" \"\\n\" \"\\n\" \"\\n\" \"\\n\" \"\\n\" \"\") self.Algorithms.setObjectName(\"Algorithms\") self.Algorithms.addItem(\"\") self.Algorithms.addItem(\"\")", "210, 141, 31)) palette = QtGui.QPalette() brush = QtGui.QBrush(QtGui.QColor(255, 255,", "0, 128)) brush.setStyle(QtCore.Qt.SolidPattern) palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.PlaceholderText, brush) self.Password.setPalette(palette) font = QtGui.QFont()", "brush) brush = QtGui.QBrush(QtGui.QColor(255, 255, 255, 128)) brush.setStyle(QtCore.Qt.SolidPattern) palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.PlaceholderText,", "import QtGui, QtCore class Ui_MainWindow(object): def setupUi(self, MainWindow): MainWindow.setObjectName(\"MainWindow\") MainWindow.resize(577,", "= QtGui.QBrush(QtGui.QColor(51, 51, 51)) brush.setStyle(QtCore.Qt.SolidPattern) palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.Text, brush) brush =", "255)) gradient.setColorAt(1.0, QtGui.QColor(136, 136, 136)) brush = QtGui.QBrush(gradient) palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Base,", "brush = QtGui.QBrush(QtGui.QColor(51, 51, 
51, 128)) brush.setStyle(QtCore.Qt.NoBrush) palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.PlaceholderText, brush)", ");\\n\" \" }\\n\" \"\\n\" \"QComboBox:pressed {\\n\" \" border-style: inset;\\n\" \"", "palette = QtGui.QPalette() brush = QtGui.QBrush(QtGui.QColor(51, 51, 51)) brush.setStyle(QtCore.Qt.SolidPattern) palette.setBrush(QtGui.QPalette.Active,", "brush.setStyle(QtCore.Qt.SolidPattern) palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.WindowText, brush) gradient = QtGui.QRadialGradient(0.3, -0.4, 1.35, 0.3,", "#fff, stop: 1 #bbb\\n\" \" );\\n\" \" }\\n\" \"\\n\" \"QComboBox:pressed", "\" );\\n\" \" }\\n\" \"\\n\" \"\\n\" \"\\n\" \"\\n\" \"\\n\" \"\\n\"", "QtGui.QBrush(QtGui.QColor(84, 84, 84)) brush.setStyle(QtCore.Qt.SolidPattern) palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.Base, brush) brush = QtGui.QBrush(QtGui.QColor(84,", "QtGui.QBrush(gradient) palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.Window, brush) brush = QtGui.QBrush(QtGui.QColor(51, 51, 51, 128))", "}\\n\" \"\\n\" \"QPushButton:pressed {\\n\" \" border-style: inset;\\n\" \" background: qradialgradient(\\n\"", "}\") self.Copy.setObjectName(\"Copy\") self.hexify = QtWidgets.QCheckBox(self.centralwidget) self.hexify.setGeometry(QtCore.QRect(250, 180, 81, 21)) palette", "self.Generate.setGeometry(QtCore.QRect(190, 120, 191, 41)) palette = QtGui.QPalette() brush = QtGui.QBrush(QtGui.QColor(51,", "255)) brush.setStyle(QtCore.Qt.SolidPattern) palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.Text, brush) brush = QtGui.QBrush(QtGui.QColor(84, 84, 84))", "\"\\n\" \"QPushButton:hover {\\n\" \" background: qradialgradient(\\n\" \" cx: 0.3, cy:", "self.Password = QtWidgets.QLineEdit(self.centralwidget) self.Password.setGeometry(QtCore.QRect(200, 210, 141, 31)) palette = QtGui.QPalette()", "\"\\n\" \"\\n\" \"\\n\" \"\\n\" \"\") self.Algorithms.setObjectName(\"Algorithms\") self.Algorithms.addItem(\"\") self.Algorithms.addItem(\"\") self.Algorithms.addItem(\"\") self.Algorithms.addItem(\"\")", "\"sha1\")) self.Algorithms.setItemText(5, _translate(\"MainWindow\", \"sha512\")) self.Generate.setText(_translate(\"MainWindow\", \"GENERATE\")) self.Copy.setText(_translate(\"MainWindow\", \"COPY TO CLIPBOARD\"))", "21)) palette = QtGui.QPalette() brush = QtGui.QBrush(QtGui.QColor(255, 255, 255)) brush.setStyle(QtCore.Qt.SolidPattern)", "palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.Base, brush) gradient = QtGui.QRadialGradient(0.3, -0.4, 1.35, 0.3, -0.4)", "palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Text, brush) brush = QtGui.QBrush(QtGui.QColor(51, 51, 51)) brush.setStyle(QtCore.Qt.SolidPattern) palette.setBrush(QtGui.QPalette.Disabled,", "120)) brush.setStyle(QtCore.Qt.SolidPattern) palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.WindowText, brush) brush = QtGui.QBrush(QtGui.QColor(84, 84, 84))", "= QtGui.QBrush(QtGui.QColor(255, 255, 255)) brush.setStyle(QtCore.Qt.SolidPattern) palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.WindowText, brush) brush =", "\" color: #333;\\n\" \"\\n\" \" border-radius: 20px;\\n\" \" border-style: outset;\\n\"", "250, 201, 31)) palette = QtGui.QPalette() brush = QtGui.QBrush(QtGui.QColor(51, 51,", "font = QtGui.QFont() font.setBold(True) font.setWeight(75) self.Copy.setFont(font) self.Copy.setStyleSheet(\"QPushButton {\\n\" \" color:", "QtGui.QPalette.Window, brush) brush = QtGui.QBrush(QtGui.QColor(51, 51, 51, 128)) brush.setStyle(QtCore.Qt.NoBrush) palette.setBrush(QtGui.QPalette.Disabled,", "\"Hexify?\")) 
self.HideShow.setIcon(QtGui.QIcon(\"Assets//EYECLOSE.png\")) if __name__ == \"__main__\": import sys app =", "self.Algorithms.setItemText(2, _translate(\"MainWindow\", \"md5\")) self.Algorithms.setItemText(3, _translate(\"MainWindow\", \"sha224\")) self.Algorithms.setItemText(4, _translate(\"MainWindow\", \"sha1\")) self.Algorithms.setItemText(5,", "QtGui.QBrush(QtGui.QColor(84, 84, 84)) brush.setStyle(QtCore.Qt.SolidPattern) palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Base, brush) brush = QtGui.QBrush(QtGui.QColor(84,", "brush) self.UserInput.setPalette(palette) font = QtGui.QFont() font.setBold(True) font.setWeight(75) self.UserInput.setFont(font) self.UserInput.setObjectName(\"UserInput\") self.Password", "brush = QtGui.QBrush(QtGui.QColor(255, 255, 255)) brush.setStyle(QtCore.Qt.SolidPattern) palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.Text, brush) brush", "brush = QtGui.QBrush(QtGui.QColor(84, 84, 84)) brush.setStyle(QtCore.Qt.SolidPattern) palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.Button, brush) brush", "0, 128)) brush.setStyle(QtCore.Qt.SolidPattern) palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.PlaceholderText, brush) self.UserInput.setPalette(palette) font = QtGui.QFont()", "font.setWeight(75) self.Algorithms.setFont(font) self.Algorithms.setStyleSheet(\"QComboBox {\\n\" \" color: #333;\\n\" \"\\n\" \" \\n\"", "0, 0, 128)) brush.setStyle(QtCore.Qt.SolidPattern) palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.PlaceholderText, brush) self.Password.setPalette(palette) font =", "\"COPY TO CLIPBOARD\")) self.hexify.setText(_translate(\"MainWindow\", \"Hexify?\")) self.HideShow.setIcon(QtGui.QIcon(\"Assets//EYECLOSE.png\")) if __name__ == \"__main__\":", "palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Base, brush) brush = QtGui.QBrush(QtGui.QColor(84, 84, 84)) brush.setStyle(QtCore.Qt.SolidPattern) palette.setBrush(QtGui.QPalette.Disabled,", "palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Base, brush) gradient = QtGui.QRadialGradient(0.3, -0.4, 1.35, 0.3, -0.4)", "brush) brush = QtGui.QBrush(QtGui.QColor(84, 84, 84)) brush.setStyle(QtCore.Qt.SolidPattern) palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Button, brush)", "setupUi(self, MainWindow): MainWindow.setObjectName(\"MainWindow\") MainWindow.resize(577, 341) palette = QtGui.QPalette() brush =", "palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.PlaceholderText, brush) brush = QtGui.QBrush(QtGui.QColor(51, 51, 51)) brush.setStyle(QtCore.Qt.SolidPattern) palette.setBrush(QtGui.QPalette.Disabled,", "gradient.setCoordinateMode(QtGui.QGradient.ObjectBoundingMode) gradient.setColorAt(0.0, QtGui.QColor(255, 255, 255)) gradient.setColorAt(1.0, QtGui.QColor(136, 136, 136)) brush", "\" radius: 1.35, stop: 0 #fff, stop: 1 #bbb\\n\" \"", "cx: 0.4, cy: -0.1, fx: 0.4, fy: -0.1,\\n\" \" radius:", "self.HideShow.setGeometry(QtCore.QRect(350, 210, 31, 31)) self.HideShow.setStyleSheet(\"QPushButton {\\n\" \" color: #333;\\n\" \"\\n\"", "QtGui.QBrush(QtGui.QColor(51, 51, 51, 128)) brush.setStyle(QtCore.Qt.SolidPattern) palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.PlaceholderText, brush) self.Algorithms.setPalette(palette) font", "palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.WindowText, brush) gradient = QtGui.QRadialGradient(0.3, -0.4, 1.35, 0.3, -0.4)", "\" padding: 5px;\\n\" \" \\n\" \" }\\n\" \"\\n\" \"\\n\" \"QComboBox:hover", "gradient.setColorAt(1.0, QtGui.QColor(136, 136, 136)) brush = QtGui.QBrush(gradient) 
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Button, brush)", "QtGui.QBrush(QtGui.QColor(51, 51, 51)) brush.setStyle(QtCore.Qt.SolidPattern) palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Text, brush) brush = QtGui.QBrush(QtGui.QColor(51,", "border-radius: 7px;\\n\" \" border-style: outset;\\n\" \" background: qradialgradient(\\n\" \" cx:", "-0.4) gradient.setSpread(QtGui.QGradient.PadSpread) gradient.setCoordinateMode(QtGui.QGradient.ObjectBoundingMode) gradient.setColorAt(0.0, QtGui.QColor(255, 255, 255)) gradient.setColorAt(1.0, QtGui.QColor(136, 136,", "palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Window, brush) brush = QtGui.QBrush(QtGui.QColor(0, 0, 0, 128)) brush.setStyle(QtCore.Qt.SolidPattern)", "<reponame>smokedpirate/Encryption-hash-generator<gh_stars>1-10 from PyQt5 import QtCore, QtGui, QtWidgets from PyQt5 import", "QtGui.QColor(136, 136, 136)) brush = QtGui.QBrush(gradient) palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.Button, brush) brush", "self.hexify.setGeometry(QtCore.QRect(250, 180, 81, 21)) palette = QtGui.QPalette() brush = QtGui.QBrush(QtGui.QColor(255,", "self.Algorithms.setItemText(1, _translate(\"MainWindow\", \"sha256\")) self.Algorithms.setItemText(2, _translate(\"MainWindow\", \"md5\")) self.Algorithms.setItemText(3, _translate(\"MainWindow\", \"sha224\")) self.Algorithms.setItemText(4,", "self.HideShow.setIcon(QtGui.QIcon(\"Assets//EYECLOSE.png\")) if __name__ == \"__main__\": import sys app = QtWidgets.QApplication(sys.argv)", "palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.ButtonText, brush) gradient = QtGui.QRadialGradient(0.3, -0.4, 1.35, 0.3, -0.4)", "= QtWidgets.QLineEdit(self.centralwidget) self.UserInput.setGeometry(QtCore.QRect(190, 20, 191, 31)) palette = QtGui.QPalette() brush", "brush = QtGui.QBrush(QtGui.QColor(51, 51, 51, 128)) brush.setStyle(QtCore.Qt.SolidPattern) palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.PlaceholderText, brush)", "self.menubar = QtWidgets.QMenuBar(MainWindow) self.menubar.setGeometry(QtCore.QRect(0, 0, 577, 21)) self.menubar.setObjectName(\"menubar\") MainWindow.setMenuBar(self.menubar) self.statusbar", "1 #ddd\\n\" \" );\\n\" \" }\") self.Generate.setObjectName(\"Generate\") self.UserInput = QtWidgets.QLineEdit(self.centralwidget)", "31)) palette = QtGui.QPalette() brush = QtGui.QBrush(QtGui.QColor(51, 51, 51)) brush.setStyle(QtCore.Qt.SolidPattern)", "brush = QtGui.QBrush(QtGui.QColor(51, 51, 51)) brush.setStyle(QtCore.Qt.SolidPattern) palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.WindowText, brush) gradient", "MainWindow.setMenuBar(self.menubar) self.statusbar = QtWidgets.QStatusBar(MainWindow) self.statusbar.setObjectName(\"statusbar\") MainWindow.setStatusBar(self.statusbar) self.retranslateUi(MainWindow) QtCore.QMetaObject.connectSlotsByName(MainWindow) def retranslateUi(self,", "= QtCore.QCoreApplication.translate MainWindow.setWindowTitle(_translate(\"MainWindow\", \"MainWindow\")) self.Algorithms.setCurrentText(_translate(\"MainWindow\", \"Select encryption algorithm\")) self.Algorithms.setItemText(0, _translate(\"MainWindow\",", "0.4, cy: -0.1, fx: 0.4, fy: -0.1,\\n\" \" radius: 1.35,", "51)) brush.setStyle(QtCore.Qt.SolidPattern) palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.ButtonText, brush) gradient = QtGui.QRadialGradient(0.3, -0.4, 1.35,", "brush) self.Copy.setPalette(palette) font = QtGui.QFont() font.setBold(True) font.setWeight(75) self.Copy.setFont(font) self.Copy.setStyleSheet(\"QPushButton {\\n\"", "128)) 
brush.setStyle(QtCore.Qt.SolidPattern) palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.PlaceholderText, brush) brush = QtGui.QBrush(QtGui.QColor(255, 255, 255))", "brush.setStyle(QtCore.Qt.SolidPattern) palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.WindowText, brush) brush = QtGui.QBrush(QtGui.QColor(84, 84, 84)) brush.setStyle(QtCore.Qt.SolidPattern)", "= QtGui.QBrush(QtGui.QColor(84, 84, 84)) brush.setStyle(QtCore.Qt.SolidPattern) palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.Base, brush) brush =", "255)) gradient.setColorAt(1.0, QtGui.QColor(136, 136, 136)) brush = QtGui.QBrush(gradient) palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Window,", "brush = QtGui.QBrush(QtGui.QColor(51, 51, 51)) brush.setStyle(QtCore.Qt.SolidPattern) palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.ButtonText, brush) gradient", "QtGui, QtCore class Ui_MainWindow(object): def setupUi(self, MainWindow): MainWindow.setObjectName(\"MainWindow\") MainWindow.resize(577, 341)", "brush = QtGui.QBrush(gradient) palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.Window, brush) brush = QtGui.QBrush(QtGui.QColor(255, 255,", "radius: 1.35, stop: 0 #fff, stop: 1 #bbb\\n\" \" );\\n\"", "\"\\n\" \" border-radius: 7px;\\n\" \" border-style: outset;\\n\" \" background: qradialgradient(\\n\"", "brush) brush = QtGui.QBrush(QtGui.QColor(51, 51, 51)) brush.setStyle(QtCore.Qt.SolidPattern) palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.Text, brush)", "QtGui.QBrush(QtGui.QColor(51, 51, 51, 128)) brush.setStyle(QtCore.Qt.NoBrush) palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.PlaceholderText, brush) brush =", "MainWindow): _translate = QtCore.QCoreApplication.translate MainWindow.setWindowTitle(_translate(\"MainWindow\", \"MainWindow\")) self.Algorithms.setCurrentText(_translate(\"MainWindow\", \"Select encryption algorithm\"))", "QtGui.QBrush(QtGui.QColor(255, 255, 255, 128)) brush.setStyle(QtCore.Qt.SolidPattern) palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.PlaceholderText, brush) brush =", "palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.Window, brush) brush = QtGui.QBrush(QtGui.QColor(51, 51, 51, 128)) brush.setStyle(QtCore.Qt.NoBrush)", "51, 51, 128)) brush.setStyle(QtCore.Qt.NoBrush) palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.PlaceholderText, brush) self.Copy.setPalette(palette) font =", "self.hexify.setFont(font) self.hexify.setObjectName(\"hexify\") MainWindow.setCentralWidget(self.centralwidget) self.menubar = QtWidgets.QMenuBar(MainWindow) self.menubar.setGeometry(QtCore.QRect(0, 0, 577, 21))", "= QtGui.QBrush(QtGui.QColor(51, 51, 51)) brush.setStyle(QtCore.Qt.SolidPattern) palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.WindowText, brush) gradient =", "136, 136)) brush = QtGui.QBrush(gradient) palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.Base, brush) gradient =", "{\\n\" \" color: #333;\\n\" \"\\n\" \" \\n\" \" border-style: outset;\\n\"", "stop: 0 #fff, stop: 1 #bbb\\n\" \" );\\n\" \" }\\n\"", ");\\n\" \" padding: 5px;\\n\" \" \\n\" \" }\\n\" \"\\n\" \"\\n\"", "= QtGui.QBrush(QtGui.QColor(84, 84, 84)) brush.setStyle(QtCore.Qt.SolidPattern) palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Window, brush) brush =", "self.Algorithms.setFont(font) self.Algorithms.setStyleSheet(\"QComboBox {\\n\" \" color: #333;\\n\" \"\\n\" \" \\n\" \"", "1 #ddd\\n\" \" );\\n\" \" }\\n\" \"\\n\" \"\\n\" \"\\n\" \"\\n\"", "255)) brush.setStyle(QtCore.Qt.SolidPattern) palette.setBrush(QtGui.QPalette.Inactive, 
QtGui.QPalette.WindowText, brush) brush = QtGui.QBrush(QtGui.QColor(84, 84, 84))", "brush = QtGui.QBrush(QtGui.QColor(84, 84, 84)) brush.setStyle(QtCore.Qt.SolidPattern) palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Window, brush) brush", "algorithm\")) self.Algorithms.setItemText(0, _translate(\"MainWindow\", \"Select encryption algorithm\")) self.Algorithms.setItemText(1, _translate(\"MainWindow\", \"sha256\")) self.Algorithms.setItemText(2,", "brush.setStyle(QtCore.Qt.SolidPattern) palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.Button, brush) brush = QtGui.QBrush(QtGui.QColor(84, 84, 84)) brush.setStyle(QtCore.Qt.SolidPattern)", "brush = QtGui.QBrush(gradient) palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.Base, brush) gradient = QtGui.QRadialGradient(0.3, -0.4,", "-0.4, fx: 0.3, fy: -0.4,\\n\" \" radius: 1.35, stop: 0", "141, 31)) palette = QtGui.QPalette() brush = QtGui.QBrush(QtGui.QColor(255, 255, 255))", "palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.Base, brush) brush = QtGui.QBrush(QtGui.QColor(84, 84, 84)) brush.setStyle(QtCore.Qt.SolidPattern) palette.setBrush(QtGui.QPalette.Inactive,", "brush) brush = QtGui.QBrush(QtGui.QColor(255, 255, 255, 128)) brush.setStyle(QtCore.Qt.SolidPattern) palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.PlaceholderText,", "0, 0, 128)) brush.setStyle(QtCore.Qt.SolidPattern) palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.PlaceholderText, brush) self.UserInput.setPalette(palette) font =", "def retranslateUi(self, MainWindow): _translate = QtCore.QCoreApplication.translate MainWindow.setWindowTitle(_translate(\"MainWindow\", \"MainWindow\")) self.Algorithms.setCurrentText(_translate(\"MainWindow\", \"Select", "font.setWeight(75) self.UserInput.setFont(font) self.UserInput.setObjectName(\"UserInput\") self.Password = QtWidgets.QLineEdit(self.centralwidget) self.Password.setGeometry(QtCore.QRect(200, 210, 141, 31))", "font.setWeight(75) self.Generate.setFont(font) self.Generate.setStyleSheet(\"QPushButton {\\n\" \" color: #333;\\n\" \"\\n\" \" border-radius:", "\" }\") self.Copy.setObjectName(\"Copy\") self.hexify = QtWidgets.QCheckBox(self.centralwidget) self.hexify.setGeometry(QtCore.QRect(250, 180, 81, 21))", "41)) palette = QtGui.QPalette() brush = QtGui.QBrush(QtGui.QColor(51, 51, 51)) brush.setStyle(QtCore.Qt.SolidPattern)", "self.HideShow.setIcon(icon) self.HideShow.setIconSize(QtCore.QSize(30, 30)) self.HideShow.setObjectName(\"HideShow\") self.Copy = QtWidgets.QPushButton(self.centralwidget) self.Copy.setGeometry(QtCore.QRect(190, 250, 201,", "= QtGui.QBrush(QtGui.QColor(51, 51, 51)) brush.setStyle(QtCore.Qt.SolidPattern) palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.WindowText, brush) gradient =", "brush = QtGui.QBrush(QtGui.QColor(120, 120, 120)) brush.setStyle(QtCore.Qt.SolidPattern) palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Text, brush) brush", "QtGui.QFont() font.setBold(True) font.setWeight(75) self.UserInput.setFont(font) self.UserInput.setObjectName(\"UserInput\") self.Password = QtWidgets.QLineEdit(self.centralwidget) self.Password.setGeometry(QtCore.QRect(200, 210,", "31, 31)) self.HideShow.setStyleSheet(\"QPushButton {\\n\" \" color: #333;\\n\" \"\\n\" \" border-radius:", "31)) palette = QtGui.QPalette() brush = QtGui.QBrush(QtGui.QColor(255, 255, 255)) brush.setStyle(QtCore.Qt.SolidPattern)", "#ddd\\n\" \" );\\n\" \" }\") self.Copy.setObjectName(\"Copy\") self.hexify = QtWidgets.QCheckBox(self.centralwidget) self.hexify.setGeometry(QtCore.QRect(250,", 
"palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.PlaceholderText, brush) self.Password.setPalette(palette) font = QtGui.QFont() font.setBold(True) font.setWeight(75) self.Password.setFont(font)", "palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.Text, brush) brush = QtGui.QBrush(QtGui.QColor(84, 84, 84)) brush.setStyle(QtCore.Qt.SolidPattern) palette.setBrush(QtGui.QPalette.Active,", "brush = QtGui.QBrush(QtGui.QColor(84, 84, 84)) brush.setStyle(QtCore.Qt.SolidPattern) palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Window, brush) MainWindow.setPalette(palette)", "\" cx: 0.4, cy: -0.1, fx: 0.4, fy: -0.1,\\n\" \"", "= QtGui.QBrush(QtGui.QColor(0, 0, 0, 128)) brush.setStyle(QtCore.Qt.SolidPattern) palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.PlaceholderText, brush) self.UserInput.setPalette(palette)", "1.35, stop: 0 #fff, stop: 1 #888\\n\" \" );\\n\" \"", "= QtGui.QRadialGradient(0.3, -0.4, 1.35, 0.3, -0.4) gradient.setSpread(QtGui.QGradient.PadSpread) gradient.setCoordinateMode(QtGui.QGradient.ObjectBoundingMode) gradient.setColorAt(0.0, QtGui.QColor(255,", "brush.setStyle(QtCore.Qt.SolidPattern) palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.Window, brush) brush = QtGui.QBrush(QtGui.QColor(255, 255, 255, 128))", "84)) brush.setStyle(QtCore.Qt.SolidPattern) palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.Window, brush) brush = QtGui.QBrush(QtGui.QColor(255, 255, 255))", "128)) brush.setStyle(QtCore.Qt.NoBrush) palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.PlaceholderText, brush) brush = QtGui.QBrush(QtGui.QColor(51, 51, 51))", "palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.PlaceholderText, brush) self.UserInput.setPalette(palette) font = QtGui.QFont() font.setBold(True) font.setWeight(75) self.UserInput.setFont(font)", "brush.setStyle(QtCore.Qt.SolidPattern) palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.PlaceholderText, brush) self.Password.setPalette(palette) font = QtGui.QFont() font.setBold(True) font.setWeight(75)", "QtGui.QPalette() brush = QtGui.QBrush(QtGui.QColor(51, 51, 51)) brush.setStyle(QtCore.Qt.SolidPattern) palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.WindowText, brush)", "= QtWidgets.QMenuBar(MainWindow) self.menubar.setGeometry(QtCore.QRect(0, 0, 577, 21)) self.menubar.setObjectName(\"menubar\") MainWindow.setMenuBar(self.menubar) self.statusbar =", "{\\n\" \" color: #333;\\n\" \" \\n\" \" border-radius: 13px;\\n\" \"", "if __name__ == \"__main__\": import sys app = QtWidgets.QApplication(sys.argv) MainWindow", "palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.Button, brush) brush = QtGui.QBrush(QtGui.QColor(255, 255, 255)) brush.setStyle(QtCore.Qt.SolidPattern) palette.setBrush(QtGui.QPalette.Inactive,", "\" );\\n\" \" }\") self.Copy.setObjectName(\"Copy\") self.hexify = QtWidgets.QCheckBox(self.centralwidget) self.hexify.setGeometry(QtCore.QRect(250, 180,", "brush = QtGui.QBrush(QtGui.QColor(255, 255, 255)) brush.setStyle(QtCore.Qt.SolidPattern) palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.Text, brush) brush", "self.Algorithms.setItemText(3, _translate(\"MainWindow\", \"sha224\")) self.Algorithms.setItemText(4, _translate(\"MainWindow\", \"sha1\")) self.Algorithms.setItemText(5, _translate(\"MainWindow\", \"sha512\")) self.Generate.setText(_translate(\"MainWindow\",", "\\n\" \" border-radius: 13px;\\n\" \" border-style: outset;\\n\" \" background: qradialgradient(\\n\"", "136)) brush = QtGui.QBrush(gradient) palette.setBrush(QtGui.QPalette.Inactive, 
QtGui.QPalette.Window, brush) brush = QtGui.QBrush(QtGui.QColor(255,", "\" \\n\" \" border-style: outset;\\n\" \" background: qradialgradient(\\n\" \" cx:", "font.setBold(True) font.setWeight(75) self.UserInput.setFont(font) self.UserInput.setObjectName(\"UserInput\") self.Password = QtWidgets.QLineEdit(self.centralwidget) self.Password.setGeometry(QtCore.QRect(200, 210, 141,", "self.Algorithms.addItem(\"\") self.Algorithms.addItem(\"\") self.Algorithms.addItem(\"\") self.Algorithms.addItem(\"\") self.Algorithms.addItem(\"\") self.Generate = QtWidgets.QPushButton(self.centralwidget) self.Generate.setGeometry(QtCore.QRect(190, 120,", "136)) brush = QtGui.QBrush(gradient) palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Window, brush) brush = QtGui.QBrush(QtGui.QColor(51,", "self.Copy.setPalette(palette) font = QtGui.QFont() font.setBold(True) font.setWeight(75) self.Copy.setFont(font) self.Copy.setStyleSheet(\"QPushButton {\\n\" \"", "191, 41)) palette = QtGui.QPalette() brush = QtGui.QBrush(QtGui.QColor(51, 51, 51))", "QtGui.QPalette.Window, brush) brush = QtGui.QBrush(QtGui.QColor(255, 255, 255)) brush.setStyle(QtCore.Qt.SolidPattern) palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.WindowText,", "QtGui.QIcon() icon.addPixmap(QtGui.QPixmap(\"../../Desktop/EYECLOSE.png\"), QtGui.QIcon.Normal, QtGui.QIcon.Off) self.HideShow.setIcon(icon) self.HideShow.setIconSize(QtCore.QSize(30, 30)) self.HideShow.setObjectName(\"HideShow\") self.Copy =", "brush) brush = QtGui.QBrush(QtGui.QColor(255, 255, 255)) brush.setStyle(QtCore.Qt.SolidPattern) palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.WindowText, brush)", "84, 84)) brush.setStyle(QtCore.Qt.SolidPattern) palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Button, brush) brush = QtGui.QBrush(QtGui.QColor(84, 84,", "border-style: inset;\\n\" \" background: qradialgradient(\\n\" \" cx: 0.4, cy: -0.1,", "palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.PlaceholderText, brush) self.Algorithms.setPalette(palette) font = QtGui.QFont() font.setBold(True) font.setWeight(75) self.Algorithms.setFont(font)", "\"\\n\" \"QComboBox:hover {\\n\" \" background: qradialgradient(\\n\" \" cx: 0.3, cy:", "brush.setStyle(QtCore.Qt.SolidPattern) palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.Text, brush) brush = QtGui.QBrush(QtGui.QColor(51, 51, 51)) brush.setStyle(QtCore.Qt.SolidPattern)", "5px;\\n\" \" \\n\" \" }\\n\" \"\\n\" \"\\n\" \"QComboBox:hover {\\n\" \"", "self.Algorithms.setItemText(4, _translate(\"MainWindow\", \"sha1\")) self.Algorithms.setItemText(5, _translate(\"MainWindow\", \"sha512\")) self.Generate.setText(_translate(\"MainWindow\", \"GENERATE\")) self.Copy.setText(_translate(\"MainWindow\", \"COPY", "84)) brush.setStyle(QtCore.Qt.SolidPattern) palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.Base, brush) brush = QtGui.QBrush(QtGui.QColor(84, 84, 84))", "QtGui.QPalette.Button, brush) brush = QtGui.QBrush(QtGui.QColor(255, 255, 255)) brush.setStyle(QtCore.Qt.SolidPattern) palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.Text,", "brush) brush = QtGui.QBrush(QtGui.QColor(51, 51, 51)) brush.setStyle(QtCore.Qt.SolidPattern) palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.ButtonText, brush)", "QtGui.QBrush(QtGui.QColor(51, 51, 51)) brush.setStyle(QtCore.Qt.SolidPattern) palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.ButtonText, brush) gradient = QtGui.QRadialGradient(0.3,", "brush = QtGui.QBrush(QtGui.QColor(51, 51, 51)) brush.setStyle(QtCore.Qt.SolidPattern) 
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Text, brush) brush", "background: qradialgradient(\\n\" \" cx: 0.4, cy: -0.1, fx: 0.4, fy:", "MainWindow.setStatusBar(self.statusbar) self.retranslateUi(MainWindow) QtCore.QMetaObject.connectSlotsByName(MainWindow) def retranslateUi(self, MainWindow): _translate = QtCore.QCoreApplication.translate MainWindow.setWindowTitle(_translate(\"MainWindow\",", "color: #333;\\n\" \"\\n\" \" \\n\" \" border-style: outset;\\n\" \" background:", "cx: 0.3, cy: -0.4, fx: 0.3, fy: -0.4,\\n\" \" radius:", "self.Generate.setPalette(palette) font = QtGui.QFont() font.setBold(True) font.setWeight(75) self.Generate.setFont(font) self.Generate.setStyleSheet(\"QPushButton {\\n\" \"", "self.statusbar.setObjectName(\"statusbar\") MainWindow.setStatusBar(self.statusbar) self.retranslateUi(MainWindow) QtCore.QMetaObject.connectSlotsByName(MainWindow) def retranslateUi(self, MainWindow): _translate = QtCore.QCoreApplication.translate", "font.setPointSize(12) font.setBold(True) font.setWeight(75) self.hexify.setFont(font) self.hexify.setObjectName(\"hexify\") MainWindow.setCentralWidget(self.centralwidget) self.menubar = QtWidgets.QMenuBar(MainWindow) self.menubar.setGeometry(QtCore.QRect(0,", "qradialgradient(\\n\" \" cx: 0.4, cy: -0.1, fx: 0.4, fy: -0.1,\\n\"", "brush) self.Generate.setPalette(palette) font = QtGui.QFont() font.setBold(True) font.setWeight(75) self.Generate.setFont(font) self.Generate.setStyleSheet(\"QPushButton {\\n\"", "51, 51)) brush.setStyle(QtCore.Qt.SolidPattern) palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.ButtonText, brush) gradient = QtGui.QRadialGradient(0.3, -0.4,", "self.Copy = QtWidgets.QPushButton(self.centralwidget) self.Copy.setGeometry(QtCore.QRect(190, 250, 201, 31)) palette = QtGui.QPalette()", "= QtGui.QBrush(QtGui.QColor(120, 120, 120)) brush.setStyle(QtCore.Qt.SolidPattern) palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.WindowText, brush) brush =", "#888\\n\" \" );\\n\" \" padding: 5px;\\n\" \" }\\n\" \"\\n\" \"QPushButton:hover", "1.35, stop: 0 #fff, stop: 1 #ddd\\n\" \" );\\n\" \"", "class Ui_MainWindow(object): def setupUi(self, MainWindow): MainWindow.setObjectName(\"MainWindow\") MainWindow.resize(577, 341) palette =", "= QtGui.QBrush(QtGui.QColor(84, 84, 84)) brush.setStyle(QtCore.Qt.SolidPattern) palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Window, brush) MainWindow.setPalette(palette) MainWindow.setAutoFillBackground(False)", "QtGui.QPalette.PlaceholderText, brush) brush = QtGui.QBrush(QtGui.QColor(255, 255, 255)) brush.setStyle(QtCore.Qt.SolidPattern) palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.WindowText,", "84)) brush.setStyle(QtCore.Qt.SolidPattern) palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Base, brush) brush = QtGui.QBrush(QtGui.QColor(84, 84, 84))", "QtGui.QColor(136, 136, 136)) brush = QtGui.QBrush(gradient) palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Base, brush) gradient", "0 #fff, stop: 1 #888\\n\" \" );\\n\" \" padding: 5px;\\n\"", "= QtGui.QBrush(QtGui.QColor(51, 51, 51)) brush.setStyle(QtCore.Qt.SolidPattern) palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.ButtonText, brush) gradient =", "stop: 1 #ddd\\n\" \" );\\n\" \" }\") self.Copy.setObjectName(\"Copy\") self.hexify =", "\"Select encryption algorithm\")) self.Algorithms.setItemText(0, _translate(\"MainWindow\", \"Select encryption algorithm\")) self.Algorithms.setItemText(1, _translate(\"MainWindow\",", "136)) brush = QtGui.QBrush(gradient) 
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Button, brush) brush = QtGui.QBrush(QtGui.QColor(51,", "#fff, stop: 1 #888\\n\" \" );\\n\" \" padding: 5px;\\n\" \"", "1 #ddd\\n\" \" );\\n\" \" }\") self.Copy.setObjectName(\"Copy\") self.hexify = QtWidgets.QCheckBox(self.centralwidget)", "MainWindow): MainWindow.setObjectName(\"MainWindow\") MainWindow.resize(577, 341) palette = QtGui.QPalette() brush = QtGui.QBrush(QtGui.QColor(255,", "= QtGui.QBrush(gradient) palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Window, brush) brush = QtGui.QBrush(QtGui.QColor(51, 51, 51,", "136)) brush = QtGui.QBrush(gradient) palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.Button, brush) brush = QtGui.QBrush(QtGui.QColor(51,", "84, 84)) brush.setStyle(QtCore.Qt.SolidPattern) palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.Button, brush) brush = QtGui.QBrush(QtGui.QColor(84, 84,", "-0.4, 1.35, 0.3, -0.4) gradient.setSpread(QtGui.QGradient.PadSpread) gradient.setCoordinateMode(QtGui.QGradient.ObjectBoundingMode) gradient.setColorAt(0.0, QtGui.QColor(255, 255, 255))", "\" );\\n\" \" padding: 5px;\\n\" \" }\\n\" \"\\n\" \"QPushButton:hover {\\n\"", "QtGui.QPalette.Text, brush) brush = QtGui.QBrush(QtGui.QColor(51, 51, 51)) brush.setStyle(QtCore.Qt.SolidPattern) palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.ButtonText,", "255, 128)) brush.setStyle(QtCore.Qt.SolidPattern) palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.PlaceholderText, brush) brush = QtGui.QBrush(QtGui.QColor(120, 120,", "\" color: #333;\\n\" \"\\n\" \" border-radius: 7px;\\n\" \" border-style: outset;\\n\"", "136, 136)) brush = QtGui.QBrush(gradient) palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Window, brush) brush =", "font.setBold(True) font.setWeight(75) self.Algorithms.setFont(font) self.Algorithms.setStyleSheet(\"QComboBox {\\n\" \" color: #333;\\n\" \"\\n\" \"", "= QtWidgets.QCheckBox(self.centralwidget) self.hexify.setGeometry(QtCore.QRect(250, 180, 81, 21)) palette = QtGui.QPalette() brush", "palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.Button, brush) brush = QtGui.QBrush(QtGui.QColor(255, 255, 255)) brush.setStyle(QtCore.Qt.SolidPattern) palette.setBrush(QtGui.QPalette.Active,", "-0.4,\\n\" \" radius: 1.35, stop: 0 #fff, stop: 1 #bbb\\n\"", "palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.WindowText, brush) brush = QtGui.QBrush(QtGui.QColor(84, 84, 84)) brush.setStyle(QtCore.Qt.SolidPattern) palette.setBrush(QtGui.QPalette.Inactive,", "QtWidgets.QWidget(MainWindow) self.centralwidget.setObjectName(\"centralwidget\") self.Algorithms = QtWidgets.QComboBox(self.centralwidget) self.Algorithms.setGeometry(QtCore.QRect(190, 60, 191, 41)) palette", "self.Copy.setFont(font) self.Copy.setStyleSheet(\"QPushButton {\\n\" \" color: #333;\\n\" \" \\n\" \" border-radius:", "QtWidgets from PyQt5 import QtGui, QtCore class Ui_MainWindow(object): def setupUi(self,", "palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.Button, brush) brush = QtGui.QBrush(QtGui.QColor(51, 51, 51)) brush.setStyle(QtCore.Qt.SolidPattern) palette.setBrush(QtGui.QPalette.Active,", "self.UserInput.setGeometry(QtCore.QRect(190, 20, 191, 31)) palette = QtGui.QPalette() brush = QtGui.QBrush(QtGui.QColor(255,", "= QtGui.QBrush(gradient) palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.Window, brush) brush = QtGui.QBrush(QtGui.QColor(51, 51, 51,", "QtGui.QPalette.ButtonText, brush) gradient = QtGui.QRadialGradient(0.3, -0.4, 1.35, 0.3, -0.4) gradient.setSpread(QtGui.QGradient.PadSpread)", 
"84, 84)) brush.setStyle(QtCore.Qt.SolidPattern) palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.Window, brush) brush = QtGui.QBrush(QtGui.QColor(255, 255,", "\" }\\n\" \"\\n\" \"QPushButton:hover {\\n\" \" background: qradialgradient(\\n\" \" cx:", "\" \\n\" \" border-radius: 13px;\\n\" \" border-style: outset;\\n\" \" background:", "\"md5\")) self.Algorithms.setItemText(3, _translate(\"MainWindow\", \"sha224\")) self.Algorithms.setItemText(4, _translate(\"MainWindow\", \"sha1\")) self.Algorithms.setItemText(5, _translate(\"MainWindow\", \"sha512\"))", "51, 128)) brush.setStyle(QtCore.Qt.SolidPattern) palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.PlaceholderText, brush) self.Algorithms.setPalette(palette) font = QtGui.QFont()", "\" background: qradialgradient(\\n\" \" cx: 0.3, cy: -0.4, fx: 0.3,", "QtGui.QPalette.Text, brush) brush = QtGui.QBrush(QtGui.QColor(51, 51, 51)) brush.setStyle(QtCore.Qt.SolidPattern) palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.ButtonText,", "brush = QtGui.QBrush(QtGui.QColor(255, 255, 255)) brush.setStyle(QtCore.Qt.SolidPattern) palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.WindowText, brush) brush", "brush) brush = QtGui.QBrush(QtGui.QColor(51, 51, 51, 128)) brush.setStyle(QtCore.Qt.NoBrush) palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.PlaceholderText,", "brush.setStyle(QtCore.Qt.SolidPattern) palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.Button, brush) brush = QtGui.QBrush(QtGui.QColor(255, 255, 255)) brush.setStyle(QtCore.Qt.SolidPattern)", "{\\n\" \" background: qradialgradient(\\n\" \" cx: 0.3, cy: -0.4, fx:", "= QtGui.QBrush(QtGui.QColor(255, 255, 255, 128)) brush.setStyle(QtCore.Qt.SolidPattern) palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.PlaceholderText, brush) brush", "= QtGui.QFont() font.setBold(True) font.setWeight(75) self.Algorithms.setFont(font) self.Algorithms.setStyleSheet(\"QComboBox {\\n\" \" color: #333;\\n\"", "self.HideShow = QtWidgets.QPushButton(self.centralwidget) self.HideShow.setGeometry(QtCore.QRect(350, 210, 31, 31)) self.HideShow.setStyleSheet(\"QPushButton {\\n\" \"", "\\n\" \" }\\n\" \"\\n\" \"\\n\" \"QComboBox:hover {\\n\" \" background: qradialgradient(\\n\"", "= QtGui.QBrush(QtGui.QColor(84, 84, 84)) brush.setStyle(QtCore.Qt.SolidPattern) palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Button, brush) brush =", "MainWindow.setPalette(palette) MainWindow.setAutoFillBackground(False) MainWindow.setStyleSheet(\"background-color: rgb(84, 84, 84);\") self.centralwidget = QtWidgets.QWidget(MainWindow) self.centralwidget.setObjectName(\"centralwidget\")", "= QtGui.QPalette() brush = QtGui.QBrush(QtGui.QColor(51, 51, 51)) brush.setStyle(QtCore.Qt.SolidPattern) palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.WindowText,", "}\\n\" \"\\n\" \"QComboBox:pressed {\\n\" \" border-style: inset;\\n\" \" background: qradialgradient(\\n\"", "\" radius: 1.35, stop: 0 #fff, stop: 1 #ddd\\n\" \"", "\" border-radius: 20px;\\n\" \" border-style: outset;\\n\" \" background: qradialgradient(\\n\" \"", "QtGui.QPalette.WindowText, brush) brush = QtGui.QBrush(QtGui.QColor(84, 84, 84)) brush.setStyle(QtCore.Qt.SolidPattern) palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.Button,", "120)) brush.setStyle(QtCore.Qt.SolidPattern) palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Text, brush) brush = QtGui.QBrush(QtGui.QColor(84, 84, 84))", "brush.setStyle(QtCore.Qt.SolidPattern) palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.PlaceholderText, brush) 
self.hexify.setPalette(palette) font = QtGui.QFont() font.setPointSize(12) font.setBold(True)", "palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.Window, brush) brush = QtGui.QBrush(QtGui.QColor(51, 51, 51, 128)) brush.setStyle(QtCore.Qt.NoBrush)", "self.UserInput.setFont(font) self.UserInput.setObjectName(\"UserInput\") self.Password = QtWidgets.QLineEdit(self.centralwidget) self.Password.setGeometry(QtCore.QRect(200, 210, 141, 31)) palette", "= QtGui.QBrush(QtGui.QColor(84, 84, 84)) brush.setStyle(QtCore.Qt.SolidPattern) palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.Button, brush) brush =", "brush.setStyle(QtCore.Qt.SolidPattern) palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.PlaceholderText, brush) brush = QtGui.QBrush(QtGui.QColor(51, 51, 51)) brush.setStyle(QtCore.Qt.SolidPattern)", "palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Button, brush) brush = QtGui.QBrush(QtGui.QColor(84, 84, 84)) brush.setStyle(QtCore.Qt.SolidPattern) palette.setBrush(QtGui.QPalette.Disabled,", "= QtGui.QFont() font.setPointSize(12) font.setBold(True) font.setWeight(75) self.hexify.setFont(font) self.hexify.setObjectName(\"hexify\") MainWindow.setCentralWidget(self.centralwidget) self.menubar =", "brush.setStyle(QtCore.Qt.SolidPattern) palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.WindowText, brush) brush = QtGui.QBrush(QtGui.QColor(84, 84, 84)) brush.setStyle(QtCore.Qt.SolidPattern)", "PyQt5 import QtCore, QtGui, QtWidgets from PyQt5 import QtGui, QtCore", "\" border-style: outset;\\n\" \" background: qradialgradient(\\n\" \" cx: 0.3, cy:", "255, 255)) brush.setStyle(QtCore.Qt.SolidPattern) palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.WindowText, brush) brush = QtGui.QBrush(QtGui.QColor(84, 84,", "brush = QtGui.QBrush(QtGui.QColor(51, 51, 51)) brush.setStyle(QtCore.Qt.SolidPattern) palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.ButtonText, brush) gradient", "= QtGui.QBrush(QtGui.QColor(255, 255, 255)) brush.setStyle(QtCore.Qt.SolidPattern) palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.WindowText, brush) brush =", "128)) brush.setStyle(QtCore.Qt.SolidPattern) palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.PlaceholderText, brush) self.Algorithms.setPalette(palette) font = QtGui.QFont() font.setBold(True)", "QtGui.QPalette.PlaceholderText, brush) self.Generate.setPalette(palette) font = QtGui.QFont() font.setBold(True) font.setWeight(75) self.Generate.setFont(font) self.Generate.setStyleSheet(\"QPushButton", "\" }\\n\" \"\\n\" \"QComboBox:pressed {\\n\" \" border-style: inset;\\n\" \" background:", "= QtGui.QBrush(QtGui.QColor(51, 51, 51, 128)) brush.setStyle(QtCore.Qt.SolidPattern) palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.PlaceholderText, brush) self.Algorithms.setPalette(palette)", "QtGui.QBrush(gradient) palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.Base, brush) gradient = QtGui.QRadialGradient(0.3, -0.4, 1.35, 0.3,", "brush) brush = QtGui.QBrush(QtGui.QColor(120, 120, 120)) brush.setStyle(QtCore.Qt.SolidPattern) palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Text, brush)", "brush) self.hexify.setPalette(palette) font = QtGui.QFont() font.setPointSize(12) font.setBold(True) font.setWeight(75) self.hexify.setFont(font) self.hexify.setObjectName(\"hexify\")", "QtGui.QPalette.WindowText, brush) gradient = QtGui.QRadialGradient(0.3, -0.4, 1.35, 0.3, -0.4) gradient.setSpread(QtGui.QGradient.PadSpread)", "QtGui.QPalette.Window, brush) brush = QtGui.QBrush(QtGui.QColor(120, 120, 120)) 
brush.setStyle(QtCore.Qt.SolidPattern) palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.WindowText,", "= QtGui.QBrush(QtGui.QColor(84, 84, 84)) brush.setStyle(QtCore.Qt.SolidPattern) palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.Window, brush) brush =", "= QtGui.QBrush(QtGui.QColor(255, 255, 255, 128)) brush.setStyle(QtCore.Qt.SolidPattern) palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.PlaceholderText, brush) brush", "QtWidgets.QPushButton(self.centralwidget) self.Copy.setGeometry(QtCore.QRect(190, 250, 201, 31)) palette = QtGui.QPalette() brush =", "#333;\\n\" \" \\n\" \" border-radius: 13px;\\n\" \" border-style: outset;\\n\" \"", "= QtGui.QBrush(QtGui.QColor(0, 0, 0, 128)) brush.setStyle(QtCore.Qt.SolidPattern) palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.PlaceholderText, brush) self.hexify.setPalette(palette)", "#888\\n\" \" );\\n\" \" padding: 5px;\\n\" \" \\n\" \" }\\n\"", "palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.WindowText, brush) gradient = QtGui.QRadialGradient(0.3, -0.4, 1.35, 0.3, -0.4)", "255)) gradient.setColorAt(1.0, QtGui.QColor(136, 136, 136)) brush = QtGui.QBrush(gradient) palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.Window,", "brush.setStyle(QtCore.Qt.SolidPattern) palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.PlaceholderText, brush) self.Algorithms.setPalette(palette) font = QtGui.QFont() font.setBold(True) font.setWeight(75)", "icon.addPixmap(QtGui.QPixmap(\"../../Desktop/EYECLOSE.png\"), QtGui.QIcon.Normal, QtGui.QIcon.Off) self.HideShow.setIcon(icon) self.HideShow.setIconSize(QtCore.QSize(30, 30)) self.HideShow.setObjectName(\"HideShow\") self.Copy = QtWidgets.QPushButton(self.centralwidget)", "#fff, stop: 1 #ddd\\n\" \" );\\n\" \" }\") self.Generate.setObjectName(\"Generate\") self.UserInput", "palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Window, brush) brush = QtGui.QBrush(QtGui.QColor(51, 51, 51, 128)) brush.setStyle(QtCore.Qt.NoBrush)", "brush = QtGui.QBrush(QtGui.QColor(84, 84, 84)) brush.setStyle(QtCore.Qt.SolidPattern) palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Button, brush) brush", "51, 51)) brush.setStyle(QtCore.Qt.SolidPattern) palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.WindowText, brush) gradient = QtGui.QRadialGradient(0.3, -0.4,", "0, 0, 128)) brush.setStyle(QtCore.Qt.SolidPattern) palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.PlaceholderText, brush) self.hexify.setPalette(palette) font =", "self.Algorithms.setPalette(palette) font = QtGui.QFont() font.setBold(True) font.setWeight(75) self.Algorithms.setFont(font) self.Algorithms.setStyleSheet(\"QComboBox {\\n\" \"", "brush = QtGui.QBrush(gradient) palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.Button, brush) brush = QtGui.QBrush(QtGui.QColor(51, 51,", "QtGui.QPalette.Window, brush) brush = QtGui.QBrush(QtGui.QColor(255, 255, 255, 128)) brush.setStyle(QtCore.Qt.SolidPattern) palette.setBrush(QtGui.QPalette.Active,", "self.UserInput.setObjectName(\"UserInput\") self.Password = QtWidgets.QLineEdit(self.centralwidget) self.Password.setGeometry(QtCore.QRect(200, 210, 141, 31)) palette =", "brush = QtGui.QBrush(gradient) palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.Window, brush) brush = QtGui.QBrush(QtGui.QColor(51, 51,", "self.statusbar = QtWidgets.QStatusBar(MainWindow) self.statusbar.setObjectName(\"statusbar\") MainWindow.setStatusBar(self.statusbar) self.retranslateUi(MainWindow) QtCore.QMetaObject.connectSlotsByName(MainWindow) def retranslateUi(self, 
MainWindow):", "51, 128)) brush.setStyle(QtCore.Qt.NoBrush) palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.PlaceholderText, brush) self.Generate.setPalette(palette) font = QtGui.QFont()", "gradient.setColorAt(1.0, QtGui.QColor(136, 136, 136)) brush = QtGui.QBrush(gradient) palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.Button, brush)", "QtGui.QPalette.Text, brush) brush = QtGui.QBrush(QtGui.QColor(84, 84, 84)) brush.setStyle(QtCore.Qt.SolidPattern) palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.Base,", "51, 51)) brush.setStyle(QtCore.Qt.SolidPattern) palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.Text, brush) brush = QtGui.QBrush(QtGui.QColor(51, 51,", "QtGui.QPalette.Window, brush) brush = QtGui.QBrush(QtGui.QColor(255, 255, 255, 128)) brush.setStyle(QtCore.Qt.SolidPattern) palette.setBrush(QtGui.QPalette.Inactive,", "51, 51)) brush.setStyle(QtCore.Qt.SolidPattern) palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.ButtonText, brush) gradient = QtGui.QRadialGradient(0.3, -0.4,", "QtGui.QBrush(QtGui.QColor(84, 84, 84)) brush.setStyle(QtCore.Qt.SolidPattern) palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Button, brush) brush = QtGui.QBrush(QtGui.QColor(120,", "QtGui.QColor(136, 136, 136)) brush = QtGui.QBrush(gradient) palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.Window, brush) brush", "brush.setStyle(QtCore.Qt.SolidPattern) palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.Window, brush) brush = QtGui.QBrush(QtGui.QColor(255, 255, 255)) brush.setStyle(QtCore.Qt.SolidPattern)", "= QtGui.QBrush(QtGui.QColor(51, 51, 51)) brush.setStyle(QtCore.Qt.SolidPattern) palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Text, brush) brush =", "self.Algorithms.setObjectName(\"Algorithms\") self.Algorithms.addItem(\"\") self.Algorithms.addItem(\"\") self.Algorithms.addItem(\"\") self.Algorithms.addItem(\"\") self.Algorithms.addItem(\"\") self.Algorithms.addItem(\"\") self.Generate = QtWidgets.QPushButton(self.centralwidget)", "padding: 5px;\\n\" \" }\\n\" \"\\n\" \"QPushButton:hover {\\n\" \" background: qradialgradient(\\n\"", "255, 255, 128)) brush.setStyle(QtCore.Qt.SolidPattern) palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.PlaceholderText, brush) brush = QtGui.QBrush(QtGui.QColor(255,", "= QtGui.QBrush(QtGui.QColor(51, 51, 51, 128)) brush.setStyle(QtCore.Qt.NoBrush) palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.PlaceholderText, brush) self.Copy.setPalette(palette)", "brush = QtGui.QBrush(QtGui.QColor(51, 51, 51)) brush.setStyle(QtCore.Qt.SolidPattern) palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.ButtonText, brush) gradient", "QtGui.QBrush(QtGui.QColor(84, 84, 84)) brush.setStyle(QtCore.Qt.SolidPattern) palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.Base, brush) brush = QtGui.QBrush(QtGui.QColor(84,", "QtGui.QBrush(QtGui.QColor(0, 0, 0, 128)) brush.setStyle(QtCore.Qt.SolidPattern) palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.PlaceholderText, brush) self.Password.setPalette(palette) font", "1 #888\\n\" \" );\\n\" \" padding: 5px;\\n\" \" \\n\" \"", "84, 84)) brush.setStyle(QtCore.Qt.SolidPattern) palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Base, brush) brush = QtGui.QBrush(QtGui.QColor(84, 84,", "= QtGui.QBrush(gradient) palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.Button, brush) brush = QtGui.QBrush(QtGui.QColor(51, 51, 51))", "palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.PlaceholderText, brush) brush = QtGui.QBrush(QtGui.QColor(51, 51, 
51)) brush.setStyle(QtCore.Qt.SolidPattern) palette.setBrush(QtGui.QPalette.Inactive,", "255, 255)) gradient.setColorAt(1.0, QtGui.QColor(136, 136, 136)) brush = QtGui.QBrush(gradient) palette.setBrush(QtGui.QPalette.Inactive,", "120, 191, 41)) palette = QtGui.QPalette() brush = QtGui.QBrush(QtGui.QColor(51, 51,", "-0.1,\\n\" \" radius: 1.35, stop: 0 #fff, stop: 1 #ddd\\n\"", "20, 191, 31)) palette = QtGui.QPalette() brush = QtGui.QBrush(QtGui.QColor(255, 255,", "color: #333;\\n\" \"\\n\" \" border-radius: 7px;\\n\" \" border-style: outset;\\n\" \"", "1 #888\\n\" \" );\\n\" \" padding: 5px;\\n\" \" }\\n\" \"\\n\"", "\"MainWindow\")) self.Algorithms.setCurrentText(_translate(\"MainWindow\", \"Select encryption algorithm\")) self.Algorithms.setItemText(0, _translate(\"MainWindow\", \"Select encryption algorithm\"))", "brush.setStyle(QtCore.Qt.SolidPattern) palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.PlaceholderText, brush) brush = QtGui.QBrush(QtGui.QColor(120, 120, 120)) brush.setStyle(QtCore.Qt.SolidPattern)", "21)) self.menubar.setObjectName(\"menubar\") MainWindow.setMenuBar(self.menubar) self.statusbar = QtWidgets.QStatusBar(MainWindow) self.statusbar.setObjectName(\"statusbar\") MainWindow.setStatusBar(self.statusbar) self.retranslateUi(MainWindow) QtCore.QMetaObject.connectSlotsByName(MainWindow)", "QtGui.QBrush(gradient) palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.Window, brush) brush = QtGui.QBrush(QtGui.QColor(51, 51, 51, 128))", "84, 84)) brush.setStyle(QtCore.Qt.SolidPattern) palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Window, brush) MainWindow.setPalette(palette) MainWindow.setAutoFillBackground(False) MainWindow.setStyleSheet(\"background-color: rgb(84,", "}\\n\" \"\\n\" \"QPushButton:hover {\\n\" \" background: qradialgradient(\\n\" \" cx: 0.3,", "self.HideShow.setObjectName(\"HideShow\") self.Copy = QtWidgets.QPushButton(self.centralwidget) self.Copy.setGeometry(QtCore.QRect(190, 250, 201, 31)) palette =", "self.HideShow.setStyleSheet(\"QPushButton {\\n\" \" color: #333;\\n\" \"\\n\" \" border-radius: 7px;\\n\" \"", "\"\\n\" \" \\n\" \" border-style: outset;\\n\" \" background: qradialgradient(\\n\" \"", "84, 84)) brush.setStyle(QtCore.Qt.SolidPattern) palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Button, brush) brush = QtGui.QBrush(QtGui.QColor(120, 120,", "fx: 0.4, fy: -0.1,\\n\" \" radius: 1.35, stop: 0 #fff,", "#fff, stop: 1 #ddd\\n\" \" );\\n\" \" }\") self.Copy.setObjectName(\"Copy\") self.hexify", "brush.setStyle(QtCore.Qt.SolidPattern) palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Text, brush) brush = QtGui.QBrush(QtGui.QColor(84, 84, 84)) brush.setStyle(QtCore.Qt.SolidPattern)", "palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.Base, brush) brush = QtGui.QBrush(QtGui.QColor(84, 84, 84)) brush.setStyle(QtCore.Qt.SolidPattern) palette.setBrush(QtGui.QPalette.Active,", "self.Algorithms.setItemText(5, _translate(\"MainWindow\", \"sha512\")) self.Generate.setText(_translate(\"MainWindow\", \"GENERATE\")) self.Copy.setText(_translate(\"MainWindow\", \"COPY TO CLIPBOARD\")) self.hexify.setText(_translate(\"MainWindow\",", "= QtGui.QBrush(gradient) palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.Window, brush) brush = QtGui.QBrush(QtGui.QColor(255, 255, 255,", "255, 255)) brush.setStyle(QtCore.Qt.SolidPattern) palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.WindowText, brush) brush = QtGui.QBrush(QtGui.QColor(84, 84,", "\"\\n\" \" border-radius: 20px;\\n\" \" border-style: outset;\\n\" \" 
background: qradialgradient(\\n\"", "brush.setStyle(QtCore.Qt.NoBrush) palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.PlaceholderText, brush) self.Generate.setPalette(palette) font = QtGui.QFont() font.setBold(True) font.setWeight(75)", "self.Password.setEchoMode(QtWidgets.QLineEdit.Password) self.Password.setReadOnly(True) self.Password.setObjectName(\"Password\") self.HideShow = QtWidgets.QPushButton(self.centralwidget) self.HideShow.setGeometry(QtCore.QRect(350, 210, 31, 31))", "84)) brush.setStyle(QtCore.Qt.SolidPattern) palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.Button, brush) brush = QtGui.QBrush(QtGui.QColor(84, 84, 84))", "QtGui.QBrush(QtGui.QColor(51, 51, 51)) brush.setStyle(QtCore.Qt.SolidPattern) palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.WindowText, brush) gradient = QtGui.QRadialGradient(0.3,", "self.Algorithms.addItem(\"\") self.Algorithms.addItem(\"\") self.Algorithms.addItem(\"\") self.Algorithms.addItem(\"\") self.Generate = QtWidgets.QPushButton(self.centralwidget) self.Generate.setGeometry(QtCore.QRect(190, 120, 191,", "\"__main__\": import sys app = QtWidgets.QApplication(sys.argv) MainWindow = QtWidgets.QMainWindow() ui", "self.Algorithms.addItem(\"\") self.Algorithms.addItem(\"\") self.Algorithms.addItem(\"\") self.Algorithms.addItem(\"\") self.Algorithms.addItem(\"\") self.Algorithms.addItem(\"\") self.Generate = QtWidgets.QPushButton(self.centralwidget) self.Generate.setGeometry(QtCore.QRect(190,", "51)) brush.setStyle(QtCore.Qt.SolidPattern) palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.WindowText, brush) gradient = QtGui.QRadialGradient(0.3, -0.4, 1.35,", "QtGui.QBrush(QtGui.QColor(0, 0, 0, 128)) brush.setStyle(QtCore.Qt.SolidPattern) palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.PlaceholderText, brush) self.hexify.setPalette(palette) font", "84)) brush.setStyle(QtCore.Qt.SolidPattern) palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.Button, brush) brush = QtGui.QBrush(QtGui.QColor(84, 84, 84))", "QtGui.QBrush(QtGui.QColor(51, 51, 51)) brush.setStyle(QtCore.Qt.SolidPattern) palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.Text, brush) brush = QtGui.QBrush(QtGui.QColor(51,", "brush) brush = QtGui.QBrush(QtGui.QColor(51, 51, 51)) brush.setStyle(QtCore.Qt.SolidPattern) palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.WindowText, brush)", "51)) brush.setStyle(QtCore.Qt.SolidPattern) palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Text, brush) brush = QtGui.QBrush(QtGui.QColor(51, 51, 51))", "\" border-style: inset;\\n\" \" background: qradialgradient(\\n\" \" cx: 0.4, cy:", "\" );\\n\" \" }\\n\" \"\\n\" \"QPushButton:pressed {\\n\" \" border-style: inset;\\n\"", "84)) brush.setStyle(QtCore.Qt.SolidPattern) palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.Window, brush) brush = QtGui.QBrush(QtGui.QColor(255, 255, 255,", "QtWidgets.QApplication(sys.argv) MainWindow = QtWidgets.QMainWindow() ui = Ui_MainWindow() ui.setupUi(MainWindow) MainWindow.show() sys.exit(app.exec_())", "brush) self.Password.setPalette(palette) font = QtGui.QFont() font.setBold(True) font.setWeight(75) self.Password.setFont(font) self.Password.setText(\"\") self.Password.setEchoMode(QtWidgets.QLineEdit.Password)", "180, 81, 21)) palette = QtGui.QPalette() brush = QtGui.QBrush(QtGui.QColor(255, 255,", "51, 128)) brush.setStyle(QtCore.Qt.NoBrush) palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.PlaceholderText, brush) brush = QtGui.QBrush(QtGui.QColor(51, 51,", "self.Password.setFont(font) 
self.Password.setText(\"\") self.Password.setEchoMode(QtWidgets.QLineEdit.Password) self.Password.setReadOnly(True) self.Password.setObjectName(\"Password\") self.HideShow = QtWidgets.QPushButton(self.centralwidget) self.HideShow.setGeometry(QtCore.QRect(350, 210,", "255, 128)) brush.setStyle(QtCore.Qt.SolidPattern) palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.PlaceholderText, brush) brush = QtGui.QBrush(QtGui.QColor(255, 255,", "= QtGui.QBrush(gradient) palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.Button, brush) brush = QtGui.QBrush(QtGui.QColor(51, 51, 51))", "= QtGui.QBrush(QtGui.QColor(255, 255, 255)) brush.setStyle(QtCore.Qt.SolidPattern) palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.Text, brush) brush =", "\"\") self.Algorithms.setObjectName(\"Algorithms\") self.Algorithms.addItem(\"\") self.Algorithms.addItem(\"\") self.Algorithms.addItem(\"\") self.Algorithms.addItem(\"\") self.Algorithms.addItem(\"\") self.Algorithms.addItem(\"\") self.Generate =", "self.centralwidget = QtWidgets.QWidget(MainWindow) self.centralwidget.setObjectName(\"centralwidget\") self.Algorithms = QtWidgets.QComboBox(self.centralwidget) self.Algorithms.setGeometry(QtCore.QRect(190, 60, 191,", "font.setBold(True) font.setWeight(75) self.Copy.setFont(font) self.Copy.setStyleSheet(\"QPushButton {\\n\" \" color: #333;\\n\" \" \\n\"", "brush = QtGui.QBrush(gradient) palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Base, brush) gradient = QtGui.QRadialGradient(0.3, -0.4,", "\" cx: 0.3, cy: -0.4, fx: 0.3, fy: -0.4,\\n\" \"", "palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.PlaceholderText, brush) self.Generate.setPalette(palette) font = QtGui.QFont() font.setBold(True) font.setWeight(75) self.Generate.setFont(font)", "51, 51)) brush.setStyle(QtCore.Qt.SolidPattern) palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.WindowText, brush) gradient = QtGui.QRadialGradient(0.3, -0.4,", "brush.setStyle(QtCore.Qt.SolidPattern) palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.WindowText, brush) brush = QtGui.QBrush(QtGui.QColor(84, 84, 84)) brush.setStyle(QtCore.Qt.SolidPattern)", "gradient.setColorAt(1.0, QtGui.QColor(136, 136, 136)) brush = QtGui.QBrush(gradient) palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.Base, brush)", "\"sha224\")) self.Algorithms.setItemText(4, _translate(\"MainWindow\", \"sha1\")) self.Algorithms.setItemText(5, _translate(\"MainWindow\", \"sha512\")) self.Generate.setText(_translate(\"MainWindow\", \"GENERATE\")) self.Copy.setText(_translate(\"MainWindow\",", "84)) brush.setStyle(QtCore.Qt.SolidPattern) palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.Button, brush) brush = QtGui.QBrush(QtGui.QColor(255, 255, 255))", "= QtGui.QBrush(QtGui.QColor(84, 84, 84)) brush.setStyle(QtCore.Qt.SolidPattern) palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.Base, brush) brush =", "255, 128)) brush.setStyle(QtCore.Qt.SolidPattern) palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.PlaceholderText, brush) brush = QtGui.QBrush(QtGui.QColor(51, 51,", "self.Generate.setStyleSheet(\"QPushButton {\\n\" \" color: #333;\\n\" \"\\n\" \" border-radius: 20px;\\n\" \"", "palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.ButtonText, brush) gradient = QtGui.QRadialGradient(0.3, -0.4, 1.35, 0.3, -0.4)", "palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.Window, brush) brush = QtGui.QBrush(QtGui.QColor(120, 120, 120)) brush.setStyle(QtCore.Qt.SolidPattern) palette.setBrush(QtGui.QPalette.Disabled,", "\" }\") 
self.HideShow.setText(\"\") icon = QtGui.QIcon() icon.addPixmap(QtGui.QPixmap(\"../../Desktop/EYECLOSE.png\"), QtGui.QIcon.Normal, QtGui.QIcon.Off) self.HideShow.setIcon(icon)", "brush) brush = QtGui.QBrush(QtGui.QColor(51, 51, 51)) brush.setStyle(QtCore.Qt.SolidPattern) palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.ButtonText, brush)", "cy: -0.1, fx: 0.4, fy: -0.1,\\n\" \" radius: 1.35, stop:", "255)) gradient.setColorAt(1.0, QtGui.QColor(136, 136, 136)) brush = QtGui.QBrush(gradient) palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.Window,", "}\") self.HideShow.setText(\"\") icon = QtGui.QIcon() icon.addPixmap(QtGui.QPixmap(\"../../Desktop/EYECLOSE.png\"), QtGui.QIcon.Normal, QtGui.QIcon.Off) self.HideShow.setIcon(icon) self.HideShow.setIconSize(QtCore.QSize(30,", "brush.setStyle(QtCore.Qt.SolidPattern) palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.Base, brush) brush = QtGui.QBrush(QtGui.QColor(84, 84, 84)) brush.setStyle(QtCore.Qt.SolidPattern)", "QtGui.QColor(136, 136, 136)) brush = QtGui.QBrush(gradient) palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.Base, brush) gradient", "QtGui.QPalette.Window, brush) brush = QtGui.QBrush(QtGui.QColor(51, 51, 51, 128)) brush.setStyle(QtCore.Qt.NoBrush) palette.setBrush(QtGui.QPalette.Active,", "#333;\\n\" \"\\n\" \" \\n\" \" border-style: outset;\\n\" \" background: qradialgradient(\\n\"", "0 #fff, stop: 1 #bbb\\n\" \" );\\n\" \" }\\n\" \"\\n\"", "255, 255, 128)) brush.setStyle(QtCore.Qt.SolidPattern) palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.PlaceholderText, brush) brush = QtGui.QBrush(QtGui.QColor(120,", "algorithm\")) self.Algorithms.setItemText(1, _translate(\"MainWindow\", \"sha256\")) self.Algorithms.setItemText(2, _translate(\"MainWindow\", \"md5\")) self.Algorithms.setItemText(3, _translate(\"MainWindow\", \"sha224\"))", "51, 51, 128)) brush.setStyle(QtCore.Qt.SolidPattern) palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.PlaceholderText, brush) self.Algorithms.setPalette(palette) font =", "#bbb\\n\" \" );\\n\" \" }\\n\" \"\\n\" \"QPushButton:pressed {\\n\" \" border-style:", "= QtGui.QBrush(QtGui.QColor(51, 51, 51)) brush.setStyle(QtCore.Qt.SolidPattern) palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.ButtonText, brush) gradient =", "font.setBold(True) font.setWeight(75) self.Password.setFont(font) self.Password.setText(\"\") self.Password.setEchoMode(QtWidgets.QLineEdit.Password) self.Password.setReadOnly(True) self.Password.setObjectName(\"Password\") self.HideShow = QtWidgets.QPushButton(self.centralwidget)", "from PyQt5 import QtGui, QtCore class Ui_MainWindow(object): def setupUi(self, MainWindow):", "PyQt5 import QtGui, QtCore class Ui_MainWindow(object): def setupUi(self, MainWindow): MainWindow.setObjectName(\"MainWindow\")", "QtGui.QPalette.PlaceholderText, brush) brush = QtGui.QBrush(QtGui.QColor(51, 51, 51)) brush.setStyle(QtCore.Qt.SolidPattern) palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.WindowText,", "84)) brush.setStyle(QtCore.Qt.SolidPattern) palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.Button, brush) brush = QtGui.QBrush(QtGui.QColor(255, 255, 255))", "\" );\\n\" \" }\\n\" \"\\n\" \"QComboBox:pressed {\\n\" \" border-style: inset;\\n\"", "0.4, fy: -0.1,\\n\" \" radius: 1.35, stop: 0 #fff, stop:", "{\\n\" \" color: #333;\\n\" \"\\n\" \" border-radius: 20px;\\n\" \" border-style:", "QtGui.QPalette.Base, brush) brush = QtGui.QBrush(QtGui.QColor(84, 84, 84)) brush.setStyle(QtCore.Qt.SolidPattern) palette.setBrush(QtGui.QPalette.Active, 
QtGui.QPalette.Window,", "gradient.setColorAt(1.0, QtGui.QColor(136, 136, 136)) brush = QtGui.QBrush(gradient) palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.Window, brush)", "_translate(\"MainWindow\", \"sha224\")) self.Algorithms.setItemText(4, _translate(\"MainWindow\", \"sha1\")) self.Algorithms.setItemText(5, _translate(\"MainWindow\", \"sha512\")) self.Generate.setText(_translate(\"MainWindow\", \"GENERATE\"))", "-0.4,\\n\" \" radius: 1.35, stop: 0 #fff, stop: 1 #888\\n\"", "brush) brush = QtGui.QBrush(QtGui.QColor(84, 84, 84)) brush.setStyle(QtCore.Qt.SolidPattern) palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.Window, brush)", "= QtWidgets.QPushButton(self.centralwidget) self.Generate.setGeometry(QtCore.QRect(190, 120, 191, 41)) palette = QtGui.QPalette() brush", "font = QtGui.QFont() font.setPointSize(12) font.setBold(True) font.setWeight(75) self.hexify.setFont(font) self.hexify.setObjectName(\"hexify\") MainWindow.setCentralWidget(self.centralwidget) self.menubar", "self.Algorithms.setGeometry(QtCore.QRect(190, 60, 191, 41)) palette = QtGui.QPalette() brush = QtGui.QBrush(QtGui.QColor(51,", "brush) brush = QtGui.QBrush(QtGui.QColor(84, 84, 84)) brush.setStyle(QtCore.Qt.SolidPattern) palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.Button, brush)", "51)) brush.setStyle(QtCore.Qt.SolidPattern) palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.Text, brush) brush = QtGui.QBrush(QtGui.QColor(51, 51, 51))", "palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.Text, brush) brush = QtGui.QBrush(QtGui.QColor(51, 51, 51)) brush.setStyle(QtCore.Qt.SolidPattern) palette.setBrush(QtGui.QPalette.Active,", "self.hexify = QtWidgets.QCheckBox(self.centralwidget) self.hexify.setGeometry(QtCore.QRect(250, 180, 81, 21)) palette = QtGui.QPalette()", "brush.setStyle(QtCore.Qt.SolidPattern) palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Button, brush) brush = QtGui.QBrush(QtGui.QColor(84, 84, 84)) brush.setStyle(QtCore.Qt.SolidPattern)", "stop: 1 #ddd\\n\" \" );\\n\" \" }\") self.HideShow.setText(\"\") icon =", "brush) brush = QtGui.QBrush(QtGui.QColor(51, 51, 51)) brush.setStyle(QtCore.Qt.SolidPattern) palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Text, brush)", "51, 51)) brush.setStyle(QtCore.Qt.SolidPattern) palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.Text, brush) brush = QtGui.QBrush(QtGui.QColor(51, 51,", "\" border-radius: 7px;\\n\" \" border-style: outset;\\n\" \" background: qradialgradient(\\n\" \"", "\" );\\n\" \" padding: 5px;\\n\" \" \\n\" \" }\\n\" \"\\n\"", "palette = QtGui.QPalette() brush = QtGui.QBrush(QtGui.QColor(255, 255, 255)) brush.setStyle(QtCore.Qt.SolidPattern) palette.setBrush(QtGui.QPalette.Active,", "QtGui.QPalette.Base, brush) brush = QtGui.QBrush(QtGui.QColor(84, 84, 84)) brush.setStyle(QtCore.Qt.SolidPattern) palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.Window,", "84, 84)) brush.setStyle(QtCore.Qt.SolidPattern) palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.Button, brush) brush = QtGui.QBrush(QtGui.QColor(255, 255,", "palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.WindowText, brush) gradient = QtGui.QRadialGradient(0.3, -0.4, 1.35, 0.3, -0.4)", "import QtCore, QtGui, QtWidgets from PyQt5 import QtGui, QtCore class", "def setupUi(self, MainWindow): MainWindow.setObjectName(\"MainWindow\") MainWindow.resize(577, 341) palette = QtGui.QPalette() brush", "51, 51)) brush.setStyle(QtCore.Qt.SolidPattern) palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.ButtonText, brush) 
gradient = QtGui.QRadialGradient(0.3, -0.4,", "84, 84)) brush.setStyle(QtCore.Qt.SolidPattern) palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.Window, brush) brush = QtGui.QBrush(QtGui.QColor(255, 255,", "84)) brush.setStyle(QtCore.Qt.SolidPattern) palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.Window, brush) brush = QtGui.QBrush(QtGui.QColor(120, 120, 120))", "\"\\n\" \"QPushButton:pressed {\\n\" \" border-style: inset;\\n\" \" background: qradialgradient(\\n\" \"", "palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.ButtonText, brush) gradient = QtGui.QRadialGradient(0.3, -0.4, 1.35, 0.3, -0.4)", "border-style: outset;\\n\" \" background: qradialgradient(\\n\" \" cx: 0.3, cy: -0.4,", ");\\n\" \" padding: 5px;\\n\" \" }\\n\" \"\\n\" \"QPushButton:hover {\\n\" \"", "font = QtGui.QFont() font.setBold(True) font.setWeight(75) self.UserInput.setFont(font) self.UserInput.setObjectName(\"UserInput\") self.Password = QtWidgets.QLineEdit(self.centralwidget)", "self.Password.setText(\"\") self.Password.setEchoMode(QtWidgets.QLineEdit.Password) self.Password.setReadOnly(True) self.Password.setObjectName(\"Password\") self.HideShow = QtWidgets.QPushButton(self.centralwidget) self.HideShow.setGeometry(QtCore.QRect(350, 210, 31,", "\" }\\n\" \"\\n\" \"QPushButton:pressed {\\n\" \" border-style: inset;\\n\" \" background:", "brush.setStyle(QtCore.Qt.SolidPattern) palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Window, brush) MainWindow.setPalette(palette) MainWindow.setAutoFillBackground(False) MainWindow.setStyleSheet(\"background-color: rgb(84, 84, 84);\")", "QtGui.QPalette.Window, brush) brush = QtGui.QBrush(QtGui.QColor(0, 0, 0, 128)) brush.setStyle(QtCore.Qt.SolidPattern) palette.setBrush(QtGui.QPalette.Disabled,", "84, 84)) brush.setStyle(QtCore.Qt.SolidPattern) palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.Button, brush) brush = QtGui.QBrush(QtGui.QColor(255, 255,", "brush.setStyle(QtCore.Qt.NoBrush) palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.PlaceholderText, brush) self.Copy.setPalette(palette) font = QtGui.QFont() font.setBold(True) font.setWeight(75)", "QtWidgets.QPushButton(self.centralwidget) self.HideShow.setGeometry(QtCore.QRect(350, 210, 31, 31)) self.HideShow.setStyleSheet(\"QPushButton {\\n\" \" color: #333;\\n\"", "128)) brush.setStyle(QtCore.Qt.NoBrush) palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.PlaceholderText, brush) self.Generate.setPalette(palette) font = QtGui.QFont() font.setBold(True)", "fy: -0.1,\\n\" \" radius: 1.35, stop: 0 #fff, stop: 1", "MainWindow.setObjectName(\"MainWindow\") MainWindow.resize(577, 341) palette = QtGui.QPalette() brush = QtGui.QBrush(QtGui.QColor(255, 255,", "brush) brush = QtGui.QBrush(QtGui.QColor(84, 84, 84)) brush.setStyle(QtCore.Qt.SolidPattern) palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.Button, brush)", "QtWidgets.QLineEdit(self.centralwidget) self.UserInput.setGeometry(QtCore.QRect(190, 20, 191, 31)) palette = QtGui.QPalette() brush =", "font.setBold(True) font.setWeight(75) self.hexify.setFont(font) self.hexify.setObjectName(\"hexify\") MainWindow.setCentralWidget(self.centralwidget) self.menubar = QtWidgets.QMenuBar(MainWindow) self.menubar.setGeometry(QtCore.QRect(0, 0,", "QtGui.QPalette.Button, brush) brush = QtGui.QBrush(QtGui.QColor(51, 51, 51)) brush.setStyle(QtCore.Qt.SolidPattern) palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.Text,", "= QtGui.QBrush(QtGui.QColor(51, 51, 51, 128)) brush.setStyle(QtCore.Qt.NoBrush) 
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.PlaceholderText, brush) brush", "7px;\\n\" \" border-style: outset;\\n\" \" background: qradialgradient(\\n\" \" cx: 0.3,", "brush = QtGui.QBrush(QtGui.QColor(84, 84, 84)) brush.setStyle(QtCore.Qt.SolidPattern) palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.Base, brush) brush", ");\\n\" \" }\\n\" \"\\n\" \"\\n\" \"\\n\" \"\\n\" \"\\n\" \"\\n\" \"\")", "MainWindow.resize(577, 341) palette = QtGui.QPalette() brush = QtGui.QBrush(QtGui.QColor(255, 255, 255))", "= QtGui.QBrush(gradient) palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.Window, brush) brush = QtGui.QBrush(QtGui.QColor(255, 255, 255,", "palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.PlaceholderText, brush) self.hexify.setPalette(palette) font = QtGui.QFont() font.setPointSize(12) font.setBold(True) font.setWeight(75)", "_translate(\"MainWindow\", \"sha1\")) self.Algorithms.setItemText(5, _translate(\"MainWindow\", \"sha512\")) self.Generate.setText(_translate(\"MainWindow\", \"GENERATE\")) self.Copy.setText(_translate(\"MainWindow\", \"COPY TO", "\"\\n\" \"\\n\" \"\\n\" \"\\n\" \"\\n\" \"\") self.Algorithms.setObjectName(\"Algorithms\") self.Algorithms.addItem(\"\") self.Algorithms.addItem(\"\") self.Algorithms.addItem(\"\")", "QtGui.QBrush(gradient) palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.Window, brush) brush = QtGui.QBrush(QtGui.QColor(255, 255, 255, 128))", "QtGui.QBrush(QtGui.QColor(84, 84, 84)) brush.setStyle(QtCore.Qt.SolidPattern) palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.Window, brush) brush = QtGui.QBrush(QtGui.QColor(255,", "= QtGui.QBrush(QtGui.QColor(51, 51, 51)) brush.setStyle(QtCore.Qt.SolidPattern) palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.Text, brush) brush =", "palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Window, brush) brush = QtGui.QBrush(QtGui.QColor(51, 51, 51, 128)) brush.setStyle(QtCore.Qt.SolidPattern)", "1 #bbb\\n\" \" );\\n\" \" }\\n\" \"\\n\" \"QPushButton:pressed {\\n\" \"", "QtCore.QMetaObject.connectSlotsByName(MainWindow) def retranslateUi(self, MainWindow): _translate = QtCore.QCoreApplication.translate MainWindow.setWindowTitle(_translate(\"MainWindow\", \"MainWindow\")) self.Algorithms.setCurrentText(_translate(\"MainWindow\",", "1.35, 0.3, -0.4) gradient.setSpread(QtGui.QGradient.PadSpread) gradient.setCoordinateMode(QtGui.QGradient.ObjectBoundingMode) gradient.setColorAt(0.0, QtGui.QColor(255, 255, 255)) gradient.setColorAt(1.0,", "QtGui.QPalette.Button, brush) brush = QtGui.QBrush(QtGui.QColor(51, 51, 51)) brush.setStyle(QtCore.Qt.SolidPattern) palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.Text,", "border-radius: 13px;\\n\" \" border-style: outset;\\n\" \" background: qradialgradient(\\n\" \" cx:", "QtGui.QBrush(gradient) palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.Base, brush) gradient = QtGui.QRadialGradient(0.3, -0.4, 1.35, 0.3,", "brush.setStyle(QtCore.Qt.SolidPattern) palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.Base, brush) brush = QtGui.QBrush(QtGui.QColor(84, 84, 84)) brush.setStyle(QtCore.Qt.SolidPattern)", "self.HideShow.setText(\"\") icon = QtGui.QIcon() icon.addPixmap(QtGui.QPixmap(\"../../Desktop/EYECLOSE.png\"), QtGui.QIcon.Normal, QtGui.QIcon.Off) self.HideShow.setIcon(icon) self.HideShow.setIconSize(QtCore.QSize(30, 30))" ]
[ "by Django 3.0.2 on 2020-03-17 08:44 from django.db import migrations,", "2020-03-17 08:44 from django.db import migrations, models class Migration(migrations.Migration): dependencies", "migrations, models class Migration(migrations.Migration): dependencies = [ ('myApp', '0016_usergroup_buyer'), ]", "name='Chat', fields=[ ('id', models.CharField(max_length=31, primary_key=True, serialize=False)), ('chatinfo', models.CharField(max_length=20000)), ('shopid', models.CharField(max_length=30)),", "models.CharField(max_length=20000)), ('shopid', models.CharField(max_length=30)), ('user1', models.CharField(max_length=50)), ('user2', models.CharField(max_length=50)), ('name1', models.CharField(max_length=50)), ('name2',", "Generated by Django 3.0.2 on 2020-03-17 08:44 from django.db import", "models.CharField(max_length=30)), ('user1', models.CharField(max_length=50)), ('user2', models.CharField(max_length=50)), ('name1', models.CharField(max_length=50)), ('name2', models.CharField(max_length=50)), ],", "('myApp', '0016_usergroup_buyer'), ] operations = [ migrations.CreateModel( name='Chat', fields=[ ('id',", "08:44 from django.db import migrations, models class Migration(migrations.Migration): dependencies =", "('id', models.CharField(max_length=31, primary_key=True, serialize=False)), ('chatinfo', models.CharField(max_length=20000)), ('shopid', models.CharField(max_length=30)), ('user1', models.CharField(max_length=50)),", "Migration(migrations.Migration): dependencies = [ ('myApp', '0016_usergroup_buyer'), ] operations = [", "dependencies = [ ('myApp', '0016_usergroup_buyer'), ] operations = [ migrations.CreateModel(", "django.db import migrations, models class Migration(migrations.Migration): dependencies = [ ('myApp',", "models.CharField(max_length=31, primary_key=True, serialize=False)), ('chatinfo', models.CharField(max_length=20000)), ('shopid', models.CharField(max_length=30)), ('user1', models.CharField(max_length=50)), ('user2',", "'0016_usergroup_buyer'), ] operations = [ migrations.CreateModel( name='Chat', fields=[ ('id', models.CharField(max_length=31,", "[ migrations.CreateModel( name='Chat', fields=[ ('id', models.CharField(max_length=31, primary_key=True, serialize=False)), ('chatinfo', models.CharField(max_length=20000)),", "operations = [ migrations.CreateModel( name='Chat', fields=[ ('id', models.CharField(max_length=31, primary_key=True, serialize=False)),", "3.0.2 on 2020-03-17 08:44 from django.db import migrations, models class", "migrations.CreateModel( name='Chat', fields=[ ('id', models.CharField(max_length=31, primary_key=True, serialize=False)), ('chatinfo', models.CharField(max_length=20000)), ('shopid',", "# Generated by Django 3.0.2 on 2020-03-17 08:44 from django.db", "Django 3.0.2 on 2020-03-17 08:44 from django.db import migrations, models", "] operations = [ migrations.CreateModel( name='Chat', fields=[ ('id', models.CharField(max_length=31, primary_key=True,", "from django.db import migrations, models class Migration(migrations.Migration): dependencies = [", "import migrations, models class Migration(migrations.Migration): dependencies = [ ('myApp', '0016_usergroup_buyer'),", "fields=[ ('id', models.CharField(max_length=31, primary_key=True, serialize=False)), ('chatinfo', models.CharField(max_length=20000)), ('shopid', models.CharField(max_length=30)), ('user1',", "('chatinfo', models.CharField(max_length=20000)), ('shopid', models.CharField(max_length=30)), ('user1', models.CharField(max_length=50)), ('user2', models.CharField(max_length=50)), ('name1', 
models.CharField(max_length=50)),", "class Migration(migrations.Migration): dependencies = [ ('myApp', '0016_usergroup_buyer'), ] operations =", "models.CharField(max_length=50)), ('user2', models.CharField(max_length=50)), ('name1', models.CharField(max_length=50)), ('name2', models.CharField(max_length=50)), ], ), ]", "serialize=False)), ('chatinfo', models.CharField(max_length=20000)), ('shopid', models.CharField(max_length=30)), ('user1', models.CharField(max_length=50)), ('user2', models.CharField(max_length=50)), ('name1',", "('shopid', models.CharField(max_length=30)), ('user1', models.CharField(max_length=50)), ('user2', models.CharField(max_length=50)), ('name1', models.CharField(max_length=50)), ('name2', models.CharField(max_length=50)),", "on 2020-03-17 08:44 from django.db import migrations, models class Migration(migrations.Migration):", "primary_key=True, serialize=False)), ('chatinfo', models.CharField(max_length=20000)), ('shopid', models.CharField(max_length=30)), ('user1', models.CharField(max_length=50)), ('user2', models.CharField(max_length=50)),", "models class Migration(migrations.Migration): dependencies = [ ('myApp', '0016_usergroup_buyer'), ] operations", "[ ('myApp', '0016_usergroup_buyer'), ] operations = [ migrations.CreateModel( name='Chat', fields=[", "= [ ('myApp', '0016_usergroup_buyer'), ] operations = [ migrations.CreateModel( name='Chat',", "= [ migrations.CreateModel( name='Chat', fields=[ ('id', models.CharField(max_length=31, primary_key=True, serialize=False)), ('chatinfo',", "('user1', models.CharField(max_length=50)), ('user2', models.CharField(max_length=50)), ('name1', models.CharField(max_length=50)), ('name2', models.CharField(max_length=50)), ], )," ]
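The second list holds overlapping fragments of a single Django migration, generated by Django 3.0.2 on 2020-03-17 08:44, which creates a Chat model in the myApp app. Reassembled from the fragments (every field name, max_length value, and the '0016_usergroup_buyer' dependency appears verbatim in them), the whole migration reads as follows.

# Generated by Django 3.0.2 on 2020-03-17 08:44

from django.db import migrations, models


class Migration(migrations.Migration):

    dependencies = [
        ('myApp', '0016_usergroup_buyer'),
    ]

    operations = [
        migrations.CreateModel(
            name='Chat',
            fields=[
                ('id', models.CharField(max_length=31, primary_key=True, serialize=False)),
                ('chatinfo', models.CharField(max_length=20000)),
                ('shopid', models.CharField(max_length=30)),
                ('user1', models.CharField(max_length=50)),
                ('user2', models.CharField(max_length=50)),
                ('name1', models.CharField(max_length=50)),
                ('name2', models.CharField(max_length=50)),
            ],
        ),
    ]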
[ "permissions and # limitations under the License. \"\"\"Controllers for miscellaneous", "# # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law", "License. \"\"\"Controllers for miscellaneous services.\"\"\" __author__ = '<NAME>' import base64", "# # Licensed under the Apache License, Version 2.0 (the", "compliance with the License. # You may obtain a copy", "base64-encoded ascii string with uploaded file's content.\"\"\" def post(self): raw_file_content", "All Rights Reserved. # # Licensed under the Apache License,", "2.0 (the \"License\"); # you may not use this file", "file except in compliance with the License. # You may", "agreed to in writing, software # distributed under the License", "Unless required by applicable law or agreed to in writing,", "base64.b64encode(raw_file_content) self.response.headers['Content-Type'] = 'application/json' response = { 'base64_file_content': encoded_content, }", "the License is distributed on an \"AS-IS\" BASIS, # WITHOUT", "import base64 import json from core.controllers import base class FileReadHandler(base.BaseHandler):", "json from core.controllers import base class FileReadHandler(base.BaseHandler): \"\"\"Returns a base64-encoded", "string with uploaded file's content.\"\"\" def post(self): raw_file_content = self.request.get('file')", "Inc. All Rights Reserved. # # Licensed under the Apache", "\"AS-IS\" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,", "the specific language governing permissions and # limitations under the", "FileReadHandler(base.BaseHandler): \"\"\"Returns a base64-encoded ascii string with uploaded file's content.\"\"\"", "# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express", "import base class FileReadHandler(base.BaseHandler): \"\"\"Returns a base64-encoded ascii string with", "express or implied. # See the License for the specific", "applicable law or agreed to in writing, software # distributed", "except in compliance with the License. # You may obtain", "__author__ = '<NAME>' import base64 import json from core.controllers import", "of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless", "Copyright 2012 Google Inc. All Rights Reserved. # # Licensed", "Licensed under the Apache License, Version 2.0 (the \"License\"); #", "not use this file except in compliance with the License.", "writing, software # distributed under the License is distributed on", "limitations under the License. \"\"\"Controllers for miscellaneous services.\"\"\" __author__ =", "in writing, software # distributed under the License is distributed", "Google Inc. All Rights Reserved. # # Licensed under the", "you may not use this file except in compliance with", "base64 import json from core.controllers import base class FileReadHandler(base.BaseHandler): \"\"\"Returns", "# Licensed under the Apache License, Version 2.0 (the \"License\");", "encoded_content = base64.b64encode(raw_file_content) self.response.headers['Content-Type'] = 'application/json' response = { 'base64_file_content':", "use this file except in compliance with the License. #", "http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed", "and # limitations under the License. \"\"\"Controllers for miscellaneous services.\"\"\"", "under the License is distributed on an \"AS-IS\" BASIS, #", "CONDITIONS OF ANY KIND, either express or implied. 
# See", "class FileReadHandler(base.BaseHandler): \"\"\"Returns a base64-encoded ascii string with uploaded file's", "the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required", "on an \"AS-IS\" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF", "or implied. # See the License for the specific language", "Rights Reserved. # # Licensed under the Apache License, Version", "License. # You may obtain a copy of the License", "License, Version 2.0 (the \"License\"); # you may not use", "post(self): raw_file_content = self.request.get('file') encoded_content = base64.b64encode(raw_file_content) self.response.headers['Content-Type'] = 'application/json'", "# You may obtain a copy of the License at", "KIND, either express or implied. # See the License for", "specific language governing permissions and # limitations under the License.", "language governing permissions and # limitations under the License. \"\"\"Controllers", "copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # #", "License for the specific language governing permissions and # limitations", "License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by", "file's content.\"\"\" def post(self): raw_file_content = self.request.get('file') encoded_content = base64.b64encode(raw_file_content)", "# Copyright 2012 Google Inc. All Rights Reserved. # #", "Reserved. # # Licensed under the Apache License, Version 2.0", "the License for the specific language governing permissions and #", "\"\"\"Returns a base64-encoded ascii string with uploaded file's content.\"\"\" def", "(the \"License\"); # you may not use this file except", "Apache License, Version 2.0 (the \"License\"); # you may not", "# you may not use this file except in compliance", "either express or implied. # See the License for the", "self.request.get('file') encoded_content = base64.b64encode(raw_file_content) self.response.headers['Content-Type'] = 'application/json' response = {", "OR CONDITIONS OF ANY KIND, either express or implied. #", "distributed under the License is distributed on an \"AS-IS\" BASIS,", "# http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or", "\"\"\"Controllers for miscellaneous services.\"\"\" __author__ = '<NAME>' import base64 import", "in compliance with the License. # You may obtain a", "for miscellaneous services.\"\"\" __author__ = '<NAME>' import base64 import json", "miscellaneous services.\"\"\" __author__ = '<NAME>' import base64 import json from", "software # distributed under the License is distributed on an", "= '<NAME>' import base64 import json from core.controllers import base", "core.controllers import base class FileReadHandler(base.BaseHandler): \"\"\"Returns a base64-encoded ascii string", "# # Unless required by applicable law or agreed to", "distributed on an \"AS-IS\" BASIS, # WITHOUT WARRANTIES OR CONDITIONS", "uploaded file's content.\"\"\" def post(self): raw_file_content = self.request.get('file') encoded_content =", "a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 #", "obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0", "governing permissions and # limitations under the License. 
\"\"\"Controllers for", "Version 2.0 (the \"License\"); # you may not use this", "content.\"\"\" def post(self): raw_file_content = self.request.get('file') encoded_content = base64.b64encode(raw_file_content) self.response.headers['Content-Type']", "law or agreed to in writing, software # distributed under", "# distributed under the License is distributed on an \"AS-IS\"", "ascii string with uploaded file's content.\"\"\" def post(self): raw_file_content =", "from core.controllers import base class FileReadHandler(base.BaseHandler): \"\"\"Returns a base64-encoded ascii", "an \"AS-IS\" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY", "= self.request.get('file') encoded_content = base64.b64encode(raw_file_content) self.response.headers['Content-Type'] = 'application/json' response =", "implied. # See the License for the specific language governing", "is distributed on an \"AS-IS\" BASIS, # WITHOUT WARRANTIES OR", "under the Apache License, Version 2.0 (the \"License\"); # you", "services.\"\"\" __author__ = '<NAME>' import base64 import json from core.controllers", "\"License\"); # you may not use this file except in", "raw_file_content = self.request.get('file') encoded_content = base64.b64encode(raw_file_content) self.response.headers['Content-Type'] = 'application/json' response", "by applicable law or agreed to in writing, software #", "OF ANY KIND, either express or implied. # See the", "WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.", "import json from core.controllers import base class FileReadHandler(base.BaseHandler): \"\"\"Returns a", "may obtain a copy of the License at # #", "# Unless required by applicable law or agreed to in", "ANY KIND, either express or implied. # See the License", "See the License for the specific language governing permissions and", "self.response.headers['Content-Type'] = 'application/json' response = { 'base64_file_content': encoded_content, } self.response.out.write(json.dumps(response))", "the License. # You may obtain a copy of the", "at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable", "for the specific language governing permissions and # limitations under", "with uploaded file's content.\"\"\" def post(self): raw_file_content = self.request.get('file') encoded_content", "to in writing, software # distributed under the License is", "# See the License for the specific language governing permissions", "= base64.b64encode(raw_file_content) self.response.headers['Content-Type'] = 'application/json' response = { 'base64_file_content': encoded_content,", "You may obtain a copy of the License at #", "a base64-encoded ascii string with uploaded file's content.\"\"\" def post(self):", "# limitations under the License. \"\"\"Controllers for miscellaneous services.\"\"\" __author__", "under the License. \"\"\"Controllers for miscellaneous services.\"\"\" __author__ = '<NAME>'", "may not use this file except in compliance with the", "or agreed to in writing, software # distributed under the", "'<NAME>' import base64 import json from core.controllers import base class", "required by applicable law or agreed to in writing, software", "BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either", "WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or", "with the License. # You may obtain a copy of", "this file except in compliance with the License. 
# You", "License is distributed on an \"AS-IS\" BASIS, # WITHOUT WARRANTIES", "the Apache License, Version 2.0 (the \"License\"); # you may", "the License. \"\"\"Controllers for miscellaneous services.\"\"\" __author__ = '<NAME>' import", "base class FileReadHandler(base.BaseHandler): \"\"\"Returns a base64-encoded ascii string with uploaded", "2012 Google Inc. All Rights Reserved. # # Licensed under", "def post(self): raw_file_content = self.request.get('file') encoded_content = base64.b64encode(raw_file_content) self.response.headers['Content-Type'] =" ]
[ "key prefix :rtype: str :return: ssh key prefix \"\"\" return", "delete=False) f.close() try: subprocess.check_call( ['openssl', 'req', '-new', '-nodes', '-x509', '-newkey',", "file :param dict config: configuration dict :rtype: str :return: sha1", "LIMITED TO THE WARRANTIES OF MERCHANTABILITY, # FITNESS FOR A", "privkey.rename(old) if pubkey.exists(): old = pathlib.Path(export_path, prefix + '.pub.old') if", "if old.exists(): old.unlink() pubkey.rename(old) logger.info('generating ssh key pair to path:", "-> bool \"\"\"Connect to node via SSH or execute SSH", "str, str) -> str \"\"\"Derive a private key pem file", "SHA1 thumbprint of PEM :param str pfxfile: name of the", "-> str \"\"\"Get SHA1 thumbprint of PFX :param str pfxfile:", "'pass:' + passphrase] ) # extract public key from private", "{}'.format(pemfile)) # convert pem to pfx for Azure Batch service", "if not check_ssh_private_key_filemode(ssh_private_key): logger.warning( 'SSH private key filemode is too", "raise ValueError('pfxfile is invalid') if passphrase is None: passphrase =", "key and no password f = tempfile.NamedTemporaryFile(mode='wb', delete=False) f.close() try:", "util.get_input('Enter PFX filename to create: ') if passphrase is None:", "permission notice shall be included in # all copies or", "input pemfile = settings.batch_shipyard_encryption_public_key_pem(config) pfxfile = settings.batch_shipyard_encryption_pfx_filename(config) passphrase = settings.batch_shipyard_encryption_pfx_passphrase(config)", "fp.unlink() def _rsa_decrypt_string_with_pfx(ciphertext, config): # type: (str, dict) -> str", "is set properly for the private key if not check_ssh_private_key_filemode(ssh_private_key):", "util.base64_encode_string( proc.communicate(input=util.encode_string(data))[0]) if proc.returncode != 0: raise RuntimeError( 'openssl encryption", "if passphrase is None: passphrase = <PASSWORD>pass('Enter password for PFX:", "+ passphrase] ) logger.debug('created PFX file: {}'.format(pfxfile)) finally: # remove", "portions of the Software. # # THE SOFTWARE IS PROVIDED", "if inkey is None: # derive pem from pfx derived", "the private key if not check_ssh_private_key_filemode(ssh_private_key): logger.warning( 'SSH private key", "'-out', pemfile, '-password', 'pass:' + passphrase] ) except Exception: fp", "str :return: sha1 thumbprint of pfx \"\"\" if pfxfile is", "command :param str remote_ip: remote ip address :param int remote_port:", "def check_ssh_private_key_filemode(ssh_private_key): # type: (pathlib.Path) -> bool \"\"\"Check SSH private", "(None) -> str \"\"\"Get remote fs SSH key prefix :rtype:", "any person obtaining a # copy of this software and", "public key PEM file: {}'.format(pemfile)) # convert pem to pfx", "return code or subprocess handle \"\"\" if not ssh_private_key.exists(): raise", "fp.unlink() # get sha1 thumbprint of pfx return get_sha1_thumbprint_pfx(pfxfile, passphrase)", "(str, dict) -> str \"\"\"RSA decrypt a string :param str", "str \"\"\"Generate a pem and a derived pfx file :param", "keypair for use with user logins :param str export_path: keypair", "in base64 :param dict config: configuration dict :rtype: str :return:", "# in lowercase. 
Expected openssl output is in the form:", "'-passin', 'pass:', '-passout', 'pass:' + passphrase] ) logger.debug('created PFX file:", "pubkey.exists(): old = pathlib.Path(export_path, prefix + '.pub.old') if old.exists(): old.unlink()", "HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER #", "pfx_passphrase is None: pfx_passphrase = <PASSWORD>('Enter password for PFX: ')", "included in # all copies or substantial portions of the", "= subprocess.Popen( ['openssl', 'rsautl', '-decrypt', '-inkey', pemfile], stdin=subprocess.PIPE, stdout=subprocess.PIPE) cleartext", "cleartext = None try: data = util.base64_decode_string(ciphertext) proc = subprocess.Popen(", "pfx_passphrase = <PASSWORD>('Enter password for PFX: ') sha1_cert_tp = get_sha1_thumbprint_pfx(pfxfile,", "Fingerprint=<thumbprint> return ''.join(util.decode_string( output).strip().split('=')[1].split(':')).lower() def get_sha1_thumbprint_pfx(pfxfile, passphrase): # type: (str,", "text data to encrypt :param dict config: configuration dict :rtype:", "pemfile: path of pem file to write to :rtype: str", "passphrase = settings.batch_shipyard_encryption_pfx_passphrase(config) if pemfile is None: pemfile = util.get_input('Enter", "TORT OR OTHERWISE, ARISING # FROM, OUT OF OR IN", "pfxfile = util.get_input('Enter PFX filename to create: ') if passphrase", "str ciphertext: cipher text in base64 :param dict config: configuration", "object, range, str, ascii, chr, hex, input, next, oct, open,", "\"\"\" def _mode_check(fstat, flag): return bool(fstat & flag) if util.on_windows():", "*AS IS*, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR #", "config): # type: (str, dict) -> str \"\"\"RSA decrypt a", "pfxfile = settings.batch_shipyard_encryption_pfx_filename(config) passphrase = settings.batch_shipyard_encryption_pfx_passphrase(config) if pemfile is None:", "# gather input pemfile = settings.batch_shipyard_encryption_public_key_pem(config) pfxfile = settings.batch_shipyard_encryption_pfx_filename(config) passphrase", "privkey = pathlib.Path(export_path, prefix) pubkey = pathlib.Path(export_path, prefix + '.pub')", "remove temp cert pem fp = pathlib.Path(f.name) if fp.exists(): fp.unlink()", "on', remote_ip, remote_port, ssh_private_key)) if sync: return util.subprocess_with_output(ssh_cmd, shell=shell) else:", "check_ssh_private_key_filemode(ssh_private_key): # type: (pathlib.Path) -> bool \"\"\"Check SSH private key", "key if not check_ssh_private_key_filemode(ssh_private_key): logger.warning( 'SSH private key filemode is", "get_sha1_thumbprint_pem(pemfile): # type: (str) -> str \"\"\"Get SHA1 thumbprint of", "file: {}'.format(pemfile)) # convert pem to pfx for Azure Batch", "= settings.batch_shipyard_encryption_pfx_sha1_thumbprint( config) # manually get thumbprint of pfx if", ":param bool sync: synchronous execution :param bool shell: execute with", "+ '.pub') if privkey.exists(): old = pathlib.Path(export_path, prefix + '.old')", "'-p', str(remote_port), ] if tty: ssh_cmd.append('-t') if util.is_not_empty(ssh_args): ssh_cmd.extend(ssh_args) ssh_cmd.append('{}@{}'.format(username,", "{}'.format( ssh_private_key)) # ensure file mode is set properly for", "config) inkey = derive_public_key_pem_from_pfx(pfxfile, pfx_passphrase, None) try: if inkey is", "proc = subprocess.Popen( ['openssl', 'x509', '-noout', '-fingerprint', '-in', pemfile], stdout=subprocess.PIPE", "type: (str) -> str \"\"\"Get SHA1 thumbprint from buffer :param", "= pathlib.Path(f.name) if fp.exists(): fp.unlink() # get sha1 thumbprint of", "') if pfxfile is None: pfxfile = 
util.get_input('Enter PFX filename", "not exists in config if util.is_none_or_empty(sha1_cert_tp): if pfx_passphrase is None:", "THE SOFTWARE. # compat imports from __future__ import ( absolute_import,", "if passphrase is None: passphrase = getpass.getpass('Enter password for PFX:", "notice shall be included in # all copies or substantial", "generate pem file with private key and no password f", ":param dict config: configuration dict :rtype: str :return: encrypted string", "'req', '-new', '-nodes', '-x509', '-newkey', 'rsa:2048', '-keyout', privatekey, '-out', f.name,", "type: (str, dict) -> str \"\"\"RSA decrypt a string :param", "and this permission notice shall be included in # all", "'-nodes', '-in', pfxfile, '-out', pemfile, '-password', 'pass:' + passphrase] )", "# create logger logger = logging.getLogger(__name__) util.setup_logger(logger) # global defines", "None: raise RuntimeError('cannot decrypt without valid private key') cleartext =", "old.exists(): old.unlink() privkey.rename(old) if pubkey.exists(): old = pathlib.Path(export_path, prefix +", "_rsa_encrypt_string(data, config): # type: (str, dict) -> str \"\"\"RSA encrypt", "'-o', 'UserKnownHostsFile={}'.format(os.devnull), '-i', str(ssh_private_key), '-p', str(remote_port), ] if tty: ssh_cmd.append('-t')", "and to permit persons to whom the # Software is", "\"\"\"Check SSH private key filemode :param pathlib.Path ssh_private_key: SSH private", "if not exists in config if util.is_none_or_empty(sha1_cert_tp): if pfx_passphrase is", "IN NO EVENT SHALL THE # AUTHORS OR COPYRIGHT HOLDERS", "str \"\"\"RSA decrypt a string :param str ciphertext: cipher text", "key prefix :rtype: tuple :return: (private key filename, public key", "enabled: if encryption is enabled :param str string: string to", "getpass import logging import os try: import pathlib2 as pathlib", "PEM file: {}'.format(pemfile)) # convert pem to pfx for Azure", "None: passphrase = <PASSWORD>('Enter password for PFX: ') # convert", "logger logger = logging.getLogger(__name__) util.setup_logger(logger) # global defines _SSH_KEY_PREFIX =", "'-password', 'pass:' + passphrase] ) # extract public key from", "(the \"Software\"), # to deal in the Software without restriction,", "'rsa', '-in', pemfile, '-pubout', '-outform', 'PEM', '-out', pemfile] ) except", "file fp = pathlib.Path(privatekey) if fp.exists(): fp.unlink() # remove temp", "the rights to use, copy, modify, merge, publish, distribute, sublicense,", "= pathlib.Path(pemfile) if fp.exists(): fp.unlink() pemfile = None return pemfile", "return _parse_sha1_thumbprint_openssl(proc.communicate()[0]) def generate_pem_pfx_certificates(config): # type: (dict) -> str \"\"\"Generate", "the Software, and to permit persons to whom the #", "filename=pfxfile, passphrase=pfx_passphrase, sha1=sha1_cert_tp) def _rsa_encrypt_string(data, config): # type: (str, dict)", ":rtype: int or subprocess.Process :return: return code or subprocess handle", "generate_ssh_keypair(export_path, prefix=None): # type: (str, str) -> tuple \"\"\"Generate an", "from private key subprocess.check_call( ['openssl', 'rsa', '-in', privatekey, '-pubout', '-outform',", "ValueError('invalid data to encrypt') inkey = settings.batch_shipyard_encryption_public_key_pem(config) derived = False", "util.is_none_or_empty(ciphertext): raise ValueError('invalid ciphertext to decrypt') pfxfile = settings.batch_shipyard_encryption_pfx_filename(config) pfx_passphrase", "person obtaining a # copy of this software and associated", "is hereby granted, free of charge, to any 
person obtaining", "getpass.getpass('Enter password for PFX: ') # compute sha1 thumbprint of", "file to export :rtype: str :return: sha1 thumbprint of pem", "pem from pfx subprocess.check_call( ['openssl', 'pkcs12', '-nodes', '-in', pfxfile, '-out',", "associated documentation files (the \"Software\"), # to deal in the", "= util.base64_decode_string(ciphertext) proc = subprocess.Popen( ['openssl', 'rsautl', '-decrypt', '-inkey', pemfile],", "util.on_windows(): return True fstat = ssh_private_key.stat().st_mode modes = frozenset((stat.S_IRWXG, stat.S_IRWXO))", "settings.batch_shipyard_encryption_pfx_sha1_thumbprint( config) # manually get thumbprint of pfx if not", "to parse :rtype: str :return: sha1 thumbprint of buffer \"\"\"", "# compute sha1 thumbprint of pfx pfxdump = subprocess.check_output( ['openssl',", "dict) -> str \"\"\"RSA decrypt a string :param str ciphertext:", "prefix + '.old') if old.exists(): old.unlink() privkey.rename(old) if pubkey.exists(): old", "dict :rtype: str :return: encrypted string if enabled \"\"\" if", "file not found at: {}'.format( ssh_private_key)) # ensure file mode", "PEM :param str pfxfile: name of the pfx file to", "command :rtype: int or subprocess.Process :return: return code or subprocess", "type: (None) -> str \"\"\"Get remote fs SSH key prefix", "remote fs SSH key prefix :rtype: str :return: ssh key", "pem file from a pfx :param str pfxfile: pfx file", "\"\"\"Get remote fs SSH key prefix :rtype: str :return: ssh", "a pfx :param str pfxfile: pfx file :param str passphrase:", "SHA1 Fingerprint=<thumbprint> return ''.join(util.decode_string( output).strip().split('=')[1].split(':')).lower() def get_sha1_thumbprint_pfx(pfxfile, passphrase): # type:", "tuple \"\"\"Get PFX encryption settings from configuration :param dict config:", "key filemode is too permissive: {}'.format( ssh_private_key)) # execute SSH", ") return _parse_sha1_thumbprint_openssl(proc.communicate()[0]) def generate_pem_pfx_certificates(config): # type: (dict) -> str", "remote_ip, remote_port, ssh_private_key)) if sync: return util.subprocess_with_output(ssh_cmd, shell=shell) else: return", "if util.on_windows(): return True fstat = ssh_private_key.stat().st_mode modes = frozenset((stat.S_IRWXG,", ":rtype: str :return: sha1 thumbprint of pem \"\"\" proc =", "granted, free of charge, to any person obtaining a #", "# ensure file mode is set properly for the private", "int, pathlib.Path, str, bool, bool, tuple, tuple) -> bool \"\"\"Connect", "remote port :param pathlib.Path ssh_private_key: SSH private key :param str", "CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER #", "-> str \"\"\"RSA decrypt a string :param str ciphertext: cipher", "imports from __future__ import ( absolute_import, division, print_function, unicode_literals )", "= subprocess.Popen( ['openssl', 'x509', '-noout', '-fingerprint'], stdin=subprocess.PIPE, stdout=subprocess.PIPE ) return", "next, oct, open, pow, round, super, filter, map, zip) #", "decrypt without valid private key') cleartext = None try: data", "# Permission is hereby granted, free of charge, to any", "THE USE OR OTHER # DEALINGS IN THE SOFTWARE. 
#", "'pass:' + passphrase] ) proc = subprocess.Popen( ['openssl', 'x509', '-noout',", "INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, #", "<PASSWORD>('Enter password for PFX: ') sha1_cert_tp = get_sha1_thumbprint_pfx(pfxfile, pfx_passphrase) settings.set_batch_shipyard_encryption_pfx_sha1_thumbprint(", "_parse_sha1_thumbprint_openssl(output): # type: (str) -> str \"\"\"Get SHA1 thumbprint from", "of PFX :param str pfxfile: name of the pfx file", "type: (str, dict) -> str \"\"\"RSA encrypt a string :param", "prefix :rtype: tuple :return: (private key filename, public key filename)", "public key pem file from a pfx :param str pfxfile:", "pfx file to export :param str passphrase: passphrase for pfx", "util.is_not_empty(ssh_args): ssh_cmd.extend(ssh_args) ssh_cmd.append('{}@{}'.format(username, remote_ip)) if util.is_not_empty(command): ssh_cmd.extend(command) logger.info('{} node {}:{}", "str data: clear text data to encrypt :param dict config:", "'ssh', '-o', 'StrictHostKeyChecking=no', '-o', 'UserKnownHostsFile={}'.format(os.devnull), '-i', str(ssh_private_key), '-p', str(remote_port), ]", "pfx_passphrase = settings.batch_shipyard_encryption_pfx_passphrase(config) pemfile = derive_private_key_pem_from_pfx(pfxfile, pfx_passphrase, None) if pemfile", "command: command :rtype: int or subprocess.Process :return: return code or", "settings.set_batch_shipyard_encryption_pfx_sha1_thumbprint( config, sha1_cert_tp) return PfxSettings( filename=pfxfile, passphrase=pfx_passphrase, sha1=sha1_cert_tp) def _rsa_encrypt_string(data,", ") from builtins import ( # noqa bytes, dict, int,", "remote_ip: remote ip address :param int remote_port: remote port :param", "settings.batch_shipyard_encryption_public_key_pem(config) pfxfile = settings.batch_shipyard_encryption_pfx_filename(config) passphrase = settings.batch_shipyard_encryption_pfx_passphrase(config) if pemfile is", "# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE", "proc.communicate(input=data)[0] finally: fp = pathlib.Path(pemfile) if fp.exists(): fp.unlink() return cleartext", "filename) \"\"\" if util.is_none_or_empty(prefix): prefix = _SSH_KEY_PREFIX privkey = pathlib.Path(export_path,", "passphrase = <PASSWORD>pass('Enter password for PFX: ') # convert pfx", "key :param str username: username :param bool sync: synchronous execution", "bool :return: private key filemode is ok \"\"\" def _mode_check(fstat,", "cert pem fp = pathlib.Path(f.name) if fp.exists(): fp.unlink() # get", "to any person obtaining a # copy of this software", "of the pfx file to export :param str passphrase: passphrase", "import stat import subprocess # local imports from . import", "# copy of this software and associated documentation files (the", "def generate_rdp_password(): # type: (None) -> str \"\"\"Generate an RDP", "thumbprint of PFX :param str pfxfile: name of the pfx", "if enabled \"\"\" if enabled: return _rsa_encrypt_string(string, config) else: return", "username, sync=True, shell=False, tty=False, ssh_args=None, command=None): # type: (str, int,", "password for PFX: ') # convert pfx to pem if", "return _parse_sha1_thumbprint_openssl(proc.communicate(input=pfxdump)[0]) def get_sha1_thumbprint_pem(pemfile): # type: (str) -> str \"\"\"Get", "(None) -> str \"\"\"Generate an RDP password :rtype: str :return:", "FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT", "encryption settings from configuration :param dict config: configuration settings :rtype:", "sha1 thumbprint of buffer \"\"\" # return just thumbprint (without", "def derive_private_key_pem_from_pfx(pfxfile, passphrase=None, pemfile=None): # type: (str, str, str) ->", "['openssl', 'req', '-new', '-nodes', '-x509', '-newkey', 'rsa:2048', '-keyout', privatekey, '-out',", "list, object, range, str, ascii, chr, hex, input, next, oct,", "raise RuntimeError( 'openssl encryption failed with returncode: {}'.format( proc.returncode)) return", ":param str remote_ip: remote ip address :param int remote_port: remote", "key file fp = pathlib.Path(privatekey) if fp.exists(): fp.unlink() # remove", "unicode_literals ) from builtins import ( # noqa bytes, dict,", "ip address :param int remote_port: remote port :param pathlib.Path ssh_private_key:", "absolute_import, division, print_function, unicode_literals ) from builtins import ( #", ":param bool shell: execute with shell :param bool tty: allocate", "\"\"\"Encrypt a string :param bool enabled: if encryption is enabled", "pubkey.rename(old) logger.info('generating ssh key pair to path: {}'.format(export_path)) subprocess.check_call( ['ssh-keygen',", "passphrase is None: passphrase = getpass.getpass('Enter password for PFX: ')", "\"\"\"Get SHA1 thumbprint of PFX :param str pfxfile: name of", "fp = pathlib.Path(f.name) if fp.exists(): fp.unlink() # get sha1 thumbprint", "of pfx return get_sha1_thumbprint_pfx(pfxfile, passphrase) def get_encryption_pfx_settings(config): # type: (dict)", "passphrase) def get_encryption_pfx_settings(config): # type: (dict) -> tuple \"\"\"Get PFX", "remote_port, ssh_private_key, username, sync=True, shell=False, tty=False, ssh_args=None, command=None): # type:", "'-out', pfxfile, '-inkey', privatekey, '-in', f.name, '-certfile', f.name, '-passin', 'pass:',", "None: f = tempfile.NamedTemporaryFile(mode='wb', delete=False) f.close() pemfile = f.name try:", "f.name, '-passin', 'pass:', '-passout', 'pass:' + passphrase] ) logger.debug('created PFX", "if pemfile is None: f = tempfile.NamedTemporaryFile(mode='wb', delete=False) f.close() pemfile", "None) try: if inkey is None: raise RuntimeError('public encryption key", "prefix = _SSH_KEY_PREFIX privkey = pathlib.Path(export_path, prefix) pubkey = pathlib.Path(export_path,", "(privkey, pubkey) def check_ssh_private_key_filemode(ssh_private_key): # type: (pathlib.Path) -> bool \"\"\"Check", "pemfile = settings.batch_shipyard_encryption_public_key_pem(config) pfxfile = settings.batch_shipyard_encryption_pfx_filename(config) passphrase = settings.batch_shipyard_encryption_pfx_passphrase(config) if", "-> tuple \"\"\"Generate an ssh keypair for use with user", "pfxfile, '-out', pemfile, '-password', 'pass:' + passphrase] ) except Exception:", "def connect_or_exec_ssh_command( remote_ip, remote_port, ssh_private_key, username, sync=True, shell=False, tty=False, ssh_args=None,", "Exception: fp = pathlib.Path(pemfile) if fp.exists(): fp.unlink() pemfile = None", "PFX: ') # compute sha1 thumbprint of pfx pfxdump =", "''.join(util.decode_string( output).strip().split('=')[1].split(':')).lower() def get_sha1_thumbprint_pfx(pfxfile, passphrase): # type: (str, str) ->", "returncode: {}'.format( proc.returncode)) return ciphertext finally: if derived: fp =", "via SSH or execute SSH command :param str remote_ip: remote", "failed with returncode: {}'.format( proc.returncode)) return ciphertext finally: if derived:", "pfxfile = settings.batch_shipyard_encryption_pfx_filename(config) 
pfx_passphrase = settings.batch_shipyard_encryption_pfx_passphrase(config) pemfile = derive_private_key_pem_from_pfx(pfxfile, pfx_passphrase,", "import ( # noqa bytes, dict, int, list, object, range,", "(str, str, str) -> str \"\"\"Derive a private key pem", "if fp.exists(): fp.unlink() pemfile = None return pemfile def derive_public_key_pem_from_pfx(pfxfile,", "for PFX: ') # compute sha1 thumbprint of pfx pfxdump", "derived = False if inkey is None: # derive pem", "if inkey is None: raise RuntimeError('public encryption key is invalid')", "# type: (str) -> str \"\"\"Get SHA1 thumbprint of PEM", "modes = frozenset((stat.S_IRWXG, stat.S_IRWXO)) return not any([_mode_check(fstat, x) for x", "return get_sha1_thumbprint_pfx(pfxfile, passphrase) def get_encryption_pfx_settings(config): # type: (dict) -> tuple", "def generate_ssh_keypair(export_path, prefix=None): # type: (str, str) -> tuple \"\"\"Generate", "str) -> tuple \"\"\"Generate an ssh keypair for use with", ". import util # create logger logger = logging.getLogger(__name__) util.setup_logger(logger)", "= [ 'ssh', '-o', 'StrictHostKeyChecking=no', '-o', 'UserKnownHostsFile={}'.format(os.devnull), '-i', str(ssh_private_key), '-p',", "ANY KIND, EXPRESS OR # IMPLIED, INCLUDING BUT NOT LIMITED", "fp.exists(): fp.unlink() pemfile = None return pemfile def derive_public_key_pem_from_pfx(pfxfile, passphrase=None,", "'passphrase', 'sha1']) def get_ssh_key_prefix(): # type: (None) -> str \"\"\"Get", "use, copy, modify, merge, publish, distribute, sublicense, # and/or sell", "pem and a derived pfx file :param dict config: configuration", "['openssl', 'x509', '-noout', '-fingerprint'], stdin=subprocess.PIPE, stdout=subprocess.PIPE ) return _parse_sha1_thumbprint_openssl(proc.communicate(input=pfxdump)[0]) def", "sha1 tp \"\"\" pfxfile = settings.batch_shipyard_encryption_pfx_filename(config) pfx_passphrase = settings.batch_shipyard_encryption_pfx_passphrase(config) sha1_cert_tp", "# and/or sell copies of the Software, and to permit", "= util.get_input('Enter PFX filename to create: ') if passphrase is", "# type: (dict) -> tuple \"\"\"Get PFX encryption settings from", "IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,", "ValueError('invalid ciphertext to decrypt') pfxfile = settings.batch_shipyard_encryption_pfx_filename(config) pfx_passphrase = settings.batch_shipyard_encryption_pfx_passphrase(config)", "# local imports from . import settings from . 
import", "private key filemode is too permissive: {}'.format( ssh_private_key)) # execute", "export_path: keypair export path :param str prefix: key prefix :rtype:", "SSH private key :rtype: bool :return: private key filemode is", "handle \"\"\" if not ssh_private_key.exists(): raise RuntimeError('SSH private key file", "configuration dict :rtype: str :return: encrypted string if enabled \"\"\"", "'-in', pemfile, '-pubout', '-outform', 'PEM', '-out', pemfile] ) except Exception:", "str username: username :param bool sync: synchronous execution :param bool", "sha1 thumbprint of pfx pfxdump = subprocess.check_output( ['openssl', 'pkcs12', '-in',", "'rsautl', '-decrypt', '-inkey', pemfile], stdin=subprocess.PIPE, stdout=subprocess.PIPE) cleartext = proc.communicate(input=data)[0] finally:", "encrypt :param dict config: configuration dict :rtype: str :return: encrypted", "for x in modes]) def connect_or_exec_ssh_command( remote_ip, remote_port, ssh_private_key, username,", "ssh_private_key.stat().st_mode modes = frozenset((stat.S_IRWXG, stat.S_IRWXO)) return not any([_mode_check(fstat, x) for", "file to export :param str passphrase: passphrase for pfx :rtype:", "is invalid') if passphrase is None: passphrase = getpass.getpass('Enter password", "str :return: decrypted cipher text \"\"\" if util.is_none_or_empty(ciphertext): raise ValueError('invalid", "def get_sha1_thumbprint_pem(pemfile): # type: (str) -> str \"\"\"Get SHA1 thumbprint", "OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR", "'PEM', '-out', pemfile] ) logger.debug('created public key PEM file: {}'.format(pemfile))", "(dict) -> str \"\"\"Generate a pem and a derived pfx", "ssh_args: ssh args :param tuple command: command :rtype: int or", "privatekey, '-pubout', '-outform', 'PEM', '-out', pemfile] ) logger.debug('created public key", "pem from pfx derived = True pfxfile = settings.batch_shipyard_encryption_pfx_filename(config) pfx_passphrase", "# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM,", "\"\"\" if util.is_none_or_empty(prefix): prefix = _SSH_KEY_PREFIX privkey = pathlib.Path(export_path, prefix)", "type: (bool, str, dict) -> str \"\"\"Encrypt a string :param", "remote ip address :param int remote_port: remote port :param pathlib.Path", "-> tuple \"\"\"Get PFX encryption settings from configuration :param dict", "rights reserved. 
# # MIT License # # Permission is", "prefix :rtype: str :return: ssh key prefix for remote fs", "type: (None) -> str \"\"\"Get SSH key prefix :rtype: str", "is None: pfx_passphrase = <PASSWORD>('Enter password for PFX: ') sha1_cert_tp", "-> bool \"\"\"Check SSH private key filemode :param pathlib.Path ssh_private_key:", "prefix + '.pub') if privkey.exists(): old = pathlib.Path(export_path, prefix +", "# MIT License # # Permission is hereby granted, free", "buffer: buffer to parse :rtype: str :return: sha1 thumbprint of", "logging.getLogger(__name__) util.setup_logger(logger) # global defines _SSH_KEY_PREFIX = 'id_rsa_shipyard' _REMOTEFS_SSH_KEY_PREFIX =", "password f = tempfile.NamedTemporaryFile(mode='wb', delete=False) f.close() try: subprocess.check_call( ['openssl', 'req',", "['openssl', 'pkcs12', '-nodes', '-in', pfxfile, '-out', pemfile, '-password', 'pass:' +", "of pem file to write to :rtype: str :return: path", "pathlib.Path(export_path, prefix + '.old') if old.exists(): old.unlink() privkey.rename(old) if pubkey.exists():", "passphrase for pfx :rtype: str :return: sha1 thumbprint of pfx", "(str) -> str \"\"\"Get SHA1 thumbprint from buffer :param str", "'-pubout', '-outform', 'PEM', '-out', pemfile] ) logger.debug('created public key PEM", "this permission notice shall be included in # all copies", "pow, round, super, filter, map, zip) # stdlib imports import", "encrypt a string :param str data: clear text data to", "= <PASSWORD>pass('Enter password for PFX: ') # convert pfx to", "= getpass.getpass('Enter password for PFX: ') # compute sha1 thumbprint", "passphrase): # type: (str, str) -> str \"\"\"Get SHA1 thumbprint", "get_ssh_key_prefix(): # type: (None) -> str \"\"\"Get SSH key prefix", "'-out', pemfile] ) logger.debug('created public key PEM file: {}'.format(pemfile)) #", "return ciphertext finally: if derived: fp = pathlib.Path(inkey) if fp.exists():", "username :param bool sync: synchronous execution :param bool shell: execute", "'pass:' + passphrase] ) logger.debug('created PFX file: {}'.format(pfxfile)) finally: #", "# convert pfx to pem if pemfile is None: f", "SSH command ssh_cmd = [ 'ssh', '-o', 'StrictHostKeyChecking=no', '-o', 'UserKnownHostsFile={}'.format(os.devnull),", "is furnished to do so, subject to the following conditions:", "PFX: ') if len(passphrase) == 0: print('passphrase cannot be empty')", "sha1 thumbprint of pfx \"\"\" # gather input pemfile =", "'-newkey', 'rsa:2048', '-keyout', privatekey, '-out', f.name, '-days', '730', '-subj', '/C=US/ST=None/L=None/O=None/CN=BatchShipyard']", "string :param str ciphertext: cipher text in base64 :param dict", "encrypted string if enabled \"\"\" if enabled: return _rsa_encrypt_string(string, config)", "print_function, unicode_literals ) from builtins import ( # noqa bytes,", "ARISING # FROM, OUT OF OR IN CONNECTION WITH THE", "flag): return bool(fstat & flag) if util.on_windows(): return True fstat", "no password f = tempfile.NamedTemporaryFile(mode='wb', delete=False) f.close() try: subprocess.check_call( ['openssl',", "'730', '-subj', '/C=US/ST=None/L=None/O=None/CN=BatchShipyard'] ) # extract public key from private", "privatekey = pemfile + '.key' # generate pem file with", "= subprocess.Popen( ['openssl', 'rsautl', '-encrypt', '-pubin', '-inkey', inkey], stdin=subprocess.PIPE, stdout=subprocess.PIPE)", "key file not found at: {}'.format( ssh_private_key)) # ensure file", "str \"\"\"RSA encrypt a string :param str data: clear text", ":return: rdp password \"\"\" return base64.b64encode(os.urandom(8)) def 
generate_ssh_keypair(export_path, prefix=None): #", "type: (str, str) -> tuple \"\"\"Generate an ssh keypair for", "key from private key subprocess.check_call( ['openssl', 'rsa', '-in', privatekey, '-pubout',", "(str) -> str \"\"\"Get SHA1 thumbprint of PEM :param str", "= settings.batch_shipyard_encryption_pfx_passphrase(config) pemfile = derive_private_key_pem_from_pfx(pfxfile, pfx_passphrase, None) if pemfile is", "pseudo-tty :param tuple ssh_args: ssh args :param tuple command: command", "config: configuration dict :rtype: str :return: decrypted cipher text \"\"\"", "None: raise ValueError('pfx file is invalid') if passphrase is None:", "convert pfx to pem if pemfile is None: f =", "import getpass import logging import os try: import pathlib2 as", "ciphertext: cipher text in base64 :param dict config: configuration dict", ":param str passphrase: passphrase for pfx :rtype: str :return: sha1", "defines _SSH_KEY_PREFIX = 'id_rsa_shipyard' _REMOTEFS_SSH_KEY_PREFIX = '{}_remotefs'.format(_SSH_KEY_PREFIX) # named tuples", "a string :param str data: clear text data to encrypt", "of buffer \"\"\" # return just thumbprint (without colons) from", "get thumbprint of pfx if not exists in config if", "bool \"\"\"Connect to node via SSH or execute SSH command", "a derived pfx file :param dict config: configuration dict :rtype:", "is None: pemfile = util.get_input('Enter public key PEM filename to", "shell=shell, pipe_stderr=True) def derive_private_key_pem_from_pfx(pfxfile, passphrase=None, pemfile=None): # type: (str, str,", "_REMOTEFS_SSH_KEY_PREFIX def generate_rdp_password(): # type: (None) -> str \"\"\"Generate an", "subprocess # local imports from . import settings from .", "pem file with private key and no password f =", "') # convert pfx to pem if pemfile is None:", "division, print_function, unicode_literals ) from builtins import ( # noqa", "finally: fp = pathlib.Path(pemfile) if fp.exists(): fp.unlink() return cleartext def", "user logins :param str export_path: keypair export path :param str", "dict) -> str \"\"\"Encrypt a string :param bool enabled: if", "None: passphrase = getpass.getpass('Enter password for PFX: ') # compute", "pemfile def derive_public_key_pem_from_pfx(pfxfile, passphrase=None, pemfile=None): # type: (str, str, str)", "key filename, public key filename) \"\"\" if util.is_none_or_empty(prefix): prefix =", "tuple, tuple) -> bool \"\"\"Connect to node via SSH or", "inkey is None: # derive pem from pfx derived =", "pfxfile: name of the pfx file to export :param str", "an RDP password :rtype: str :return: rdp password \"\"\" return", "encryption key is invalid') proc = subprocess.Popen( ['openssl', 'rsautl', '-encrypt',", "enabled \"\"\" if enabled: return _rsa_encrypt_string(string, config) else: return string", "for pfx :rtype: str :return: sha1 thumbprint of pfx \"\"\"", "pathlib.Path ssh_private_key: SSH private key :param str username: username :param", "Software without restriction, including without limitation # the rights to", "pfxfile, '-inkey', privatekey, '-in', f.name, '-certfile', f.name, '-passin', 'pass:', '-passout',", "['openssl', 'pkcs12', '-in', pfxfile, '-nodes', '-passin', 'pass:' + passphrase] )", "is None: raise RuntimeError('public encryption key is invalid') proc =", "dict config: configuration dict :rtype: str :return: base64-encoded cipher text", "proc.returncode != 0: raise RuntimeError( 'openssl encryption failed with returncode:", "\"\"\" if pfxfile is None: raise ValueError('pfxfile is invalid') if", ":param str buffer: buffer to parse 
:rtype: str :return: sha1", "create logger logger = logging.getLogger(__name__) util.setup_logger(logger) # global defines _SSH_KEY_PREFIX", ":rtype: tuple :return: (private key filename, public key filename) \"\"\"", "whom the # Software is furnished to do so, subject", "from . import util # create logger logger = logging.getLogger(__name__)", "(private key filename, public key filename) \"\"\" if util.is_none_or_empty(prefix): prefix", "True pfxfile = settings.batch_shipyard_encryption_pfx_filename(config) pfx_passphrase = settings.batch_shipyard_encryption_pfx_passphrase( config) inkey =", "gather input pemfile = settings.batch_shipyard_encryption_public_key_pem(config) pfxfile = settings.batch_shipyard_encryption_pfx_filename(config) passphrase =", "str \"\"\"Get SSH key prefix :rtype: str :return: ssh key", "pathlib.Path(export_path, prefix + '.pub') if privkey.exists(): old = pathlib.Path(export_path, prefix", "util.subprocess_nowait_pipe_stdout( ssh_cmd, shell=shell, pipe_stderr=True) def derive_private_key_pem_from_pfx(pfxfile, passphrase=None, pemfile=None): # type:", "hex, input, next, oct, open, pow, round, super, filter, map,", "'executing command on', remote_ip, remote_port, ssh_private_key)) if sync: return util.subprocess_with_output(ssh_cmd,", "'-pubin', '-inkey', inkey], stdin=subprocess.PIPE, stdout=subprocess.PIPE) ciphertext = util.base64_encode_string( proc.communicate(input=util.encode_string(data))[0]) if", "type: (str) -> str \"\"\"Get SHA1 thumbprint of PEM :param", "\"\"\"Derive a public key pem file from a pfx :param", "bytes, dict, int, list, object, range, str, ascii, chr, hex,", "str prefix: key prefix :rtype: tuple :return: (private key filename,", "private key :rtype: bool :return: private key filemode is ok", "-> str \"\"\"Derive a private key pem file from a", "'id_rsa_shipyard' _REMOTEFS_SSH_KEY_PREFIX = '{}_remotefs'.format(_SSH_KEY_PREFIX) # named tuples PfxSettings = collections.namedtuple(", "dict config: configuration dict :rtype: str :return: encrypted string if", "with shell :param bool tty: allocate pseudo-tty :param tuple ssh_args:", "\"\"\"Get SSH key prefix :rtype: str :return: ssh key prefix", "-> str \"\"\"Get remote fs SSH key prefix :rtype: str", "address :param int remote_port: remote port :param pathlib.Path ssh_private_key: SSH", "configuration settings :rtype: tuple :return: pfxfile, passphrase, sha1 tp \"\"\"", "a string :param str ciphertext: cipher text in base64 :param", ":param str export_path: keypair export path :param str prefix: key", "code or subprocess handle \"\"\" if not ssh_private_key.exists(): raise RuntimeError('SSH", "'rsautl', '-encrypt', '-pubin', '-inkey', inkey], stdin=subprocess.PIPE, stdout=subprocess.PIPE) ciphertext = util.base64_encode_string(", "= 'id_rsa_shipyard' _REMOTEFS_SSH_KEY_PREFIX = '{}_remotefs'.format(_SSH_KEY_PREFIX) # named tuples PfxSettings =", "'''''']) return (privkey, pubkey) def check_ssh_private_key_filemode(ssh_private_key): # type: (pathlib.Path) ->", "to node via SSH or execute SSH command :param str", "args :param tuple command: command :rtype: int or subprocess.Process :return:", "util.is_none_or_empty(data): raise ValueError('invalid data to encrypt') inkey = settings.batch_shipyard_encryption_public_key_pem(config) derived", "& flag) if util.on_windows(): return True fstat = ssh_private_key.stat().st_mode modes", "settings.batch_shipyard_encryption_pfx_filename(config) pfx_passphrase = settings.batch_shipyard_encryption_pfx_passphrase( config) inkey = 
derive_public_key_pem_from_pfx(pfxfile, pfx_passphrase, None)", "prefix: key prefix :rtype: tuple :return: (private key filename, public", "pemfile is None: raise RuntimeError('cannot decrypt without valid private key')", "AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING # FROM,", "if pemfile is None: pemfile = util.get_input('Enter public key PEM", "pfx_passphrase, None) try: if inkey is None: raise RuntimeError('public encryption", "# generate pem file with private key and no password", "private key :param str username: username :param bool sync: synchronous", "notice and this permission notice shall be included in #", "data: clear text data to encrypt :param dict config: configuration", "if derived: fp = pathlib.Path(inkey) if fp.exists(): fp.unlink() def _rsa_decrypt_string_with_pfx(ciphertext,", "THE SOFTWARE OR THE USE OR OTHER # DEALINGS IN", "keypair export path :param str prefix: key prefix :rtype: tuple", "subprocess handle \"\"\" if not ssh_private_key.exists(): raise RuntimeError('SSH private key", "cannot be empty') privatekey = pemfile + '.key' # generate", "pubkey = pathlib.Path(export_path, prefix + '.pub') if privkey.exists(): old =", "filemode :param pathlib.Path ssh_private_key: SSH private key :rtype: bool :return:", "bool tty: allocate pseudo-tty :param tuple ssh_args: ssh args :param", "private key subprocess.check_call( ['openssl', 'rsa', '-in', pemfile, '-pubout', '-outform', 'PEM',", "f = tempfile.NamedTemporaryFile(mode='wb', delete=False) f.close() try: subprocess.check_call( ['openssl', 'req', '-new',", "sell copies of the Software, and to permit persons to", "remote_port, ssh_private_key)) if sync: return util.subprocess_with_output(ssh_cmd, shell=shell) else: return util.subprocess_nowait_pipe_stdout(", "chr, hex, input, next, oct, open, pow, round, super, filter,", "from buffer :param str buffer: buffer to parse :rtype: str", "{}:{} with key {}'.format( 'connecting to' if util.is_none_or_empty(command) else 'executing", "to write to :rtype: str :return: path of pem file", "at: {}'.format( ssh_private_key)) # ensure file mode is set properly", "import pathlib2 as pathlib except ImportError: import pathlib import tempfile", "'StrictHostKeyChecking=no', '-o', 'UserKnownHostsFile={}'.format(os.devnull), '-i', str(ssh_private_key), '-p', str(remote_port), ] if tty:", "'pkcs12', '-nodes', '-in', pfxfile, '-out', pemfile, '-password', 'pass:' + passphrase]", "file mode is set properly for the private key if", "passphrase is None: passphrase = <PASSWORD>pass('Enter password for PFX: ')", "SHA1 thumbprint of PFX :param str pfxfile: name of the", "def get_remotefs_ssh_key_prefix(): # type: (None) -> str \"\"\"Get remote fs", "logins :param str export_path: keypair export path :param str prefix:", ") # extract public key from private key subprocess.check_call( ['openssl',", "settings.batch_shipyard_encryption_pfx_passphrase(config) if pemfile is None: pemfile = util.get_input('Enter public key", "passphrase=None, pemfile=None): # type: (str, str, str) -> str \"\"\"Derive", "ssh_private_key)) # ensure file mode is set properly for the", ":return: ssh key prefix \"\"\" return _SSH_KEY_PREFIX def get_remotefs_ssh_key_prefix(): #", ":return: sha1 thumbprint of pem \"\"\" proc = subprocess.Popen( ['openssl',", "# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO", "subprocess.check_call( ['openssl', 'rsa', '-in', pemfile, '-pubout', '-outform', 'PEM', '-out', pemfile]", ":param tuple command: command :rtype: int or subprocess.Process :return: return", "of charge, to any person obtaining a # copy of", "password \"\"\" return base64.b64encode(os.urandom(8)) def generate_ssh_keypair(export_path, prefix=None): # type: (str,", "= subprocess.check_output( ['openssl', 'pkcs12', '-in', pfxfile, '-nodes', '-passin', 'pass:' +", "\"\"\"RSA encrypt a string :param str data: clear text data", "sha1 thumbprint of pfx return get_sha1_thumbprint_pfx(pfxfile, passphrase) def get_encryption_pfx_settings(config): #", "try: import pathlib2 as pathlib except ImportError: import pathlib import", "util.subprocess_with_output(ssh_cmd, shell=shell) else: return util.subprocess_nowait_pipe_stdout( ssh_cmd, shell=shell, pipe_stderr=True) def derive_private_key_pem_from_pfx(pfxfile,", "private key') cleartext = None try: data = util.base64_decode_string(ciphertext) proc", "from pfx subprocess.check_call( ['openssl', 'pkcs12', '-nodes', '-in', pfxfile, '-out', pemfile,", "= '{}_remotefs'.format(_SSH_KEY_PREFIX) # named tuples PfxSettings = collections.namedtuple( 'PfxSettings', ['filename',", "str :return: ssh key prefix \"\"\" return _SSH_KEY_PREFIX def get_remotefs_ssh_key_prefix():", "passphrase = getpass.getpass('Enter password for PFX: ') # compute sha1", "# convert pem to pfx for Azure Batch service subprocess.check_call(", "FOR ANY CLAIM, DAMAGES OR OTHER # LIABILITY, WHETHER IN", ":return: return code or subprocess handle \"\"\" if not ssh_private_key.exists():", "= util.base64_encode_string( proc.communicate(input=util.encode_string(data))[0]) if proc.returncode != 0: raise RuntimeError( 'openssl", "remote_ip)) if util.is_not_empty(command): ssh_cmd.extend(command) logger.info('{} node {}:{} with key {}'.format(", "pfxfile is None: raise ValueError('pfx file is invalid') if passphrase", "False if inkey is None: # derive pem from pfx", "port :param pathlib.Path ssh_private_key: SSH private key :param str username:", "derive_public_key_pem_from_pfx(pfxfile, passphrase=None, pemfile=None): # type: (str, str, str) -> str", "charge, to any person obtaining a # copy of this", "collections.namedtuple( 'PfxSettings', ['filename', 'passphrase', 'sha1']) def get_ssh_key_prefix(): # type: (None)", "] if tty: ssh_cmd.append('-t') if util.is_not_empty(ssh_args): ssh_cmd.extend(ssh_args) ssh_cmd.append('{}@{}'.format(username, remote_ip)) if", "in config if util.is_none_or_empty(sha1_cert_tp): if pfx_passphrase is None: pfx_passphrase =", "(str, dict) -> str \"\"\"RSA encrypt a string :param str", "None return pemfile def _parse_sha1_thumbprint_openssl(output): # type: (str) -> str", "execute SSH command ssh_cmd = [ 'ssh', '-o', 'StrictHostKeyChecking=no', '-o',", "rights to use, copy, modify, merge, publish, distribute, sublicense, #", "'-export', '-out', pfxfile, '-inkey', privatekey, '-in', f.name, '-certfile', f.name, '-passin',", "def get_sha1_thumbprint_pfx(pfxfile, passphrase): # type: (str, str) -> str \"\"\"Get", "private key file fp = pathlib.Path(privatekey) if fp.exists(): fp.unlink() #", "if util.is_not_empty(command): ssh_cmd.extend(command) logger.info('{} node {}:{} with key {}'.format( 'connecting", "IS PROVIDED *AS IS*, WITHOUT WARRANTY OF ANY KIND, EXPRESS", "') sha1_cert_tp = get_sha1_thumbprint_pfx(pfxfile, pfx_passphrase) settings.set_batch_shipyard_encryption_pfx_sha1_thumbprint( config, sha1_cert_tp) return PfxSettings(", ":param str pemfile: path of 
pem file to write to", "string if enabled \"\"\" if enabled: return _rsa_encrypt_string(string, config) else:", "str buffer: buffer to parse :rtype: str :return: sha1 thumbprint", "-> str \"\"\"Encrypt a string :param bool enabled: if encryption", "bool shell: execute with shell :param bool tty: allocate pseudo-tty", "derive_public_key_pem_from_pfx(pfxfile, pfx_passphrase, None) try: if inkey is None: raise RuntimeError('public", "above openssl command # in lowercase. Expected openssl output is", "type: (None) -> str \"\"\"Generate an RDP password :rtype: str", "thumbprint of buffer \"\"\" # return just thumbprint (without colons)", "not any([_mode_check(fstat, x) for x in modes]) def connect_or_exec_ssh_command( remote_ip,", "'-o', 'StrictHostKeyChecking=no', '-o', 'UserKnownHostsFile={}'.format(os.devnull), '-i', str(ssh_private_key), '-p', str(remote_port), ] if", "SSH key prefix :rtype: str :return: ssh key prefix for", "set properly for the private key if not check_ssh_private_key_filemode(ssh_private_key): logger.warning(", "# return just thumbprint (without colons) from the above openssl", "or execute SSH command :param str remote_ip: remote ip address", "or substantial portions of the Software. # # THE SOFTWARE", "os try: import pathlib2 as pathlib except ImportError: import pathlib", "pemfile] ) logger.debug('created public key PEM file: {}'.format(pemfile)) # convert", "stat import subprocess # local imports from . import settings", "OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR", "range, str, ascii, chr, hex, input, next, oct, open, pow,", "a pem and a derived pfx file :param dict config:", "ssh_cmd, shell=shell, pipe_stderr=True) def derive_private_key_pem_from_pfx(pfxfile, passphrase=None, pemfile=None): # type: (str,", "if pfx_passphrase is None: pfx_passphrase = <PASSWORD>('Enter password for PFX:", "CLAIM, DAMAGES OR OTHER # LIABILITY, WHETHER IN AN ACTION", "command ssh_cmd = [ 'ssh', '-o', 'StrictHostKeyChecking=no', '-o', 'UserKnownHostsFile={}'.format(os.devnull), '-i',", "str export_path: keypair export path :param str prefix: key prefix", "= settings.batch_shipyard_encryption_pfx_filename(config) pfx_passphrase = settings.batch_shipyard_encryption_pfx_passphrase(config) sha1_cert_tp = settings.batch_shipyard_encryption_pfx_sha1_thumbprint( config) #", "PFX file: {}'.format(pfxfile)) finally: # remove rsa private key file", "ssh_private_key)) # execute SSH command ssh_cmd = [ 'ssh', '-o',", "f.close() try: subprocess.check_call( ['openssl', 'req', '-new', '-nodes', '-x509', '-newkey', 'rsa:2048',", "rsa private key file fp = pathlib.Path(privatekey) if fp.exists(): fp.unlink()", "# Software is furnished to do so, subject to the", "'connecting to' if util.is_none_or_empty(command) else 'executing command on', remote_ip, remote_port,", "= pemfile + '.key' # generate pem file with private", "stdlib imports import base64 import collections import getpass import logging", ":rtype: str :return: sha1 thumbprint of buffer \"\"\" # return", "try: data = util.base64_decode_string(ciphertext) proc = subprocess.Popen( ['openssl', 'rsautl', '-decrypt',", "ssh_cmd.extend(ssh_args) ssh_cmd.append('{}@{}'.format(username, remote_ip)) if util.is_not_empty(command): ssh_cmd.extend(command) logger.info('{} node {}:{} with", "'-t', 'rsa', '-N', '''''']) return (privkey, pubkey) def check_ssh_private_key_filemode(ssh_private_key): #", "not check_ssh_private_key_filemode(ssh_private_key): logger.warning( 'SSH private key filemode is too permissive:", ":param dict config: configuration 
dict :rtype: str :return: base64-encoded cipher", "util.is_none_or_empty(passphrase): passphrase = getpass.getpass('Enter password for PFX: ') if len(passphrase)", "to whom the # Software is furnished to do so,", "LIABLE FOR ANY CLAIM, DAMAGES OR OTHER # LIABILITY, WHETHER", "ssh key prefix \"\"\" return _SSH_KEY_PREFIX def get_remotefs_ssh_key_prefix(): # type:", "so, subject to the following conditions: # # The above", "type: (str, str, str) -> str \"\"\"Derive a private key", "pfx return get_sha1_thumbprint_pfx(pfxfile, passphrase) def get_encryption_pfx_settings(config): # type: (dict) ->", "PfxSettings( filename=pfxfile, passphrase=pfx_passphrase, sha1=sha1_cert_tp) def _rsa_encrypt_string(data, config): # type: (str,", "dict config: configuration dict :rtype: str :return: decrypted cipher text", "imports import base64 import collections import getpass import logging import", "pathlib.Path(pemfile) if fp.exists(): fp.unlink() pemfile = None return pemfile def", "if util.is_none_or_empty(prefix): prefix = _SSH_KEY_PREFIX privkey = pathlib.Path(export_path, prefix) pubkey", "None return pemfile def derive_public_key_pem_from_pfx(pfxfile, passphrase=None, pemfile=None): # type: (str,", "of pem file \"\"\" if pfxfile is None: raise ValueError('pfx", "privkey.exists(): old = pathlib.Path(export_path, prefix + '.old') if old.exists(): old.unlink()", "is ok \"\"\" def _mode_check(fstat, flag): return bool(fstat & flag)", "ssh_cmd = [ 'ssh', '-o', 'StrictHostKeyChecking=no', '-o', 'UserKnownHostsFile={}'.format(os.devnull), '-i', str(ssh_private_key),", "= tempfile.NamedTemporaryFile(mode='wb', delete=False) f.close() pemfile = f.name try: # create", ":param pathlib.Path ssh_private_key: SSH private key :param str username: username", "the following conditions: # # The above copyright notice and", "'-inkey', pemfile], stdin=subprocess.PIPE, stdout=subprocess.PIPE) cleartext = proc.communicate(input=data)[0] finally: fp =", "str pfxfile: name of the pfx file to export :param", "return just thumbprint (without colons) from the above openssl command", "pfxfile: pfx file :param str passphrase: passphrase for pfx :param", "\"\"\"Connect to node via SSH or execute SSH command :param", "if not ssh_private_key.exists(): raise RuntimeError('SSH private key file not found", "None: pemfile = util.get_input('Enter public key PEM filename to create:", ") except Exception: fp = pathlib.Path(pemfile) if fp.exists(): fp.unlink() pemfile", "with returncode: {}'.format( proc.returncode)) return ciphertext finally: if derived: fp", "logger.debug('created public key PEM file: {}'.format(pemfile)) # convert pem to", "get_remotefs_ssh_key_prefix(): # type: (None) -> str \"\"\"Get remote fs SSH", "<PASSWORD>pass('Enter password for PFX: ') # convert pfx to pem", "'pass:' + passphrase] ) except Exception: fp = pathlib.Path(pemfile) if", "in # all copies or substantial portions of the Software.", "RuntimeError('public encryption key is invalid') proc = subprocess.Popen( ['openssl', 'rsautl',", "if pfxfile is None: pfxfile = util.get_input('Enter PFX filename to", "file \"\"\" if pfxfile is None: raise ValueError('pfx file is", "files (the \"Software\"), # to deal in the Software without", "= subprocess.Popen( ['openssl', 'x509', '-noout', '-fingerprint', '-in', pemfile], stdout=subprocess.PIPE )", "without restriction, including without limitation # the rights to use,", "a string :param bool enabled: if encryption is enabled :param", "name of the pfx file to export :param str passphrase:", "'-new', '-nodes', '-x509', 
def get_ssh_key_prefix():
    # type: (None) -> str
    """Get SSH key prefix
    :rtype: str
    :return: ssh key prefix
    """
    return _SSH_KEY_PREFIX


def get_remotefs_ssh_key_prefix():
    # type: (None) -> str
    """Get remote fs SSH key prefix
    :rtype: str
    :return: ssh key prefix for remote fs
    """
    return _REMOTEFS_SSH_KEY_PREFIX


def generate_rdp_password():
    # type: (None) -> str
    """Generate an RDP password
    :rtype: str
    :return: rdp password
    """
    return base64.b64encode(os.urandom(8))

def generate_ssh_keypair(export_path, prefix=None):
    # type: (str, str) -> tuple
    """Generate an ssh keypair for use with user logins
    :param str export_path: keypair export path
    :param str prefix: key prefix
    :rtype: tuple
    :return: (private key filename, public key filename)
    """
    if util.is_none_or_empty(prefix):
        prefix = _SSH_KEY_PREFIX
    privkey = pathlib.Path(export_path, prefix)
    pubkey = pathlib.Path(export_path, prefix + '.pub')
    if privkey.exists():
        old = pathlib.Path(export_path, prefix + '.old')
        if old.exists():
            old.unlink()
        privkey.rename(old)
    if pubkey.exists():
        old = pathlib.Path(export_path, prefix + '.pub.old')
        if old.exists():
            old.unlink()
        pubkey.rename(old)
    logger.info('generating ssh key pair to path: {}'.format(export_path))
    subprocess.check_call(
        ['ssh-keygen', '-f', str(privkey), '-t', 'rsa', '-N', ''''''])
    return (privkey, pubkey)

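# Usage sketch (added commentary, not part of the original module): the
# export path and key prefix below are hypothetical. Note the function
# rotates any existing keypair to '.old'/'.pub.old' before invoking
# ssh-keygen.
#
#   privkey, pubkey = generate_ssh_keypair('/tmp/keys', prefix='mykey')
#   # privkey -> pathlib.Path('/tmp/keys/mykey')
#   # pubkey  -> pathlib.Path('/tmp/keys/mykey.pub')
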
def check_ssh_private_key_filemode(ssh_private_key):
    # type: (pathlib.Path) -> bool
    """Check SSH private key filemode
    :param pathlib.Path ssh_private_key: SSH private key
    :rtype: bool
    :return: private key filemode is ok
    """
    def _mode_check(fstat, flag):
        return bool(fstat & flag)
    if util.on_windows():
        return True
    fstat = ssh_private_key.stat().st_mode
    modes = frozenset((stat.S_IRWXG, stat.S_IRWXO))
    return not any([_mode_check(fstat, x) for x in modes])

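# Added note: the filemode check above rejects keys with any group/other
# permission bits set. For example, mode 0o644 fails because
# 0o644 & stat.S_IRWXO != 0 (world-readable), while 0o600 passes; on
# Windows the check is skipped and always returns True.
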
def connect_or_exec_ssh_command(
        remote_ip, remote_port, ssh_private_key, username, sync=True,
        shell=False, tty=False, ssh_args=None, command=None):
    # type: (str, int, pathlib.Path, str, bool, bool, bool, tuple,
    #        tuple) -> int
    """Connect to node via SSH or execute SSH command
    :param str remote_ip: remote ip address
    :param int remote_port: remote port
    :param pathlib.Path ssh_private_key: SSH private key
    :param str username: username
    :param bool sync: synchronous execution
    :param bool shell: execute with shell
    :param bool tty: allocate pseudo-tty
    :param tuple ssh_args: ssh args
    :param tuple command: command
    :rtype: int or subprocess.Process
    :return: return code or subprocess handle
    """
    if not ssh_private_key.exists():
        raise RuntimeError('SSH private key file not found at: {}'.format(
            ssh_private_key))
    # ensure file mode is set properly for the private key
    if not check_ssh_private_key_filemode(ssh_private_key):
        logger.warning(
            'SSH private key filemode is too permissive: {}'.format(
                ssh_private_key))
    # execute SSH command
    ssh_cmd = [
        'ssh', '-o', 'StrictHostKeyChecking=no',
        '-o', 'UserKnownHostsFile={}'.format(os.devnull),
        '-i', str(ssh_private_key), '-p', str(remote_port),
    ]
    if tty:
        ssh_cmd.append('-t')
    if util.is_not_empty(ssh_args):
        ssh_cmd.extend(ssh_args)
    ssh_cmd.append('{}@{}'.format(username, remote_ip))
    if util.is_not_empty(command):
        ssh_cmd.extend(command)
    logger.info('{} node {}:{} with key {}'.format(
        'connecting to' if util.is_none_or_empty(command)
        else 'executing command on', remote_ip, remote_port,
        ssh_private_key))
    if sync:
        return util.subprocess_with_output(ssh_cmd, shell=shell)
    else:
        return util.subprocess_nowait_pipe_stdout(
            ssh_cmd, shell=shell, pipe_stderr=True)

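# Usage sketch (added; host, port, and key path are illustrative values
# only): run a one-off command on a node.
#
#   rc = connect_or_exec_ssh_command(
#       '10.0.0.4', 22, pathlib.Path('/tmp/keys/mykey'), 'shipyard',
#       command=('uptime',))
#
# With sync=True (the default) this returns the exit code via
# util.subprocess_with_output; with sync=False a process handle with
# stdout piped is returned instead.
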
def derive_private_key_pem_from_pfx(pfxfile, passphrase=None, pemfile=None):
    # type: (str, str, str) -> str
    """Derive a private key pem file from a pfx
    :param str pfxfile: pfx file
    :param str passphrase: passphrase for pfx
    :param str pemfile: path of pem file to write to
    :rtype: str
    :return: path of pem file
    """
    if pfxfile is None:
        raise ValueError('pfx file is invalid')
    if passphrase is None:
        passphrase = getpass.getpass('Enter password for PFX: ')
    # convert pfx to pem
    if pemfile is None:
        f = tempfile.NamedTemporaryFile(mode='wb', delete=False)
        f.close()
        pemfile = f.name
    try:
        # create pem from pfx
        subprocess.check_call(
            ['openssl', 'pkcs12', '-nodes', '-in', pfxfile, '-out',
             pemfile, '-password', 'pass:' + passphrase]
        )
    except Exception:
        fp = pathlib.Path(pemfile)
        if fp.exists():
            fp.unlink()
        pemfile = None
    return pemfile

def derive_public_key_pem_from_pfx(pfxfile, passphrase=None, pemfile=None):
    # type: (str, str, str) -> str
    """Derive a public key pem file from a pfx
    :param str pfxfile: pfx file
    :param str passphrase: passphrase for pfx
    :param str pemfile: path of pem file to write to
    :rtype: str
    :return: path of pem file
    """
    if pfxfile is None:
        raise ValueError('pfx file is invalid')
    if passphrase is None:
        passphrase = getpass.getpass('Enter password for PFX: ')
    # convert pfx to pem
    if pemfile is None:
        f = tempfile.NamedTemporaryFile(mode='wb', delete=False)
        f.close()
        pemfile = f.name
    try:
        # create pem from pfx
        subprocess.check_call(
            ['openssl', 'pkcs12', '-nodes', '-in', pfxfile, '-out',
             pemfile, '-password', 'pass:' + passphrase]
        )
        # extract public key from private key
        subprocess.check_call(
            ['openssl', 'rsa', '-in', pemfile, '-pubout', '-outform',
             'PEM', '-out', pemfile]
        )
    except Exception:
        fp = pathlib.Path(pemfile)
        if fp.exists():
            fp.unlink()
        pemfile = None
    return pemfile

def _parse_sha1_thumbprint_openssl(output):
    # type: (str) -> str
    """Get SHA1 thumbprint from buffer
    :param str output: buffer to parse
    :rtype: str
    :return: sha1 thumbprint of buffer
    """
    # return just thumbprint (without colons) from the above openssl command
    # in lowercase. Expected openssl output is in the form:
    # SHA1 Fingerprint=<thumbprint>
    return ''.join(util.decode_string(
        output).strip().split('=')[1].split(':')).lower()

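# Added example: for a representative openssl fingerprint line,
#   _parse_sha1_thumbprint_openssl(b'SHA1 Fingerprint=DE:AD:BE:EF')
# returns 'deadbeef' (text after '=', colons stripped, lowercased).
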
def get_sha1_thumbprint_pfx(pfxfile, passphrase):
    # type: (str, str) -> str
    """Get SHA1 thumbprint of PFX
    :param str pfxfile: name of the pfx file to export
    :param str passphrase: passphrase for pfx
    :rtype: str
    :return: sha1 thumbprint of pfx
    """
    if pfxfile is None:
        raise ValueError('pfxfile is invalid')
    if passphrase is None:
        passphrase = getpass.getpass('Enter password for PFX: ')
    # compute sha1 thumbprint of pfx
    pfxdump = subprocess.check_output(
        ['openssl', 'pkcs12', '-in', pfxfile, '-nodes', '-passin',
         'pass:' + passphrase]
    )
    proc = subprocess.Popen(
        ['openssl', 'x509', '-noout', '-fingerprint'],
        stdin=subprocess.PIPE, stdout=subprocess.PIPE
    )
    return _parse_sha1_thumbprint_openssl(proc.communicate(input=pfxdump)[0])

def get_sha1_thumbprint_pem(pemfile):
    # type: (str) -> str
    """Get SHA1 thumbprint of PEM
    :param str pemfile: name of the pem file
    :rtype: str
    :return: sha1 thumbprint of pem
    """
    proc = subprocess.Popen(
        ['openssl', 'x509', '-noout', '-fingerprint', '-in', pemfile],
        stdout=subprocess.PIPE
    )
    return _parse_sha1_thumbprint_openssl(proc.communicate()[0])

def generate_pem_pfx_certificates(config):
    # type: (dict) -> str
    """Generate a pem and a derived pfx file
    :param dict config: configuration dict
    :rtype: str
    :return: sha1 thumbprint of pfx
    """
    # gather input
    pemfile = settings.batch_shipyard_encryption_public_key_pem(config)
    pfxfile = settings.batch_shipyard_encryption_pfx_filename(config)
    passphrase = settings.batch_shipyard_encryption_pfx_passphrase(config)
    if pemfile is None:
        pemfile = util.get_input('Enter public key PEM filename to create: ')
    if pfxfile is None:
        pfxfile = util.get_input('Enter PFX filename to create: ')
    if passphrase is None:
        while util.is_none_or_empty(passphrase):
            passphrase = getpass.getpass('Enter password for PFX: ')
            if len(passphrase) == 0:
                print('passphrase cannot be empty')
    privatekey = pemfile + '.key'
    # generate pem file with private key and no password
    f = tempfile.NamedTemporaryFile(mode='wb', delete=False)
    f.close()
    try:
        subprocess.check_call(
            ['openssl', 'req', '-new', '-nodes', '-x509', '-newkey',
             'rsa:2048', '-keyout', privatekey, '-out', f.name, '-days',
             '730', '-subj', '/C=US/ST=None/L=None/O=None/CN=BatchShipyard']
        )
        # extract public key from private key
        subprocess.check_call(
            ['openssl', 'rsa', '-in', privatekey, '-pubout', '-outform',
             'PEM', '-out', pemfile]
        )
        logger.debug('created public key PEM file: {}'.format(pemfile))
        # convert pem to pfx for Azure Batch service
        subprocess.check_call(
            ['openssl', 'pkcs12', '-export', '-out', pfxfile, '-inkey',
             privatekey, '-in', f.name, '-certfile', f.name, '-passin',
             'pass:', '-passout', 'pass:' + passphrase]
        )
        logger.debug('created PFX file: {}'.format(pfxfile))
    finally:
        # remove rsa private key file
        fp = pathlib.Path(privatekey)
        if fp.exists():
            fp.unlink()
        # remove temp cert pem
        fp = pathlib.Path(f.name)
        if fp.exists():
            fp.unlink()
    # get sha1 thumbprint of pfx
    return get_sha1_thumbprint_pfx(pfxfile, passphrase)

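# Added commentary: the function above drives the following openssl
# pipeline (placeholder filenames, sketch only):
#
#   openssl req -new -nodes -x509 -newkey rsa:2048 -keyout key.pem.key \
#       -out cert.pem -days 730 \
#       -subj /C=US/ST=None/L=None/O=None/CN=BatchShipyard
#   openssl rsa -in key.pem.key -pubout -outform PEM -out key.pem
#   openssl pkcs12 -export -out cert.pfx -inkey key.pem.key -in cert.pem \
#       -certfile cert.pem -passin pass: -passout pass:<passphrase>
#
# with key.pem.key and the temporary certificate removed afterwards.
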
def get_encryption_pfx_settings(config):
    # type: (dict) -> tuple
    """Get PFX encryption settings from configuration
    :param dict config: configuration settings
    :rtype: tuple
    :return: pfxfile, passphrase, sha1 tp
    """
    pfxfile = settings.batch_shipyard_encryption_pfx_filename(config)
    pfx_passphrase = settings.batch_shipyard_encryption_pfx_passphrase(config)
    sha1_cert_tp = settings.batch_shipyard_encryption_pfx_sha1_thumbprint(
        config)
    # manually get thumbprint of pfx if not exists in config
    if util.is_none_or_empty(sha1_cert_tp):
        if pfx_passphrase is None:
            pfx_passphrase = getpass.getpass('Enter password for PFX: ')
        sha1_cert_tp = get_sha1_thumbprint_pfx(pfxfile, pfx_passphrase)
        settings.set_batch_shipyard_encryption_pfx_sha1_thumbprint(
            config, sha1_cert_tp)
    return PfxSettings(
        filename=pfxfile, passphrase=pfx_passphrase, sha1=sha1_cert_tp)

def _rsa_encrypt_string(data, config):
    # type: (str, dict) -> str
    """RSA encrypt a string
    :param str data: clear text data to encrypt
    :param dict config: configuration dict
    :rtype: str
    :return: base64-encoded cipher text
    """
    if util.is_none_or_empty(data):
        raise ValueError('invalid data to encrypt')
    inkey = settings.batch_shipyard_encryption_public_key_pem(config)
    derived = False
    if inkey is None:
        # derive pem from pfx
        derived = True
        pfxfile = settings.batch_shipyard_encryption_pfx_filename(config)
        pfx_passphrase = settings.batch_shipyard_encryption_pfx_passphrase(
            config)
        inkey = derive_public_key_pem_from_pfx(pfxfile, pfx_passphrase, None)
    try:
        if inkey is None:
            raise RuntimeError('public encryption key is invalid')
        proc = subprocess.Popen(
            ['openssl', 'rsautl', '-encrypt', '-pubin', '-inkey', inkey],
            stdin=subprocess.PIPE, stdout=subprocess.PIPE)
        ciphertext = util.base64_encode_string(
            proc.communicate(input=util.encode_string(data))[0])
        if proc.returncode != 0:
            raise RuntimeError(
                'openssl encryption failed with returncode: {}'.format(
                    proc.returncode))
        return ciphertext
    finally:
        if derived:
            fp = pathlib.Path(inkey)
            if fp.exists():
                fp.unlink()

def _rsa_decrypt_string_with_pfx(ciphertext, config):
    # type: (str, dict) -> str
    """RSA decrypt a string
    :param str ciphertext: cipher text in base64
    :param dict config: configuration dict
    :rtype: str
    :return: decrypted cipher text
    """
    if util.is_none_or_empty(ciphertext):
        raise ValueError('invalid ciphertext to decrypt')
    pfxfile = settings.batch_shipyard_encryption_pfx_filename(config)
    pfx_passphrase = settings.batch_shipyard_encryption_pfx_passphrase(config)
    pemfile = derive_private_key_pem_from_pfx(pfxfile, pfx_passphrase, None)
    if pemfile is None:
        raise RuntimeError('cannot decrypt without valid private key')
    cleartext = None
    try:
        data = util.base64_decode_string(ciphertext)
        proc = subprocess.Popen(
            ['openssl', 'rsautl', '-decrypt', '-inkey', pemfile],
            stdin=subprocess.PIPE, stdout=subprocess.PIPE)
        cleartext = proc.communicate(input=data)[0]
    finally:
        fp = pathlib.Path(pemfile)
        if fp.exists():
            fp.unlink()
    return cleartext

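# Round-trip sketch (added; assumes a config dict that resolves to a valid
# PFX and/or public key PEM through the settings module):
#
#   ct = _rsa_encrypt_string('secret', config)     # base64 ciphertext
#   pt = _rsa_decrypt_string_with_pfx(ct, config)  # b'secret'
#
# Note the decrypted value is bytes, since it is raw openssl stdout.
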
') if pfxfile is None: pfxfile = util.get_input('Enter PFX", "type: (str, str) -> str \"\"\"Get SHA1 thumbprint of PFX", "OTHERWISE, ARISING # FROM, OUT OF OR IN CONNECTION WITH", "configuration dict :rtype: str :return: base64-encoded cipher text \"\"\" if", "tempfile import stat import subprocess # local imports from .", "dict config: configuration dict :rtype: str :return: sha1 thumbprint of", "= pathlib.Path(pemfile) if fp.exists(): fp.unlink() return cleartext def encrypt_string(enabled, string,", "'rsa', '-N', '''''']) return (privkey, pubkey) def check_ssh_private_key_filemode(ssh_private_key): # type:", "str \"\"\"Get SHA1 thumbprint of PFX :param str pfxfile: name", "pemfile is None: f = tempfile.NamedTemporaryFile(mode='wb', delete=False) f.close() pemfile =", "text \"\"\" if util.is_none_or_empty(ciphertext): raise ValueError('invalid ciphertext to decrypt') pfxfile", "remote fs \"\"\" return _REMOTEFS_SSH_KEY_PREFIX def generate_rdp_password(): # type: (None)", "bool sync: synchronous execution :param bool shell: execute with shell", "# type: (pathlib.Path) -> bool \"\"\"Check SSH private key filemode", "# remove temp cert pem fp = pathlib.Path(f.name) if fp.exists():", "from builtins import ( # noqa bytes, dict, int, list,", "file to write to :rtype: str :return: path of pem", "passphrase: passphrase for pfx :param str pemfile: path of pem", ":rtype: bool :return: private key filemode is ok \"\"\" def", "command # in lowercase. Expected openssl output is in the", "'-out', pemfile] ) except Exception: fp = pathlib.Path(pemfile) if fp.exists():", "not ssh_private_key.exists(): raise RuntimeError('SSH private key file not found at:", "( # noqa bytes, dict, int, list, object, range, str,", "raise ValueError('pfx file is invalid') if passphrase is None: passphrase", "= derive_private_key_pem_from_pfx(pfxfile, pfx_passphrase, None) if pemfile is None: raise RuntimeError('cannot", "from __future__ import ( absolute_import, division, print_function, unicode_literals ) from", "public key filename) \"\"\" if util.is_none_or_empty(prefix): prefix = _SSH_KEY_PREFIX privkey", "encryption failed with returncode: {}'.format( proc.returncode)) return ciphertext finally: if", "derived = True pfxfile = settings.batch_shipyard_encryption_pfx_filename(config) pfx_passphrase = settings.batch_shipyard_encryption_pfx_passphrase( config)", "thumbprint of pfx return get_sha1_thumbprint_pfx(pfxfile, passphrase) def get_encryption_pfx_settings(config): # type:", "x) for x in modes]) def connect_or_exec_ssh_command( remote_ip, remote_port, ssh_private_key,", "filename to create: ') if passphrase is None: while util.is_none_or_empty(passphrase):", "str pfxfile: pfx file :param str passphrase: passphrase for pfx", "hereby granted, free of charge, to any person obtaining a", "+ passphrase] ) except Exception: fp = pathlib.Path(pemfile) if fp.exists():", "= frozenset((stat.S_IRWXG, stat.S_IRWXO)) return not any([_mode_check(fstat, x) for x in", ":param dict config: configuration dict :rtype: str :return: decrypted cipher", "OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE", "fp.unlink() # remove temp cert pem fp = pathlib.Path(f.name) if", "str remote_ip: remote ip address :param int remote_port: remote port", "super, filter, map, zip) # stdlib imports import base64 import", "# all copies or substantial portions of the Software. 
#", "mode is set properly for the private key if not", "output is in the form: # SHA1 Fingerprint=<thumbprint> return ''.join(util.decode_string(", "dict) -> str \"\"\"RSA encrypt a string :param str data:", "return (privkey, pubkey) def check_ssh_private_key_filemode(ssh_private_key): # type: (pathlib.Path) -> bool", "ok \"\"\" def _mode_check(fstat, flag): return bool(fstat & flag) if", "if util.is_none_or_empty(data): raise ValueError('invalid data to encrypt') inkey = settings.batch_shipyard_encryption_public_key_pem(config)", "from configuration :param dict config: configuration settings :rtype: tuple :return:", ":rtype: str :return: ssh key prefix for remote fs \"\"\"", "if passphrase is None: passphrase = <PASSWORD>('Enter password for PFX:", "FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR", "string :param bool enabled: if encryption is enabled :param str", "bool enabled: if encryption is enabled :param str string: string", "old.unlink() privkey.rename(old) if pubkey.exists(): old = pathlib.Path(export_path, prefix + '.pub.old')", "= getpass.getpass('Enter password for PFX: ') if len(passphrase) == 0:", "old = pathlib.Path(export_path, prefix + '.old') if old.exists(): old.unlink() privkey.rename(old)", "open, pow, round, super, filter, map, zip) # stdlib imports", "stdout=subprocess.PIPE ) return _parse_sha1_thumbprint_openssl(proc.communicate()[0]) def generate_pem_pfx_certificates(config): # type: (dict) ->", "# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR", "pfxfile = settings.batch_shipyard_encryption_pfx_filename(config) pfx_passphrase = settings.batch_shipyard_encryption_pfx_passphrase( config) inkey = derive_public_key_pem_from_pfx(pfxfile,", "# DEALINGS IN THE SOFTWARE. # compat imports from __future__", ":param str pfxfile: pfx file :param str passphrase: passphrase for", "all copies or substantial portions of the Software. 
# #", "cleartext def encrypt_string(enabled, string, config): # type: (bool, str, dict)", "Software is furnished to do so, subject to the following", "-> str \"\"\"Get SHA1 thumbprint of PEM :param str pfxfile:", "prefix=None): # type: (str, str) -> tuple \"\"\"Generate an ssh", "tempfile.NamedTemporaryFile(mode='wb', delete=False) f.close() try: subprocess.check_call( ['openssl', 'req', '-new', '-nodes', '-x509',", "pfxfile, '-out', pemfile, '-password', 'pass:' + passphrase] ) # extract", "flag) if util.on_windows(): return True fstat = ssh_private_key.stat().st_mode modes =", "convert pem to pfx for Azure Batch service subprocess.check_call( ['openssl',", "tty: ssh_cmd.append('-t') if util.is_not_empty(ssh_args): ssh_cmd.extend(ssh_args) ssh_cmd.append('{}@{}'.format(username, remote_ip)) if util.is_not_empty(command): ssh_cmd.extend(command)", "str :return: base64-encoded cipher text \"\"\" if util.is_none_or_empty(data): raise ValueError('invalid", "ciphertext = util.base64_encode_string( proc.communicate(input=util.encode_string(data))[0]) if proc.returncode != 0: raise RuntimeError(", "pemfile] ) except Exception: fp = pathlib.Path(pemfile) if fp.exists(): fp.unlink()", "pem file \"\"\" if pfxfile is None: raise ValueError('pfx file", "'.key' # generate pem file with private key and no", "'.pub.old') if old.exists(): old.unlink() pubkey.rename(old) logger.info('generating ssh key pair to", "fs \"\"\" return _REMOTEFS_SSH_KEY_PREFIX def generate_rdp_password(): # type: (None) ->", "of pem \"\"\" proc = subprocess.Popen( ['openssl', 'x509', '-noout', '-fingerprint',", "int or subprocess.Process :return: return code or subprocess handle \"\"\"", "'-i', str(ssh_private_key), '-p', str(remote_port), ] if tty: ssh_cmd.append('-t') if util.is_not_empty(ssh_args):", ":return: private key filemode is ok \"\"\" def _mode_check(fstat, flag):", "command on', remote_ip, remote_port, ssh_private_key)) if sync: return util.subprocess_with_output(ssh_cmd, shell=shell)", "getpass.getpass('Enter password for PFX: ') if len(passphrase) == 0: print('passphrase", "\"\"\" pfxfile = settings.batch_shipyard_encryption_pfx_filename(config) pfx_passphrase = settings.batch_shipyard_encryption_pfx_passphrase(config) sha1_cert_tp = settings.batch_shipyard_encryption_pfx_sha1_thumbprint(", "to do so, subject to the following conditions: # #", "SHALL THE # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR", "pathlib.Path(f.name) if fp.exists(): fp.unlink() # get sha1 thumbprint of pfx", "with private key and no password f = tempfile.NamedTemporaryFile(mode='wb', delete=False)", "'openssl encryption failed with returncode: {}'.format( proc.returncode)) return ciphertext finally:", "\"\"\" # gather input pemfile = settings.batch_shipyard_encryption_public_key_pem(config) pfxfile = settings.batch_shipyard_encryption_pfx_filename(config)", "thumbprint of pem \"\"\" proc = subprocess.Popen( ['openssl', 'x509', '-noout',", "to permit persons to whom the # Software is furnished", "openssl output is in the form: # SHA1 Fingerprint=<thumbprint> return", "passphrase = <PASSWORD>('Enter password for PFX: ') # convert pfx", ") logger.debug('created public key PEM file: {}'.format(pemfile)) # convert pem", "thumbprint of pfx \"\"\" if pfxfile is None: raise ValueError('pfxfile", "dict :rtype: str :return: sha1 thumbprint of pfx \"\"\" #", "ciphertext finally: if derived: fp = pathlib.Path(inkey) if fp.exists(): fp.unlink()", "base64.b64encode(os.urandom(8)) def generate_ssh_keypair(export_path, prefix=None): # type: (str, str) -> 
tuple", "NO EVENT SHALL THE # AUTHORS OR COPYRIGHT HOLDERS BE", "a public key pem file from a pfx :param str", "sha1=sha1_cert_tp) def _rsa_encrypt_string(data, config): # type: (str, dict) -> str", "decrypted cipher text \"\"\" if util.is_none_or_empty(ciphertext): raise ValueError('invalid ciphertext to", "imports from . import settings from . import util #", "else 'executing command on', remote_ip, remote_port, ssh_private_key)) if sync: return", "print('passphrase cannot be empty') privatekey = pemfile + '.key' #", "else: return util.subprocess_nowait_pipe_stdout( ssh_cmd, shell=shell, pipe_stderr=True) def derive_private_key_pem_from_pfx(pfxfile, passphrase=None, pemfile=None):", "openssl command # in lowercase. Expected openssl output is in", "= settings.batch_shipyard_encryption_pfx_filename(config) pfx_passphrase = settings.batch_shipyard_encryption_pfx_passphrase( config) inkey = derive_public_key_pem_from_pfx(pfxfile, pfx_passphrase,", "= f.name try: # create pem from pfx subprocess.check_call( ['openssl',", "return pemfile def _parse_sha1_thumbprint_openssl(output): # type: (str) -> str \"\"\"Get", "'-nodes', '-x509', '-newkey', 'rsa:2048', '-keyout', privatekey, '-out', f.name, '-days', '730',", ") proc = subprocess.Popen( ['openssl', 'x509', '-noout', '-fingerprint'], stdin=subprocess.PIPE, stdout=subprocess.PIPE", "f.name, '-days', '730', '-subj', '/C=US/ST=None/L=None/O=None/CN=BatchShipyard'] ) # extract public key", "from the above openssl command # in lowercase. Expected openssl", "settings.batch_shipyard_encryption_pfx_filename(config) pfx_passphrase = settings.batch_shipyard_encryption_pfx_passphrase(config) pemfile = derive_private_key_pem_from_pfx(pfxfile, pfx_passphrase, None) if", "if util.is_none_or_empty(sha1_cert_tp): if pfx_passphrase is None: pfx_passphrase = <PASSWORD>('Enter password", "PFX: ') # convert pfx to pem if pemfile is", "invalid') proc = subprocess.Popen( ['openssl', 'rsautl', '-encrypt', '-pubin', '-inkey', inkey],", "export path :param str prefix: key prefix :rtype: tuple :return:", "subprocess.check_call( ['openssl', 'pkcs12', '-export', '-out', pfxfile, '-inkey', privatekey, '-in', f.name,", "All rights reserved. # # MIT License # # Permission", "bool, tuple, tuple) -> bool \"\"\"Connect to node via SSH", "'-subj', '/C=US/ST=None/L=None/O=None/CN=BatchShipyard'] ) # extract public key from private key", "str passphrase: passphrase for pfx :param str pemfile: path of", "sync: synchronous execution :param bool shell: execute with shell :param", "\"\"\" return _REMOTEFS_SSH_KEY_PREFIX def generate_rdp_password(): # type: (None) -> str", "'-fingerprint', '-in', pemfile], stdout=subprocess.PIPE ) return _parse_sha1_thumbprint_openssl(proc.communicate()[0]) def generate_pem_pfx_certificates(config): #", "raise ValueError('invalid ciphertext to decrypt') pfxfile = settings.batch_shipyard_encryption_pfx_filename(config) pfx_passphrase =", "local imports from . import settings from . 
import util", "# manually get thumbprint of pfx if not exists in", "key from private key subprocess.check_call( ['openssl', 'rsa', '-in', pemfile, '-pubout',", "passphrase] ) # extract public key from private key subprocess.check_call(", "= pathlib.Path(export_path, prefix + '.pub') if privkey.exists(): old = pathlib.Path(export_path,", "WITH THE SOFTWARE OR THE USE OR OTHER # DEALINGS", "stdin=subprocess.PIPE, stdout=subprocess.PIPE ) return _parse_sha1_thumbprint_openssl(proc.communicate(input=pfxdump)[0]) def get_sha1_thumbprint_pem(pemfile): # type: (str)", "is None: passphrase = <PASSWORD>pass('Enter password for PFX: ') #", "'x509', '-noout', '-fingerprint', '-in', pemfile], stdout=subprocess.PIPE ) return _parse_sha1_thumbprint_openssl(proc.communicate()[0]) def", "# # THE SOFTWARE IS PROVIDED *AS IS*, WITHOUT WARRANTY", "THE SOFTWARE IS PROVIDED *AS IS*, WITHOUT WARRANTY OF ANY", "= <PASSWORD>('Enter password for PFX: ') # convert pfx to", "['ssh-keygen', '-f', str(privkey), '-t', 'rsa', '-N', '''''']) return (privkey, pubkey)", "'PEM', '-out', pemfile] ) except Exception: fp = pathlib.Path(pemfile) if", "allocate pseudo-tty :param tuple ssh_args: ssh args :param tuple command:", "if util.is_not_empty(ssh_args): ssh_cmd.extend(ssh_args) ssh_cmd.append('{}@{}'.format(username, remote_ip)) if util.is_not_empty(command): ssh_cmd.extend(command) logger.info('{} node", "{}'.format(export_path)) subprocess.check_call( ['ssh-keygen', '-f', str(privkey), '-t', 'rsa', '-N', '''''']) return", "is too permissive: {}'.format( ssh_private_key)) # execute SSH command ssh_cmd", "and/or sell copies of the Software, and to permit persons", "is None: passphrase = <PASSWORD>('Enter password for PFX: ') #", "PFX encryption settings from configuration :param dict config: configuration settings", "pfx \"\"\" if pfxfile is None: raise ValueError('pfxfile is invalid')", "prefix + '.pub.old') if old.exists(): old.unlink() pubkey.rename(old) logger.info('generating ssh key", "{}'.format( ssh_private_key)) # execute SSH command ssh_cmd = [ 'ssh',", "pathlib.Path(inkey) if fp.exists(): fp.unlink() def _rsa_decrypt_string_with_pfx(ciphertext, config): # type: (str,", "subprocess.check_call( ['openssl', 'pkcs12', '-nodes', '-in', pfxfile, '-out', pemfile, '-password', 'pass:'", "settings.batch_shipyard_encryption_public_key_pem(config) derived = False if inkey is None: # derive", ":return: ssh key prefix for remote fs \"\"\" return _REMOTEFS_SSH_KEY_PREFIX", "settings.batch_shipyard_encryption_pfx_passphrase( config) inkey = derive_public_key_pem_from_pfx(pfxfile, pfx_passphrase, None) try: if inkey", "tty: allocate pseudo-tty :param tuple ssh_args: ssh args :param tuple", "['openssl', 'rsautl', '-decrypt', '-inkey', pemfile], stdin=subprocess.PIPE, stdout=subprocess.PIPE) cleartext = proc.communicate(input=data)[0]", "copy of this software and associated documentation files (the \"Software\"),", "BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, # FITNESS", "= settings.batch_shipyard_encryption_public_key_pem(config) pfxfile = settings.batch_shipyard_encryption_pfx_filename(config) passphrase = settings.batch_shipyard_encryption_pfx_passphrase(config) if pemfile", "ValueError('pfxfile is invalid') if passphrase is None: passphrase = getpass.getpass('Enter", "is None: f = tempfile.NamedTemporaryFile(mode='wb', delete=False) f.close() pemfile = f.name", "\"\"\" return base64.b64encode(os.urandom(8)) def generate_ssh_keypair(export_path, prefix=None): # type: (str, str)", "ssh args :param tuple command: 
command :rtype: int or subprocess.Process", "pfx file :param dict config: configuration dict :rtype: str :return:", "pfx :param str pemfile: path of pem file to write", "proc.communicate(input=util.encode_string(data))[0]) if proc.returncode != 0: raise RuntimeError( 'openssl encryption failed", "to export :rtype: str :return: sha1 thumbprint of pem \"\"\"", "'rsa', '-in', privatekey, '-pubout', '-outform', 'PEM', '-out', pemfile] ) logger.debug('created", "pfx derived = True pfxfile = settings.batch_shipyard_encryption_pfx_filename(config) pfx_passphrase = settings.batch_shipyard_encryption_pfx_passphrase(", "string, config): # type: (bool, str, dict) -> str \"\"\"Encrypt", "= settings.batch_shipyard_encryption_pfx_passphrase(config) if pemfile is None: pemfile = util.get_input('Enter public", "fp = pathlib.Path(pemfile) if fp.exists(): fp.unlink() pemfile = None return", "'SSH private key filemode is too permissive: {}'.format( ssh_private_key)) #", "'-nodes', '-passin', 'pass:' + passphrase] ) proc = subprocess.Popen( ['openssl',", "software and associated documentation files (the \"Software\"), # to deal", "obtaining a # copy of this software and associated documentation", "passphrase is None: passphrase = <PASSWORD>('Enter password for PFX: ')", "derive pem from pfx derived = True pfxfile = settings.batch_shipyard_encryption_pfx_filename(config)", "from a pfx :param str pfxfile: pfx file :param str", "pathlib2 as pathlib except ImportError: import pathlib import tempfile import", "if tty: ssh_cmd.append('-t') if util.is_not_empty(ssh_args): ssh_cmd.extend(ssh_args) ssh_cmd.append('{}@{}'.format(username, remote_ip)) if util.is_not_empty(command):", ":rtype: str :return: sha1 thumbprint of pfx \"\"\" if pfxfile", "path: {}'.format(export_path)) subprocess.check_call( ['ssh-keygen', '-f', str(privkey), '-t', 'rsa', '-N', ''''''])", "connect_or_exec_ssh_command( remote_ip, remote_port, ssh_private_key, username, sync=True, shell=False, tty=False, ssh_args=None, command=None):", "write to :rtype: str :return: path of pem file \"\"\"", "# type: (str) -> str \"\"\"Get SHA1 thumbprint from buffer", "x in modes]) def connect_or_exec_ssh_command( remote_ip, remote_port, ssh_private_key, username, sync=True,", "config, sha1_cert_tp) return PfxSettings( filename=pfxfile, passphrase=pfx_passphrase, sha1=sha1_cert_tp) def _rsa_encrypt_string(data, config):", "the pfx file to export :param str passphrase: passphrase for", "pem fp = pathlib.Path(f.name) if fp.exists(): fp.unlink() # get sha1", "encryption is enabled :param str string: string to encrypt :param", "prefix for remote fs \"\"\" return _REMOTEFS_SSH_KEY_PREFIX def generate_rdp_password(): #", "pfx to pem if pemfile is None: f = tempfile.NamedTemporaryFile(mode='wb',", ":param str username: username :param bool sync: synchronous execution :param", "temp cert pem fp = pathlib.Path(f.name) if fp.exists(): fp.unlink() #", "base64 import collections import getpass import logging import os try:", "int, list, object, range, str, ascii, chr, hex, input, next,", "['openssl', 'rsa', '-in', pemfile, '-pubout', '-outform', 'PEM', '-out', pemfile] )", "filename, public key filename) \"\"\" if util.is_none_or_empty(prefix): prefix = _SSH_KEY_PREFIX", "pathlib.Path(export_path, prefix + '.pub.old') if old.exists(): old.unlink() pubkey.rename(old) logger.info('generating ssh", "ssh_private_key: SSH private key :param str username: username :param bool", "None: while util.is_none_or_empty(passphrase): passphrase = getpass.getpass('Enter password for PFX: 
')", "derived: fp = pathlib.Path(inkey) if fp.exists(): fp.unlink() def _rsa_decrypt_string_with_pfx(ciphertext, config):", "for PFX: ') if len(passphrase) == 0: print('passphrase cannot be", "'-in', pfxfile, '-nodes', '-passin', 'pass:' + passphrase] ) proc =", "inkey], stdin=subprocess.PIPE, stdout=subprocess.PIPE) ciphertext = util.base64_encode_string( proc.communicate(input=util.encode_string(data))[0]) if proc.returncode !=", "zip) # stdlib imports import base64 import collections import getpass", "path of pem file \"\"\" if pfxfile is None: raise", "pemfile, '-pubout', '-outform', 'PEM', '-out', pemfile] ) except Exception: fp", "str :return: path of pem file \"\"\" if pfxfile is", "while util.is_none_or_empty(passphrase): passphrase = getpass.getpass('Enter password for PFX: ') if", ":rtype: str :return: ssh key prefix \"\"\" return _SSH_KEY_PREFIX def", "file from a pfx :param str pfxfile: pfx file :param", "data to encrypt :param dict config: configuration dict :rtype: str", "fp.exists(): fp.unlink() # remove temp cert pem fp = pathlib.Path(f.name)", "persons to whom the # Software is furnished to do", "get_sha1_thumbprint_pfx(pfxfile, pfx_passphrase) settings.set_batch_shipyard_encryption_pfx_sha1_thumbprint( config, sha1_cert_tp) return PfxSettings( filename=pfxfile, passphrase=pfx_passphrase, sha1=sha1_cert_tp)", "None: raise RuntimeError('public encryption key is invalid') proc = subprocess.Popen(", "MERCHANTABILITY, # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN", "proc.returncode)) return ciphertext finally: if derived: fp = pathlib.Path(inkey) if", "path :param str prefix: key prefix :rtype: tuple :return: (private", ") logger.debug('created PFX file: {}'.format(pfxfile)) finally: # remove rsa private", "tuple) -> bool \"\"\"Connect to node via SSH or execute", "private key and no password f = tempfile.NamedTemporaryFile(mode='wb', delete=False) f.close()", "'-noout', '-fingerprint', '-in', pemfile], stdout=subprocess.PIPE ) return _parse_sha1_thumbprint_openssl(proc.communicate()[0]) def generate_pem_pfx_certificates(config):", "= None return pemfile def derive_public_key_pem_from_pfx(pfxfile, passphrase=None, pemfile=None): # type:", "parse :rtype: str :return: sha1 thumbprint of buffer \"\"\" #", "BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER # LIABILITY,", "'pkcs12', '-in', pfxfile, '-nodes', '-passin', 'pass:' + passphrase] ) proc", ":param str passphrase: passphrase for pfx :param str pemfile: path", "'-out', f.name, '-days', '730', '-subj', '/C=US/ST=None/L=None/O=None/CN=BatchShipyard'] ) # extract public", "pfx :rtype: str :return: sha1 thumbprint of pfx \"\"\" if", "fp.exists(): fp.unlink() pemfile = None return pemfile def _parse_sha1_thumbprint_openssl(output): #", "f.name try: # create pem from pfx subprocess.check_call( ['openssl', 'pkcs12',", "public key from private key subprocess.check_call( ['openssl', 'rsa', '-in', pemfile,", "type: (str, int, pathlib.Path, str, bool, bool, tuple, tuple) ->", "str, str) -> str \"\"\"Derive a public key pem file", "= settings.batch_shipyard_encryption_public_key_pem(config) derived = False if inkey is None: #", "do so, subject to the following conditions: # # The", "command=None): # type: (str, int, pathlib.Path, str, bool, bool, tuple,", "import collections import getpass import logging import os try: import", "# type: (None) -> str \"\"\"Generate an RDP password :rtype:", "str(privkey), '-t', 'rsa', '-N', '''''']) return (privkey, pubkey) def check_ssh_private_key_filemode(ssh_private_key):", "of pfx \"\"\" # 
gather input pemfile = settings.batch_shipyard_encryption_public_key_pem(config) pfxfile", "config: configuration dict :rtype: str :return: encrypted string if enabled", "fp.exists(): fp.unlink() def _rsa_decrypt_string_with_pfx(ciphertext, config): # type: (str, dict) ->", "(str, str) -> tuple \"\"\"Generate an ssh keypair for use", "RDP password :rtype: str :return: rdp password \"\"\" return base64.b64encode(os.urandom(8))", "'-days', '730', '-subj', '/C=US/ST=None/L=None/O=None/CN=BatchShipyard'] ) # extract public key from", "= settings.batch_shipyard_encryption_pfx_passphrase(config) sha1_cert_tp = settings.batch_shipyard_encryption_pfx_sha1_thumbprint( config) # manually get thumbprint", "shall be included in # all copies or substantial portions", "passphrase=pfx_passphrase, sha1=sha1_cert_tp) def _rsa_encrypt_string(data, config): # type: (str, dict) ->", "KIND, EXPRESS OR # IMPLIED, INCLUDING BUT NOT LIMITED TO", "OR # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES", "MIT License # # Permission is hereby granted, free of", "pfx_passphrase = settings.batch_shipyard_encryption_pfx_passphrase(config) sha1_cert_tp = settings.batch_shipyard_encryption_pfx_sha1_thumbprint( config) # manually get", ":rtype: str :return: base64-encoded cipher text \"\"\" if util.is_none_or_empty(data): raise", ":param bool enabled: if encryption is enabled :param str string:", "the Software without restriction, including without limitation # the rights", "derive_private_key_pem_from_pfx(pfxfile, passphrase=None, pemfile=None): # type: (str, str, str) -> str", "import subprocess # local imports from . import settings from", "raise RuntimeError('cannot decrypt without valid private key') cleartext = None", "_SSH_KEY_PREFIX privkey = pathlib.Path(export_path, prefix) pubkey = pathlib.Path(export_path, prefix +", "old.exists(): old.unlink() pubkey.rename(old) logger.info('generating ssh key pair to path: {}'.format(export_path))", "base64 :param dict config: configuration dict :rtype: str :return: decrypted", "def generate_pem_pfx_certificates(config): # type: (dict) -> str \"\"\"Generate a pem", "# # MIT License # # Permission is hereby granted,", ":param dict config: configuration dict :rtype: str :return: sha1 thumbprint", "output).strip().split('=')[1].split(':')).lower() def get_sha1_thumbprint_pfx(pfxfile, passphrase): # type: (str, str) -> str", "decrypt') pfxfile = settings.batch_shipyard_encryption_pfx_filename(config) pfx_passphrase = settings.batch_shipyard_encryption_pfx_passphrase(config) pemfile = derive_private_key_pem_from_pfx(pfxfile,", "[ 'ssh', '-o', 'StrictHostKeyChecking=no', '-o', 'UserKnownHostsFile={}'.format(os.devnull), '-i', str(ssh_private_key), '-p', str(remote_port),", "is enabled :param str string: string to encrypt :param dict", "util.base64_decode_string(ciphertext) proc = subprocess.Popen( ['openssl', 'rsautl', '-decrypt', '-inkey', pemfile], stdin=subprocess.PIPE,", "file is invalid') if passphrase is None: passphrase = <PASSWORD>pass('Enter", "ssh key pair to path: {}'.format(export_path)) subprocess.check_call( ['ssh-keygen', '-f', str(privkey),", "= derive_public_key_pem_from_pfx(pfxfile, pfx_passphrase, None) try: if inkey is None: raise", "SOFTWARE OR THE USE OR OTHER # DEALINGS IN THE", "if util.is_none_or_empty(command) else 'executing command on', remote_ip, remote_port, ssh_private_key)) if", "return bool(fstat & flag) if util.on_windows(): return True fstat =", "(bool, str, dict) -> str \"\"\"Encrypt a string :param bool", "Software. 
# # THE SOFTWARE IS PROVIDED *AS IS*, WITHOUT", "logger.warning( 'SSH private key filemode is too permissive: {}'.format( ssh_private_key))", "buffer to parse :rtype: str :return: sha1 thumbprint of buffer", "Copyright (c) Microsoft Corporation # # All rights reserved. #", "SSH private key :param str username: username :param bool sync:", "enabled :param str string: string to encrypt :param dict config:", "( absolute_import, division, print_function, unicode_literals ) from builtins import (", "privatekey, '-in', f.name, '-certfile', f.name, '-passin', 'pass:', '-passout', 'pass:' +", "is None: raise ValueError('pfxfile is invalid') if passphrase is None:", "'pass:', '-passout', 'pass:' + passphrase] ) logger.debug('created PFX file: {}'.format(pfxfile))", "file: {}'.format(pfxfile)) finally: # remove rsa private key file fp", "is None: pfxfile = util.get_input('Enter PFX filename to create: ')", "key filemode :param pathlib.Path ssh_private_key: SSH private key :rtype: bool", "form: # SHA1 Fingerprint=<thumbprint> return ''.join(util.decode_string( output).strip().split('=')[1].split(':')).lower() def get_sha1_thumbprint_pfx(pfxfile, passphrase):", "of pfx pfxdump = subprocess.check_output( ['openssl', 'pkcs12', '-in', pfxfile, '-nodes',", "proc = subprocess.Popen( ['openssl', 'x509', '-noout', '-fingerprint'], stdin=subprocess.PIPE, stdout=subprocess.PIPE )", "= _SSH_KEY_PREFIX privkey = pathlib.Path(export_path, prefix) pubkey = pathlib.Path(export_path, prefix", "'-outform', 'PEM', '-out', pemfile] ) except Exception: fp = pathlib.Path(pemfile)", "WARRANTIES OF MERCHANTABILITY, # FITNESS FOR A PARTICULAR PURPOSE AND", "thumbprint of pfx pfxdump = subprocess.check_output( ['openssl', 'pkcs12', '-in', pfxfile,", "tty=False, ssh_args=None, command=None): # type: (str, int, pathlib.Path, str, bool,", "invalid') if passphrase is None: passphrase = <PASSWORD>('Enter password for", "!= 0: raise RuntimeError( 'openssl encryption failed with returncode: {}'.format(", "config) # manually get thumbprint of pfx if not exists", ":rtype: str :return: encrypted string if enabled \"\"\" if enabled:", "filemode is too permissive: {}'.format( ssh_private_key)) # execute SSH command", "type: (str, str, str) -> str \"\"\"Derive a public key", "'.pub') if privkey.exists(): old = pathlib.Path(export_path, prefix + '.old') if", "service subprocess.check_call( ['openssl', 'pkcs12', '-export', '-out', pfxfile, '-inkey', privatekey, '-in',", "None: passphrase = <PASSWORD>pass('Enter password for PFX: ') # convert", "pfx subprocess.check_call( ['openssl', 'pkcs12', '-nodes', '-in', pfxfile, '-out', pemfile, '-password',", "PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE #", "'-inkey', inkey], stdin=subprocess.PIPE, stdout=subprocess.PIPE) ciphertext = util.base64_encode_string( proc.communicate(input=util.encode_string(data))[0]) if proc.returncode", ":return: sha1 thumbprint of pfx \"\"\" if pfxfile is None:", "USE OR OTHER # DEALINGS IN THE SOFTWARE. # compat", "config: configuration settings :rtype: tuple :return: pfxfile, passphrase, sha1 tp", "\"\"\"Generate a pem and a derived pfx file :param dict", "copies or substantial portions of the Software. 
# # THE", "file :param str passphrase: passphrase for pfx :param str pemfile:", "pfxfile: name of the pfx file to export :rtype: str", "\"\"\"Get SHA1 thumbprint of PEM :param str pfxfile: name of", "pfxfile, passphrase, sha1 tp \"\"\" pfxfile = settings.batch_shipyard_encryption_pfx_filename(config) pfx_passphrase =", "str(remote_port), ] if tty: ssh_cmd.append('-t') if util.is_not_empty(ssh_args): ssh_cmd.extend(ssh_args) ssh_cmd.append('{}@{}'.format(username, remote_ip))", "SOFTWARE IS PROVIDED *AS IS*, WITHOUT WARRANTY OF ANY KIND,", "in the form: # SHA1 Fingerprint=<thumbprint> return ''.join(util.decode_string( output).strip().split('=')[1].split(':')).lower() def", "pemfile = util.get_input('Enter public key PEM filename to create: ')", "# type: (str, dict) -> str \"\"\"RSA encrypt a string", "settings :rtype: tuple :return: pfxfile, passphrase, sha1 tp \"\"\" pfxfile", "= logging.getLogger(__name__) util.setup_logger(logger) # global defines _SSH_KEY_PREFIX = 'id_rsa_shipyard' _REMOTEFS_SSH_KEY_PREFIX", ":param str string: string to encrypt :param dict config: configuration", "OF ANY KIND, EXPRESS OR # IMPLIED, INCLUDING BUT NOT", "passphrase] ) except Exception: fp = pathlib.Path(pemfile) if fp.exists(): fp.unlink()", "\"\"\"RSA decrypt a string :param str ciphertext: cipher text in", "synchronous execution :param bool shell: execute with shell :param bool", "is None: while util.is_none_or_empty(passphrase): passphrase = getpass.getpass('Enter password for PFX:", "pemfile is None: pemfile = util.get_input('Enter public key PEM filename", "['openssl', 'x509', '-noout', '-fingerprint', '-in', pemfile], stdout=subprocess.PIPE ) return _parse_sha1_thumbprint_openssl(proc.communicate()[0])", "str) -> str \"\"\"Get SHA1 thumbprint of PFX :param str", "privatekey, '-out', f.name, '-days', '730', '-subj', '/C=US/ST=None/L=None/O=None/CN=BatchShipyard'] ) # extract", "fp = pathlib.Path(pemfile) if fp.exists(): fp.unlink() return cleartext def encrypt_string(enabled,", "-> str \"\"\"Get SHA1 thumbprint from buffer :param str buffer:", "\"\"\" if not ssh_private_key.exists(): raise RuntimeError('SSH private key file not", "pfx if not exists in config if util.is_none_or_empty(sha1_cert_tp): if pfx_passphrase", "settings from . 
import util # create logger logger =", "execute SSH command :param str remote_ip: remote ip address :param", "remove rsa private key file fp = pathlib.Path(privatekey) if fp.exists():", "\"\"\"Generate an RDP password :rtype: str :return: rdp password \"\"\"", "\"\"\"Generate an ssh keypair for use with user logins :param", "export :rtype: str :return: sha1 thumbprint of pem \"\"\" proc", "+ '.pub.old') if old.exists(): old.unlink() pubkey.rename(old) logger.info('generating ssh key pair", ":rtype: str :return: path of pem file \"\"\" if pfxfile", "Azure Batch service subprocess.check_call( ['openssl', 'pkcs12', '-export', '-out', pfxfile, '-inkey',", "= proc.communicate(input=data)[0] finally: fp = pathlib.Path(pemfile) if fp.exists(): fp.unlink() return", "private key filemode :param pathlib.Path ssh_private_key: SSH private key :rtype:", "key prefix for remote fs \"\"\" return _REMOTEFS_SSH_KEY_PREFIX def generate_rdp_password():", "<PASSWORD>('Enter password for PFX: ') # convert pfx to pem", "stdin=subprocess.PIPE, stdout=subprocess.PIPE) cleartext = proc.communicate(input=data)[0] finally: fp = pathlib.Path(pemfile) if", "def _parse_sha1_thumbprint_openssl(output): # type: (str) -> str \"\"\"Get SHA1 thumbprint", "of pfx \"\"\" if pfxfile is None: raise ValueError('pfxfile is", "['openssl', 'rsa', '-in', privatekey, '-pubout', '-outform', 'PEM', '-out', pemfile] )", "to create: ') if pfxfile is None: pfxfile = util.get_input('Enter", "file is invalid') if passphrase is None: passphrase = <PASSWORD>('Enter", "the form: # SHA1 Fingerprint=<thumbprint> return ''.join(util.decode_string( output).strip().split('=')[1].split(':')).lower() def get_sha1_thumbprint_pfx(pfxfile,", "to use, copy, modify, merge, publish, distribute, sublicense, # and/or", "tempfile.NamedTemporaryFile(mode='wb', delete=False) f.close() pemfile = f.name try: # create pem", "import os try: import pathlib2 as pathlib except ImportError: import", "round, super, filter, map, zip) # stdlib imports import base64", "None try: data = util.base64_decode_string(ciphertext) proc = subprocess.Popen( ['openssl', 'rsautl',", "\"\"\" proc = subprocess.Popen( ['openssl', 'x509', '-noout', '-fingerprint', '-in', pemfile],", "= <PASSWORD>('Enter password for PFX: ') sha1_cert_tp = get_sha1_thumbprint_pfx(pfxfile, pfx_passphrase)", "from private key subprocess.check_call( ['openssl', 'rsa', '-in', pemfile, '-pubout', '-outform',", "too permissive: {}'.format( ssh_private_key)) # execute SSH command ssh_cmd =", "try: # create pem from pfx subprocess.check_call( ['openssl', 'pkcs12', '-nodes',", "PEM filename to create: ') if pfxfile is None: pfxfile", "THE # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY", "manually get thumbprint of pfx if not exists in config", "if pubkey.exists(): old = pathlib.Path(export_path, prefix + '.pub.old') if old.exists():", "shell :param bool tty: allocate pseudo-tty :param tuple ssh_args: ssh", ":param dict config: configuration settings :rtype: tuple :return: pfxfile, passphrase,", "subprocess.Popen( ['openssl', 'x509', '-noout', '-fingerprint', '-in', pemfile], stdout=subprocess.PIPE ) return", "logger.info('generating ssh key pair to path: {}'.format(export_path)) subprocess.check_call( ['ssh-keygen', '-f',", "The above copyright notice and this permission notice shall be", "found at: {}'.format( ssh_private_key)) # ensure file mode is set", "for use with user logins :param str export_path: keypair export", "in the Software without restriction, including without limitation # the", "subprocess.Popen( 
['openssl', 'rsautl', '-decrypt', '-inkey', pemfile], stdin=subprocess.PIPE, stdout=subprocess.PIPE) cleartext =", ":return: (private key filename, public key filename) \"\"\" if util.is_none_or_empty(prefix):", "without limitation # the rights to use, copy, modify, merge,", "fp.exists(): fp.unlink() return cleartext def encrypt_string(enabled, string, config): # type:", "\"\"\" if util.is_none_or_empty(ciphertext): raise ValueError('invalid ciphertext to decrypt') pfxfile =", "# remove rsa private key file fp = pathlib.Path(privatekey) if", "derived pfx file :param dict config: configuration dict :rtype: str", "filemode is ok \"\"\" def _mode_check(fstat, flag): return bool(fstat &", "try: subprocess.check_call( ['openssl', 'req', '-new', '-nodes', '-x509', '-newkey', 'rsa:2048', '-keyout',", "or subprocess handle \"\"\" if not ssh_private_key.exists(): raise RuntimeError('SSH private" ]
[ "False KEEP_PROB = 1.0 SHOW_SCORE_THRSHOLD = 0.6 # only show", "RPN_TOP_K_NMS_TEST = 6000 RPN_MAXIMUM_PROPOSAL_TEST = 1000 # -------------------------------------------Fast-RCNN config ROI_SIZE", "show in tensorboard FAST_RCNN_NMS_IOU_THRESHOLD = 0.5 # 0.6 FAST_RCNN_NMS_MAX_BOXES_PER_CLASS =", "tensorflow as tf ''' gluoncv backbone + multi_gpu ''' #", "SAVE_WEIGHTS_INTE) LR = 5e-4 * 2 * 1.25 * NUM_GPU", "BATCH_SIZE = 1 WARM_SETP = int(0.25 * SAVE_WEIGHTS_INTE) LR =", "CLASS_NUM = 80 # --------------------------------------------- Network_config INITIALIZER = tf.random_normal_initializer(mean=0.0, stddev=0.01)", "# if None, will not multipy GRADIENT_CLIPPING_BY_NORM = None #", "= 0.3 TRAIN_RPN_CLOOBER_POSITIVES = False RPN_MINIBATCH_SIZE = 256 RPN_POSITIVE_RATE =", "Train config RESTORE_FROM_RPN = False IS_FILTER_OUTSIDE_BOXES = False FIXED_BLOCKS =", "''' # ------------------------------------------------ VERSION = 'Cascade_FPN_Res50_COCO_1x_20190421_v3' NET_NAME = 'resnet50_v1d' ADD_BOX_IN_TENSORBOARD", "+ '/tools/inference_image' INFERENCE_SAVE_PATH = ROOT_PATH + '/tools/inference_results' if NET_NAME.startswith(\"resnet\"): weights_name", "+ '/tools/test_result' INFERENCE_IMAGE_PATH = ROOT_PATH + '/tools/inference_image' INFERENCE_SAVE_PATH = ROOT_PATH", "+ '.ckpt' TRAINED_CKPT = os.path.join(ROOT_PATH, 'output/trained_weights') EVALUATE_DIR = ROOT_PATH +", "128, 256, 512] ANCHOR_STRIDE_LIST = [4, 8, 16, 32, 64]", "'/tools/test_result' INFERENCE_IMAGE_PATH = ROOT_PATH + '/tools/inference_image' INFERENCE_SAVE_PATH = ROOT_PATH +", "20.0]] ANCHOR_SCALE_FACTORS = [10., 10., 5.0, 5.0] # --------------------------------------------FPN config", "# if is -1, that is train with OHEM FAST_RCNN_POSITIVE_RATE", "= 20*SAVE_WEIGHTS_INTE # -------------------------------------------- Data_preprocess_config DATASET_NAME = 'coco' # 'pascal',", "BATCH_SIZE DECAY_STEP = [11*SAVE_WEIGHTS_INTE, 16*SAVE_WEIGHTS_INTE, 20*SAVE_WEIGHTS_INTE] # 50000, 70000 MAX_ITERATION", "utf-8 -*- from __future__ import division, print_function, absolute_import import os", "elif NET_NAME.startswith(\"MobilenetV2\"): weights_name = \"mobilenet/mobilenet_v2_1.0_224\" else: raise NotImplementedError PRETRAINED_CKPT =", "= ROOT_PATH + '/tools/test_result' INFERENCE_IMAGE_PATH = ROOT_PATH + '/tools/inference_image' INFERENCE_SAVE_PATH", "channel is RGB. 
In openCV, channel is BGR IMG_SHORT_SIDE_LEN =", "# -------------------------------------------- Data_preprocess_config DATASET_NAME = 'coco' # 'pascal', 'coco' PIXEL_MEAN", "config ROI_SIZE = 14 ROI_POOL_KERNEL_SIZE = 2 USE_DROPOUT = False", "[0.485, 0.456, 0.406] PIXEL_STD = [0.229, 0.224, 0.225] # R,", "= False KEEP_PROB = 1.0 SHOW_SCORE_THRSHOLD = 0.6 # only", "'coco' # 'pascal', 'coco' PIXEL_MEAN = [123.68, 116.779, 103.939] #", "# ------------------------------------------ Train config RESTORE_FROM_RPN = False IS_FILTER_OUTSIDE_BOXES = False", "3 RPN_IOU_POSITIVE_THRESHOLD = 0.7 RPN_IOU_NEGATIVE_THRESHOLD = 0.3 TRAIN_RPN_CLOOBER_POSITIVES = False", "FAST_RCNN_LOCATION_LOSS_WEIGHT = 1.0 FAST_RCNN_CLASSIFICATION_LOSS_WEIGHT = 1.0 RPN_SIGMA = 3.0 FASTRCNN_SIGMA", "# 2.0 # if None, will not multipy GRADIENT_CLIPPING_BY_NORM =", "20*SAVE_WEIGHTS_INTE] # 50000, 70000 MAX_ITERATION = 20*SAVE_WEIGHTS_INTE # -------------------------------------------- Data_preprocess_config", "2 * 1.25 * NUM_GPU * BATCH_SIZE DECAY_STEP = [11*SAVE_WEIGHTS_INTE,", "'/output/evaluate_result_pickle/' # ------------------------------------------ Train config RESTORE_FROM_RPN = False IS_FILTER_OUTSIDE_BOXES =", "RPN_MAXIMUM_PROPOSAL_TARIN = 2000 RPN_TOP_K_NMS_TEST = 6000 RPN_MAXIMUM_PROPOSAL_TEST = 1000 #", "= ROOT_PATH + '/data/pretrained_weights/' + weights_name + '.ckpt' TRAINED_CKPT =", "= 800 IMG_MAX_LENGTH = 1333 CLASS_NUM = 80 # ---------------------------------------------", "= [123.68, 116.779, 103.939] # R, G, B. In tf,", "SUMMARY_PATH = ROOT_PATH + '/output/summary' TEST_SAVE_PATH = ROOT_PATH + '/tools/test_result'", "0.5 # 0.6 FAST_RCNN_NMS_MAX_BOXES_PER_CLASS = 100 FAST_RCNN_IOU_POSITIVE_THRESHOLD = 0.5 FAST_RCNN_IOU_NEGATIVE_THRESHOLD", "SHARE_HEADS = True KERNEL_SIZE = 3 RPN_IOU_POSITIVE_THRESHOLD = 0.7 RPN_IOU_NEGATIVE_THRESHOLD", "False] # for gluoncv backbone USE_07_METRIC = True CUDA9 =", "10., 5.0, 5.0] # --------------------------------------------FPN config SHARE_HEADS = True KERNEL_SIZE", "1.0 RPN_SIGMA = 3.0 FASTRCNN_SIGMA = 1.0 MUTILPY_BIAS_GRADIENT = None", "= 3 RPN_IOU_POSITIVE_THRESHOLD = 0.7 RPN_IOU_NEGATIVE_THRESHOLD = 0.3 TRAIN_RPN_CLOOBER_POSITIVES =", "= 1.0 FAST_RCNN_CLASSIFICATION_LOSS_WEIGHT = 1.0 RPN_SIGMA = 3.0 FASTRCNN_SIGMA =", "70000 MAX_ITERATION = 20*SAVE_WEIGHTS_INTE # -------------------------------------------- Data_preprocess_config DATASET_NAME = 'coco'", "= False RPN_MINIBATCH_SIZE = 256 RPN_POSITIVE_RATE = 0.5 RPN_NMS_IOU_THRESHOLD =", "multi_gpu ''' # ------------------------------------------------ VERSION = 'Cascade_FPN_Res50_COCO_1x_20190421_v3' NET_NAME = 'resnet50_v1d'", "'output/trained_weights') EVALUATE_DIR = ROOT_PATH + '/output/evaluate_result_pickle/' # ------------------------------------------ Train config", "= True # ---------------------------------------------Anchor config USE_CENTER_OFFSET = True LEVLES =", "= 6000 RPN_MAXIMUM_PROPOSAL_TEST = 1000 # -------------------------------------------Fast-RCNN config ROI_SIZE =", "tf.random_normal_initializer(mean=0.0, stddev=0.01) BBOX_INITIALIZER = tf.random_normal_initializer(mean=0.0, stddev=0.001) WEIGHT_DECAY = 0.00004 if", "ANCHOR_STRIDE_LIST = [4, 8, 16, 32, 64] ANCHOR_SCALES = [1.0]", "channel is BGR IMG_SHORT_SIDE_LEN = 800 IMG_MAX_LENGTH = 1333 CLASS_NUM", "RPN_SIGMA = 3.0 FASTRCNN_SIGMA = 1.0 MUTILPY_BIAS_GRADIENT = None #", "= tf.random_normal_initializer(mean=0.0, stddev=0.001) WEIGHT_DECAY = 0.00004 if NET_NAME.startswith('Mobilenet') else 0.0001", "---------------------------------------- 
System_config ROOT_PATH = os.path.abspath('../') print(20*\"++--\") print(ROOT_PATH) GPU_GROUP = \"0,1,2,3,4,5,6,7\"", "print_function, absolute_import import os import tensorflow as tf ''' gluoncv", "* 1.25 * NUM_GPU * BATCH_SIZE DECAY_STEP = [11*SAVE_WEIGHTS_INTE, 16*SAVE_WEIGHTS_INTE,", "TRAIN_RPN_CLOOBER_POSITIVES = False RPN_MINIBATCH_SIZE = 256 RPN_POSITIVE_RATE = 0.5 RPN_NMS_IOU_THRESHOLD", "IOU < 0.5 is negative FAST_RCNN_MINIBATCH_SIZE = 512 # if", "ANCHOR_RATIOS = [0.5, 1., 2.0] ROI_SCALE_FACTORS = [[10., 10., 5.0,", "0.3 TRAIN_RPN_CLOOBER_POSITIVES = False RPN_MINIBATCH_SIZE = 256 RPN_POSITIVE_RATE = 0.5", "* NUM_GPU * BATCH_SIZE DECAY_STEP = [11*SAVE_WEIGHTS_INTE, 16*SAVE_WEIGHTS_INTE, 20*SAVE_WEIGHTS_INTE] #", "''' gluoncv backbone + multi_gpu ''' # ------------------------------------------------ VERSION =", "16, 32, 64] ANCHOR_SCALES = [1.0] ANCHOR_RATIOS = [0.5, 1.,", "[10., 10., 5.0, 5.0] # --------------------------------------------FPN config SHARE_HEADS = True", "INFERENCE_SAVE_PATH = ROOT_PATH + '/tools/inference_results' if NET_NAME.startswith(\"resnet\"): weights_name = NET_NAME", "[1.0] ANCHOR_RATIOS = [0.5, 1., 2.0] ROI_SCALE_FACTORS = [[10., 10.,", "= 1e-5 MOMENTUM = 0.9 BATCH_SIZE = 1 WARM_SETP =", "# ------------------------------------------------ VERSION = 'Cascade_FPN_Res50_COCO_1x_20190421_v3' NET_NAME = 'resnet50_v1d' ADD_BOX_IN_TENSORBOARD =", "BASE_ANCHOR_SIZE_LIST = [32, 64, 128, 256, 512] ANCHOR_STRIDE_LIST = [4,", "-*- coding: utf-8 -*- from __future__ import division, print_function, absolute_import", "------------------------------------------------ VERSION = 'Cascade_FPN_Res50_COCO_1x_20190421_v3' NET_NAME = 'resnet50_v1d' ADD_BOX_IN_TENSORBOARD = True", "LR = 5e-4 * 2 * 1.25 * NUM_GPU *", "else 0.0001 IS_ASSIGN = True # ---------------------------------------------Anchor config USE_CENTER_OFFSET =", "RPN_IOU_NEGATIVE_THRESHOLD = 0.3 TRAIN_RPN_CLOOBER_POSITIVES = False RPN_MINIBATCH_SIZE = 256 RPN_POSITIVE_RATE", "NET_NAME = 'resnet50_v1d' ADD_BOX_IN_TENSORBOARD = True # ---------------------------------------- System_config ROOT_PATH", "# R, G, B. In tf, channel is RGB. In", "256, 512] ANCHOR_STRIDE_LIST = [4, 8, 16, 32, 64] ANCHOR_SCALES", "'.ckpt' TRAINED_CKPT = os.path.join(ROOT_PATH, 'output/trained_weights') EVALUATE_DIR = ROOT_PATH + '/output/evaluate_result_pickle/'", "True EVAL_THRESHOLD = 0.5 RPN_LOCATION_LOSS_WEIGHT = 1. RPN_CLASSIFICATION_LOSS_WEIGHT = 1.0", "10.0 if None, will not clip EPSILON = 1e-5 MOMENTUM", "is BGR IMG_SHORT_SIDE_LEN = 800 IMG_MAX_LENGTH = 1333 CLASS_NUM =", "only show in tensorboard FAST_RCNN_NMS_IOU_THRESHOLD = 0.5 # 0.6 FAST_RCNN_NMS_MAX_BOXES_PER_CLASS", "gluoncv backbone USE_07_METRIC = True CUDA9 = True EVAL_THRESHOLD =", "openCV, channel is BGR IMG_SHORT_SIDE_LEN = 800 IMG_MAX_LENGTH = 1333", "= True EVAL_THRESHOLD = 0.5 RPN_LOCATION_LOSS_WEIGHT = 1. RPN_CLASSIFICATION_LOSS_WEIGHT =", "= None # 10.0 if None, will not clip EPSILON", "3.0 FASTRCNN_SIGMA = 1.0 MUTILPY_BIAS_GRADIENT = None # 2.0 #", "RPN_MAXIMUM_PROPOSAL_TEST = 1000 # -------------------------------------------Fast-RCNN config ROI_SIZE = 14 ROI_POOL_KERNEL_SIZE", "In tf, channel is RGB. 
In openCV, channel is BGR", "0 # allow 0~3 FREEZE_BLOCKS = [True, False, False, False,", "NET_NAME elif NET_NAME.startswith(\"MobilenetV2\"): weights_name = \"mobilenet/mobilenet_v2_1.0_224\" else: raise NotImplementedError PRETRAINED_CKPT", "= 'resnet50_v1d' ADD_BOX_IN_TENSORBOARD = True # ---------------------------------------- System_config ROOT_PATH =", "MUTILPY_BIAS_GRADIENT = None # 2.0 # if None, will not", "# --------------------------------------------FPN config SHARE_HEADS = True KERNEL_SIZE = 3 RPN_IOU_POSITIVE_THRESHOLD", "= 'Cascade_FPN_Res50_COCO_1x_20190421_v3' NET_NAME = 'resnet50_v1d' ADD_BOX_IN_TENSORBOARD = True # ----------------------------------------", "-------------------------------------------Fast-RCNN config ROI_SIZE = 14 ROI_POOL_KERNEL_SIZE = 2 USE_DROPOUT =", "\"0,1,2,3,4,5,6,7\" NUM_GPU = len(GPU_GROUP.strip().split(',')) SHOW_TRAIN_INFO_INTE = 20 SMRY_ITER = 200", "+ '/output/evaluate_result_pickle/' # ------------------------------------------ Train config RESTORE_FROM_RPN = False IS_FILTER_OUTSIDE_BOXES", "INITIALIZER = tf.random_normal_initializer(mean=0.0, stddev=0.01) BBOX_INITIALIZER = tf.random_normal_initializer(mean=0.0, stddev=0.001) WEIGHT_DECAY =", "G, B. In tf, channel is RGB. In openCV, channel", "WEIGHT_DECAY = 0.00004 if NET_NAME.startswith('Mobilenet') else 0.0001 IS_ASSIGN = True", "B. In tf, channel is RGB. In openCV, channel is", "= len(GPU_GROUP.strip().split(',')) SHOW_TRAIN_INFO_INTE = 20 SMRY_ITER = 200 SAVE_WEIGHTS_INTE =", "= 2000 RPN_TOP_K_NMS_TEST = 6000 RPN_MAXIMUM_PROPOSAL_TEST = 1000 # -------------------------------------------Fast-RCNN", "CUDA9 = True EVAL_THRESHOLD = 0.5 RPN_LOCATION_LOSS_WEIGHT = 1. RPN_CLASSIFICATION_LOSS_WEIGHT", "= 0.5 RPN_NMS_IOU_THRESHOLD = 0.7 RPN_TOP_K_NMS_TRAIN = 12000 RPN_MAXIMUM_PROPOSAL_TARIN =", "is negative FAST_RCNN_MINIBATCH_SIZE = 512 # if is -1, that", "2.0 # if None, will not multipy GRADIENT_CLIPPING_BY_NORM = None", "GPU_GROUP = \"0,1,2,3,4,5,6,7\" NUM_GPU = len(GPU_GROUP.strip().split(',')) SHOW_TRAIN_INFO_INTE = 20 SMRY_ITER", "USE_DROPOUT = False KEEP_PROB = 1.0 SHOW_SCORE_THRSHOLD = 0.6 #", "# -------------------------------------------Fast-RCNN config ROI_SIZE = 14 ROI_POOL_KERNEL_SIZE = 2 USE_DROPOUT", "config USE_CENTER_OFFSET = True LEVLES = ['P2', 'P3', 'P4', 'P5',", "+ weights_name + '.ckpt' TRAINED_CKPT = os.path.join(ROOT_PATH, 'output/trained_weights') EVALUATE_DIR =", "= 0.7 RPN_TOP_K_NMS_TRAIN = 12000 RPN_MAXIMUM_PROPOSAL_TARIN = 2000 RPN_TOP_K_NMS_TEST =", "6000 RPN_MAXIMUM_PROPOSAL_TEST = 1000 # -------------------------------------------Fast-RCNN config ROI_SIZE = 14", "[20., 20., 10.0, 10.0], [40., 40., 20.0, 20.0]] ANCHOR_SCALE_FACTORS =", "0.6 # only show in tensorboard FAST_RCNN_NMS_IOU_THRESHOLD = 0.5 #", "---------------------------------------------Anchor config USE_CENTER_OFFSET = True LEVLES = ['P2', 'P3', 'P4',", "1.0 SHOW_SCORE_THRSHOLD = 0.6 # only show in tensorboard FAST_RCNN_NMS_IOU_THRESHOLD", "SHOW_TRAIN_INFO_INTE = 20 SMRY_ITER = 200 SAVE_WEIGHTS_INTE = 80000 SUMMARY_PATH", "= None # 2.0 # if None, will not multipy", "gluoncv backbone + multi_gpu ''' # ------------------------------------------------ VERSION = 'Cascade_FPN_Res50_COCO_1x_20190421_v3'", "config RESTORE_FROM_RPN = False IS_FILTER_OUTSIDE_BOXES = False FIXED_BLOCKS = 0", "that is train with OHEM FAST_RCNN_POSITIVE_RATE = 0.25 ADD_GTBOXES_TO_TRAIN =", "FIXED_BLOCKS = 0 # allow 0~3 FREEZE_BLOCKS = [True, False,", "USE_07_METRIC = True CUDA9 = True EVAL_THRESHOLD = 0.5 
RPN_LOCATION_LOSS_WEIGHT", "< 0.5 is negative FAST_RCNN_MINIBATCH_SIZE = 512 # if is", "1.0 FAST_RCNN_CLASSIFICATION_LOSS_WEIGHT = 1.0 RPN_SIGMA = 3.0 FASTRCNN_SIGMA = 1.0", "800 IMG_MAX_LENGTH = 1333 CLASS_NUM = 80 # --------------------------------------------- Network_config", "= 1 WARM_SETP = int(0.25 * SAVE_WEIGHTS_INTE) LR = 5e-4", "= ROOT_PATH + '/tools/inference_results' if NET_NAME.startswith(\"resnet\"): weights_name = NET_NAME elif", "= \"0,1,2,3,4,5,6,7\" NUM_GPU = len(GPU_GROUP.strip().split(',')) SHOW_TRAIN_INFO_INTE = 20 SMRY_ITER =", "not clip EPSILON = 1e-5 MOMENTUM = 0.9 BATCH_SIZE =", "[11*SAVE_WEIGHTS_INTE, 16*SAVE_WEIGHTS_INTE, 20*SAVE_WEIGHTS_INTE] # 50000, 70000 MAX_ITERATION = 20*SAVE_WEIGHTS_INTE #", "SHOW_SCORE_THRSHOLD = 0.6 # only show in tensorboard FAST_RCNN_NMS_IOU_THRESHOLD =", "IMG_SHORT_SIDE_LEN = 800 IMG_MAX_LENGTH = 1333 CLASS_NUM = 80 #", "= [True, False, False, False, False] # for gluoncv backbone", "'/data/pretrained_weights/' + weights_name + '.ckpt' TRAINED_CKPT = os.path.join(ROOT_PATH, 'output/trained_weights') EVALUATE_DIR", "None # 10.0 if None, will not clip EPSILON =", "64, 128, 256, 512] ANCHOR_STRIDE_LIST = [4, 8, 16, 32,", "stddev=0.01) BBOX_INITIALIZER = tf.random_normal_initializer(mean=0.0, stddev=0.001) WEIGHT_DECAY = 0.00004 if NET_NAME.startswith('Mobilenet')", "-1, that is train with OHEM FAST_RCNN_POSITIVE_RATE = 0.25 ADD_GTBOXES_TO_TRAIN", "as tf ''' gluoncv backbone + multi_gpu ''' # ------------------------------------------------", "1333 CLASS_NUM = 80 # --------------------------------------------- Network_config INITIALIZER = tf.random_normal_initializer(mean=0.0,", "openCV, channel is BGR PIXEL_MEAN_ = [0.485, 0.456, 0.406] PIXEL_STD", "5.0, 5.0] # --------------------------------------------FPN config SHARE_HEADS = True KERNEL_SIZE =", "= 1000 # -------------------------------------------Fast-RCNN config ROI_SIZE = 14 ROI_POOL_KERNEL_SIZE =", "\"mobilenet/mobilenet_v2_1.0_224\" else: raise NotImplementedError PRETRAINED_CKPT = ROOT_PATH + '/data/pretrained_weights/' +", "TRAINED_CKPT = os.path.join(ROOT_PATH, 'output/trained_weights') EVALUATE_DIR = ROOT_PATH + '/output/evaluate_result_pickle/' #", "DECAY_STEP = [11*SAVE_WEIGHTS_INTE, 16*SAVE_WEIGHTS_INTE, 20*SAVE_WEIGHTS_INTE] # 50000, 70000 MAX_ITERATION =", "ROI_SIZE = 14 ROI_POOL_KERNEL_SIZE = 2 USE_DROPOUT = False KEEP_PROB", "= 1.0 SHOW_SCORE_THRSHOLD = 0.6 # only show in tensorboard", "KERNEL_SIZE = 3 RPN_IOU_POSITIVE_THRESHOLD = 0.7 RPN_IOU_NEGATIVE_THRESHOLD = 0.3 TRAIN_RPN_CLOOBER_POSITIVES", "= [0.229, 0.224, 0.225] # R, G, B. 
In tf,", "0.5 RPN_NMS_IOU_THRESHOLD = 0.7 RPN_TOP_K_NMS_TRAIN = 12000 RPN_MAXIMUM_PROPOSAL_TARIN = 2000", "5.0] # --------------------------------------------FPN config SHARE_HEADS = True KERNEL_SIZE = 3", "= os.path.abspath('../') print(20*\"++--\") print(ROOT_PATH) GPU_GROUP = \"0,1,2,3,4,5,6,7\" NUM_GPU = len(GPU_GROUP.strip().split(','))", "0.9 BATCH_SIZE = 1 WARM_SETP = int(0.25 * SAVE_WEIGHTS_INTE) LR", "'P3', 'P4', 'P5', 'P6'] BASE_ANCHOR_SIZE_LIST = [32, 64, 128, 256,", "= True KERNEL_SIZE = 3 RPN_IOU_POSITIVE_THRESHOLD = 0.7 RPN_IOU_NEGATIVE_THRESHOLD =", "NET_NAME.startswith('Mobilenet') else 0.0001 IS_ASSIGN = True # ---------------------------------------------Anchor config USE_CENTER_OFFSET", "'/tools/inference_results' if NET_NAME.startswith(\"resnet\"): weights_name = NET_NAME elif NET_NAME.startswith(\"MobilenetV2\"): weights_name =", "0.0 # 0.1 < IOU < 0.5 is negative FAST_RCNN_MINIBATCH_SIZE", "= 5e-4 * 2 * 1.25 * NUM_GPU * BATCH_SIZE", "0.225] # R, G, B. In tf, channel is RGB.", "= ROOT_PATH + '/output/evaluate_result_pickle/' # ------------------------------------------ Train config RESTORE_FROM_RPN =", "10.0], [40., 40., 20.0, 20.0]] ANCHOR_SCALE_FACTORS = [10., 10., 5.0,", "Network_config INITIALIZER = tf.random_normal_initializer(mean=0.0, stddev=0.01) BBOX_INITIALIZER = tf.random_normal_initializer(mean=0.0, stddev=0.001) WEIGHT_DECAY", "USE_CENTER_OFFSET = True LEVLES = ['P2', 'P3', 'P4', 'P5', 'P6']", "True LEVLES = ['P2', 'P3', 'P4', 'P5', 'P6'] BASE_ANCHOR_SIZE_LIST =", "= [0.485, 0.456, 0.406] PIXEL_STD = [0.229, 0.224, 0.225] #", "1.0 FAST_RCNN_LOCATION_LOSS_WEIGHT = 1.0 FAST_RCNN_CLASSIFICATION_LOSS_WEIGHT = 1.0 RPN_SIGMA = 3.0", "FAST_RCNN_IOU_POSITIVE_THRESHOLD = 0.5 FAST_RCNN_IOU_NEGATIVE_THRESHOLD = 0.0 # 0.1 < IOU", "= ROOT_PATH + '/output/summary' TEST_SAVE_PATH = ROOT_PATH + '/tools/test_result' INFERENCE_IMAGE_PATH", "ANCHOR_SCALES = [1.0] ANCHOR_RATIOS = [0.5, 1., 2.0] ROI_SCALE_FACTORS =", "'resnet50_v1d' ADD_BOX_IN_TENSORBOARD = True # ---------------------------------------- System_config ROOT_PATH = os.path.abspath('../')", "RPN_POSITIVE_RATE = 0.5 RPN_NMS_IOU_THRESHOLD = 0.7 RPN_TOP_K_NMS_TRAIN = 12000 RPN_MAXIMUM_PROPOSAL_TARIN", "20*SAVE_WEIGHTS_INTE # -------------------------------------------- Data_preprocess_config DATASET_NAME = 'coco' # 'pascal', 'coco'", "backbone + multi_gpu ''' # ------------------------------------------------ VERSION = 'Cascade_FPN_Res50_COCO_1x_20190421_v3' NET_NAME", "IMG_MAX_LENGTH = 1333 CLASS_NUM = 80 # --------------------------------------------- Network_config INITIALIZER", "# --------------------------------------------- Network_config INITIALIZER = tf.random_normal_initializer(mean=0.0, stddev=0.01) BBOX_INITIALIZER = tf.random_normal_initializer(mean=0.0,", "= 1.0 MUTILPY_BIAS_GRADIENT = None # 2.0 # if None,", "= [[10., 10., 5.0, 5.0], [20., 20., 10.0, 10.0], [40.,", "5.0, 5.0], [20., 20., 10.0, 10.0], [40., 40., 20.0, 20.0]]", "if None, will not multipy GRADIENT_CLIPPING_BY_NORM = None # 10.0", "= 1.0 RPN_SIGMA = 3.0 FASTRCNN_SIGMA = 1.0 MUTILPY_BIAS_GRADIENT =", "0~3 FREEZE_BLOCKS = [True, False, False, False, False] # for", "# allow 0~3 FREEZE_BLOCKS = [True, False, False, False, False]", "FAST_RCNN_NMS_IOU_THRESHOLD = 0.5 # 0.6 FAST_RCNN_NMS_MAX_BOXES_PER_CLASS = 100 FAST_RCNN_IOU_POSITIVE_THRESHOLD =", "len(GPU_GROUP.strip().split(',')) SHOW_TRAIN_INFO_INTE = 20 SMRY_ITER = 200 SAVE_WEIGHTS_INTE = 80000", "print(20*\"++--\") print(ROOT_PATH) GPU_GROUP = \"0,1,2,3,4,5,6,7\" NUM_GPU = 
len(GPU_GROUP.strip().split(',')) SHOW_TRAIN_INFO_INTE =", "'pascal', 'coco' PIXEL_MEAN = [123.68, 116.779, 103.939] # R, G,", "= 0.0 # 0.1 < IOU < 0.5 is negative", "is RGB. In openCV, channel is BGR IMG_SHORT_SIDE_LEN = 800", "= 12000 RPN_MAXIMUM_PROPOSAL_TARIN = 2000 RPN_TOP_K_NMS_TEST = 6000 RPN_MAXIMUM_PROPOSAL_TEST =", "import tensorflow as tf ''' gluoncv backbone + multi_gpu '''", "20.0, 20.0]] ANCHOR_SCALE_FACTORS = [10., 10., 5.0, 5.0] # --------------------------------------------FPN", "= 14 ROI_POOL_KERNEL_SIZE = 2 USE_DROPOUT = False KEEP_PROB =", "IS_FILTER_OUTSIDE_BOXES = False FIXED_BLOCKS = 0 # allow 0~3 FREEZE_BLOCKS", "= [11*SAVE_WEIGHTS_INTE, 16*SAVE_WEIGHTS_INTE, 20*SAVE_WEIGHTS_INTE] # 50000, 70000 MAX_ITERATION = 20*SAVE_WEIGHTS_INTE", "0.5 RPN_LOCATION_LOSS_WEIGHT = 1. RPN_CLASSIFICATION_LOSS_WEIGHT = 1.0 FAST_RCNN_LOCATION_LOSS_WEIGHT = 1.0", "'P4', 'P5', 'P6'] BASE_ANCHOR_SIZE_LIST = [32, 64, 128, 256, 512]", "ROOT_PATH + '/tools/inference_image' INFERENCE_SAVE_PATH = ROOT_PATH + '/tools/inference_results' if NET_NAME.startswith(\"resnet\"):", "# 10.0 if None, will not clip EPSILON = 1e-5", "0.6 FAST_RCNN_NMS_MAX_BOXES_PER_CLASS = 100 FAST_RCNN_IOU_POSITIVE_THRESHOLD = 0.5 FAST_RCNN_IOU_NEGATIVE_THRESHOLD = 0.0", "['P2', 'P3', 'P4', 'P5', 'P6'] BASE_ANCHOR_SIZE_LIST = [32, 64, 128,", "= True LEVLES = ['P2', 'P3', 'P4', 'P5', 'P6'] BASE_ANCHOR_SIZE_LIST", "import division, print_function, absolute_import import os import tensorflow as tf", "MOMENTUM = 0.9 BATCH_SIZE = 1 WARM_SETP = int(0.25 *", "256 RPN_POSITIVE_RATE = 0.5 RPN_NMS_IOU_THRESHOLD = 0.7 RPN_TOP_K_NMS_TRAIN = 12000", "20., 10.0, 10.0], [40., 40., 20.0, 20.0]] ANCHOR_SCALE_FACTORS = [10.,", "# 50000, 70000 MAX_ITERATION = 20*SAVE_WEIGHTS_INTE # -------------------------------------------- Data_preprocess_config DATASET_NAME", "+ '/data/pretrained_weights/' + weights_name + '.ckpt' TRAINED_CKPT = os.path.join(ROOT_PATH, 'output/trained_weights')", "MAX_ITERATION = 20*SAVE_WEIGHTS_INTE # -------------------------------------------- Data_preprocess_config DATASET_NAME = 'coco' #", "os import tensorflow as tf ''' gluoncv backbone + multi_gpu", "16*SAVE_WEIGHTS_INTE, 20*SAVE_WEIGHTS_INTE] # 50000, 70000 MAX_ITERATION = 20*SAVE_WEIGHTS_INTE # --------------------------------------------", "5e-4 * 2 * 1.25 * NUM_GPU * BATCH_SIZE DECAY_STEP", "1e-5 MOMENTUM = 0.9 BATCH_SIZE = 1 WARM_SETP = int(0.25", "raise NotImplementedError PRETRAINED_CKPT = ROOT_PATH + '/data/pretrained_weights/' + weights_name +", "In openCV, channel is BGR PIXEL_MEAN_ = [0.485, 0.456, 0.406]", "1 WARM_SETP = int(0.25 * SAVE_WEIGHTS_INTE) LR = 5e-4 *", "backbone USE_07_METRIC = True CUDA9 = True EVAL_THRESHOLD = 0.5", "10., 5.0, 5.0], [20., 20., 10.0, 10.0], [40., 40., 20.0,", "RPN_TOP_K_NMS_TRAIN = 12000 RPN_MAXIMUM_PROPOSAL_TARIN = 2000 RPN_TOP_K_NMS_TEST = 6000 RPN_MAXIMUM_PROPOSAL_TEST", "if is -1, that is train with OHEM FAST_RCNN_POSITIVE_RATE =", "None # 2.0 # if None, will not multipy GRADIENT_CLIPPING_BY_NORM", "ROOT_PATH + '/data/pretrained_weights/' + weights_name + '.ckpt' TRAINED_CKPT = os.path.join(ROOT_PATH,", "--------------------------------------------FPN config SHARE_HEADS = True KERNEL_SIZE = 3 RPN_IOU_POSITIVE_THRESHOLD =", "RGB. In openCV, channel is BGR PIXEL_MEAN_ = [0.485, 0.456,", "will not clip EPSILON = 1e-5 MOMENTUM = 0.9 BATCH_SIZE", "RPN_LOCATION_LOSS_WEIGHT = 1. 
# -*- coding: utf-8 -*-
from __future__ import division, print_function, absolute_import
import os
import tensorflow as tf

'''
gluoncv backbone + multi_gpu
'''

# ------------------------------------------------
VERSION = 'Cascade_FPN_Res50_COCO_1x_20190421_v3'
NET_NAME = 'resnet50_v1d'
ADD_BOX_IN_TENSORBOARD = True

# ---------------------------------------- System_config
ROOT_PATH = os.path.abspath('../')
print(20*"++--")
print(ROOT_PATH)
GPU_GROUP = "0,1,2,3,4,5,6,7"
NUM_GPU = len(GPU_GROUP.strip().split(','))
SHOW_TRAIN_INFO_INTE = 20
SMRY_ITER = 200
SAVE_WEIGHTS_INTE = 80000

SUMMARY_PATH = ROOT_PATH + '/output/summary'
TEST_SAVE_PATH = ROOT_PATH + '/tools/test_result'
INFERENCE_IMAGE_PATH = ROOT_PATH + '/tools/inference_image'
INFERENCE_SAVE_PATH = ROOT_PATH + '/tools/inference_results'

if NET_NAME.startswith("resnet"):
    weights_name = NET_NAME
elif NET_NAME.startswith("MobilenetV2"):
    weights_name = "mobilenet/mobilenet_v2_1.0_224"
else:
    raise NotImplementedError

PRETRAINED_CKPT = ROOT_PATH + '/data/pretrained_weights/' + weights_name + '.ckpt'
TRAINED_CKPT = os.path.join(ROOT_PATH, 'output/trained_weights')
EVALUATE_DIR = ROOT_PATH + '/output/evaluate_result_pickle/'

# ------------------------------------------ Train config
RESTORE_FROM_RPN = False
IS_FILTER_OUTSIDE_BOXES = False
FIXED_BLOCKS = 0  # allow 0~3
FREEZE_BLOCKS = [True, False, False, False, False]  # for gluoncv backbone
USE_07_METRIC = True
CUDA9 = True
EVAL_THRESHOLD = 0.5

RPN_LOCATION_LOSS_WEIGHT = 1.
RPN_CLASSIFICATION_LOSS_WEIGHT = 1.0
FAST_RCNN_LOCATION_LOSS_WEIGHT = 1.0
FAST_RCNN_CLASSIFICATION_LOSS_WEIGHT = 1.0
RPN_SIGMA = 3.0
FASTRCNN_SIGMA = 1.0

MUTILPY_BIAS_GRADIENT = None  # 2.0  # if None, will not multipy
GRADIENT_CLIPPING_BY_NORM = None  # 10.0  if None, will not clip

EPSILON = 1e-5
MOMENTUM = 0.9
BATCH_SIZE = 1
WARM_SETP = int(0.25 * SAVE_WEIGHTS_INTE)
LR = 5e-4 * 2 * 1.25 * NUM_GPU * BATCH_SIZE
DECAY_STEP = [11*SAVE_WEIGHTS_INTE, 16*SAVE_WEIGHTS_INTE, 20*SAVE_WEIGHTS_INTE]  # 50000, 70000
MAX_ITERATION = 20*SAVE_WEIGHTS_INTE

# -------------------------------------------- Data_preprocess_config
DATASET_NAME = 'coco'  # 'pascal', 'coco'
PIXEL_MEAN = [123.68, 116.779, 103.939]  # R, G, B. In tf, channel is RGB. In openCV, channel is BGR
PIXEL_MEAN_ = [0.485, 0.456, 0.406]
PIXEL_STD = [0.229, 0.224, 0.225]  # R, G, B. In tf, channel is RGB. In openCV, channel is BGR
IMG_SHORT_SIDE_LEN = 800
IMG_MAX_LENGTH = 1333
CLASS_NUM = 80

# --------------------------------------------- Network_config
INITIALIZER = tf.random_normal_initializer(mean=0.0, stddev=0.01)
BBOX_INITIALIZER = tf.random_normal_initializer(mean=0.0, stddev=0.001)
WEIGHT_DECAY = 0.00004 if NET_NAME.startswith('Mobilenet') else 0.0001
IS_ASSIGN = True

# ---------------------------------------------Anchor config
USE_CENTER_OFFSET = True
LEVLES = ['P2', 'P3', 'P4', 'P5', 'P6']
BASE_ANCHOR_SIZE_LIST = [32, 64, 128, 256, 512]
ANCHOR_STRIDE_LIST = [4, 8, 16, 32, 64]
ANCHOR_SCALES = [1.0]
ANCHOR_RATIOS = [0.5, 1., 2.0]
ROI_SCALE_FACTORS = [[10., 10., 5.0, 5.0], [20., 20., 10.0, 10.0], [40., 40., 20.0, 20.0]]
ANCHOR_SCALE_FACTORS = [10., 10., 5.0, 5.0]

# --------------------------------------------FPN config
SHARE_HEADS = True
KERNEL_SIZE = 3
RPN_IOU_POSITIVE_THRESHOLD = 0.7
RPN_IOU_NEGATIVE_THRESHOLD = 0.3
TRAIN_RPN_CLOOBER_POSITIVES = False

RPN_MINIBATCH_SIZE = 256
RPN_POSITIVE_RATE = 0.5
RPN_NMS_IOU_THRESHOLD = 0.7
RPN_TOP_K_NMS_TRAIN = 12000
RPN_MAXIMUM_PROPOSAL_TARIN = 2000
RPN_TOP_K_NMS_TEST = 6000
RPN_MAXIMUM_PROPOSAL_TEST = 1000

# -------------------------------------------Fast-RCNN config
ROI_SIZE = 14
ROI_POOL_KERNEL_SIZE = 2
USE_DROPOUT = False
KEEP_PROB = 1.0
SHOW_SCORE_THRSHOLD = 0.6  # only show in tensorboard

FAST_RCNN_NMS_IOU_THRESHOLD = 0.5  # 0.6
FAST_RCNN_NMS_MAX_BOXES_PER_CLASS = 100
FAST_RCNN_IOU_POSITIVE_THRESHOLD = 0.5
FAST_RCNN_IOU_NEGATIVE_THRESHOLD = 0.0  # 0.1 < IOU < 0.5 is negative
FAST_RCNN_MINIBATCH_SIZE = 512  # if is -1, that is train with OHEM
FAST_RCNN_POSITIVE_RATE = 0.25
ADD_GTBOXES_TO_TRAIN = False
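# The LR / WARM_SETP / DECAY_STEP constants above describe a warmup-then-step
# learning-rate schedule (with NUM_GPU = 8 and BATCH_SIZE = 1 the base LR works
# out to 0.01). Below is a minimal sketch of how such a schedule can be
# evaluated per global step; the helper name `warmup_then_decay`, the linear
# warmup, and the divide-by-10 policy at each boundary are illustrative
# assumptions, not something defined in this config file.

def warmup_then_decay(global_step, base_lr=LR, warmup_steps=WARM_SETP,
                      boundaries=DECAY_STEP, decay_factor=0.1):
    # Linear warmup up to base_lr, then multiply by decay_factor at each boundary.
    if global_step < warmup_steps:
        return base_lr * (global_step + 1) / float(warmup_steps)
    lr = base_lr
    for boundary in boundaries:
        if global_step >= boundary:
            lr *= decay_factor
    return lr

# Example: print the learning rate at a few milestones of the schedule.
for _step in [0, WARM_SETP // 2, WARM_SETP, DECAY_STEP[0], DECAY_STEP[1]]:
    print(_step, warmup_then_decay(_step))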
<gh_stars>1-10
#
# Copyright (2020) The Delta Lake Project Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#

import unittest

import delta.exceptions as exceptions

from delta.testing.utils import DeltaTestCase


class DeltaExceptionTests(DeltaTestCase):

    def _raise_concurrent_exception(self, exception_type):
        e = exception_type("")
        self.spark.sparkContext._jvm.scala.util.Failure(e).get()

    def test_capture_concurrent_write_exception(self):
        e = self.spark._jvm.io.delta.exceptions.ConcurrentWriteException
        self.assertRaises(exceptions.ConcurrentWriteException, lambda: self._raise_concurrent_exception(e))

    def test_capture_metadata_changed_exception(self):
        e = self.spark._jvm.io.delta.exceptions.MetadataChangedException
        self.assertRaises(exceptions.MetadataChangedException, lambda: self._raise_concurrent_exception(e))

    def test_capture_protocol_changed_exception(self):
        e = self.spark._jvm.io.delta.exceptions.ProtocolChangedException
        self.assertRaises(exceptions.ProtocolChangedException, lambda: self._raise_concurrent_exception(e))

    def test_capture_concurrent_append_exception(self):
        e = self.spark._jvm.io.delta.exceptions.ConcurrentAppendException
        self.assertRaises(exceptions.ConcurrentAppendException, lambda: self._raise_concurrent_exception(e))

    def test_capture_concurrent_delete_read_exception(self):
        e = self.spark._jvm.io.delta.exceptions.ConcurrentDeleteReadException
        self.assertRaises(exceptions.ConcurrentDeleteReadException, lambda: self._raise_concurrent_exception(e))

    def test_capture_concurrent_delete_delete_exception(self):
        e = self.spark._jvm.io.delta.exceptions.ConcurrentDeleteDeleteException
        self.assertRaises(exceptions.ConcurrentDeleteDeleteException, lambda: self._raise_concurrent_exception(e))

    def test_capture_concurrent_transaction_exception(self):
        e = self.spark._jvm.io.delta.exceptions.ConcurrentTransactionException
        self.assertRaises(exceptions.ConcurrentTransactionException, lambda: self._raise_concurrent_exception(e))


if __name__ == "__main__":
    try:
        import xmlrunner
        testRunner = xmlrunner.XMLTestRunner(output='target/test-reports', verbosity=4)
    except ImportError:
        testRunner = None
    unittest.main(testRunner=testRunner, verbosity=4)
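# The tests above only verify that the JVM-side Delta concurrency errors
# surface as the matching Python classes in delta.exceptions. The usual reason
# an application cares about that mapping is optimistic-concurrency retries;
# the sketch below is an illustrative retry helper built only on those
# exception classes -- the helper itself, its attempt count, and its backoff
# policy are assumptions, not part of the Delta test suite.
import time

import delta.exceptions as exceptions

RETRYABLE_CONFLICTS = (
    exceptions.ConcurrentAppendException,
    exceptions.ConcurrentDeleteReadException,
    exceptions.ConcurrentWriteException,
)

def write_with_retries(write_fn, attempts=3, backoff_seconds=2.0):
    # Call write_fn(); on a retryable Delta conflict, back off and try again.
    for attempt in range(1, attempts + 1):
        try:
            return write_fn()
        except RETRYABLE_CONFLICTS:
            if attempt == attempts:
                raise
            time.sleep(backoff_seconds * attempt)

# Usage sketch (assumes a DataFrame `df` and a Delta path already exist):
# write_with_retries(lambda: df.write.format("delta").mode("append").save("/tmp/delta/events"))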
def matrix_form():
    r = int(input("Enter the no of rows"))
    c = int(input("Enter the no of columns"))
    matrix = []
    print("Enter the enteries")
    for i in range(r):
        a = []
        for j in range(c):
            a.append(int(input()))
        matrix.append(a)
    return(matrix)

def check_matrix(first_matrix, sec_matrix):
    if(first_matrix == sec_matrix):
        print("same")
    else:
        print("not same")

print("Enter the 1st matrix")
first_matrix = matrix_form()
print(first_matrix)
print("Enter the 2nd matrix")
sec_matrix = matrix_form()
print(sec_matrix)
check_matrix(first_matrix, sec_matrix)
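# The comparison above relies on Python's element-wise == for nested lists, so
# two matrices are "same" only when every row matches. A NumPy-based
# alternative (an illustrative assumption -- the script itself does not use
# NumPy) also treats differing shapes as "not same" via np.array_equal:
import numpy as np

def check_matrix_np(first_matrix, sec_matrix):
    same = np.array_equal(np.array(first_matrix), np.array(sec_matrix))
    print("same" if same else "not same")
    return same

check_matrix_np([[1, 2], [3, 4]], [[1, 2], [3, 4]])  # prints "same"
check_matrix_np([[1, 2], [3, 4]], [[1, 2], [3, 5]])  # prints "not same"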
#!/home/ubuntu/miniconda2/bin/python
from __future__ import division
import sys
import glob, os, gc
import uuid
import os.path
import csv
import numpy as np
from time import time
from subprocess import (call, Popen, PIPE)
from itertools import product
import shutil
import re
import pickle
from boto3.session import Session
import boto3
import h5py
import umap
import hdbscan
from keras.models import load_model
from keras.models import Model
from keras import backend as K
from keras.utils import multi_gpu_model

##Path to Data
basepath = "/home/ubuntu/"
subject = sys.argv[1]

with open("config.txt") as f:
    config = [line.rstrip() for line in f]
print config[0]
print config[1]

session = Session(aws_access_key_id=config[0], aws_secret_access_key=config[1])
s3 = session.resource('s3')
s3 = boto3.client('s3')
s3.download_file('for-ndar', os.path.join("metadata/", subject + ".txt"), os.path.join(basepath, subject + ".txt"))

with open(subject + ".txt") as f:
    Cells = [line.rstrip() for line in f]

session = Session(aws_access_key_id=config[0], aws_secret_access_key=config[1])
s3 = session.resource('s3')
s3.meta.client.download_file('bsmn-data', os.path.join('Inception_Transfer_Model.h5'), os.path.join(basepath, 'Inception_Transfer_Model.h5'))
feat_extractor = load_model(os.path.join(basepath, 'Inception_Transfer_Model.h5'))
parallel_model = multi_gpu_model(feat_extractor, gpus=2)

count = 0
for cell in Cells:
    print(cell)
    cell_size = 0
    cell_ids = []
    s3.meta.client.download_file('bsmn-data', os.path.join(subject, cell + '_IDs.h5'), os.path.join(basepath, cell + '_IDs.h5'))
    f = h5py.File(os.path.join(basepath, cell + '_IDs.h5'), 'r')
    cell_ids = f['ID']
    for cid in cell_ids:
        cid = cid.decode('utf-8')
        s3.meta.client.download_file('bsmn-data', os.path.join(subject, cell + '_' + cid + '.h5'), os.path.join(basepath, cell + '_' + cid + '.h5'))
        xyz = h5py.File(os.path.join(basepath, cell + '_' + cid + '.h5'), 'r')
        os.remove(os.path.join(basepath, cell + '_' + cid + '.h5'))
        if count == 0:
            X = xyz['X']
            Y = xyz['Y']
            Z = parallel_model.predict(X, batch_size=128)
            count += 1
            length = len(Y)
            U = [cid] * length
        else:
            X = xyz['X']
            Y = np.append(Y, xyz['Y'], axis=0)
            z = feat_extractor.predict(X, batch_size=128)
            Z = np.append(Z, z, axis=0)
            length = len(xyz['Y'])
            U = U + ([cid] * length)

print(Z.shape)
hf = h5py.File(subject + '_ef.h5', 'w')
hf.create_dataset('Y', data=Y)
hf.create_dataset('Z', data=Z)
hf.close()

session = Session(aws_access_key_id=config[0], aws_secret_access_key=config[1])
s3 = session.resource('s3')
s3.meta.client.upload_file(os.path.join(subject + '_ef.h5'), 'bsmn-data', os.path.join(subject, subject + '_ef.h5'))
call(['sudo', 'shutdown', '-h', 'now'])
cell+'_'+cid+'.h5'),os.path.join(basepath,cell+'_'+cid+'.h5')) xyz = h5py.File(os.path.join(basepath,cell+'_'+cid+'.h5'), 'r') os.remove(os.path.join(basepath,cell+'_'+cid+'.h5'))", "z = feat_extractor.predict(X, batch_size = 128) Z = np.append(Z,z, axis=0)", "hf = h5py.File(subject+'_ef.h5', 'w') hf.create_dataset('Y', data=Y) hf.create_dataset('Z', data=Z) hf.close() session", "= len(Y) U = [cid] * length else: X =", "cid.decode('utf-8') s3.meta.client.download_file('bsmn-data',os.path.join(subject, cell+'_'+cid+'.h5'),os.path.join(basepath,cell+'_'+cid+'.h5')) xyz = h5py.File(os.path.join(basepath,cell+'_'+cid+'.h5'), 'r') os.remove(os.path.join(basepath,cell+'_'+cid+'.h5')) if count", "h5py import umap import hdbscan from keras.models import load_model from", "xyz['X'] Y = xyz['Y'] Z = parallel_model.predict(X, batch_size = 128)", "xyz['Y'] Z = parallel_model.predict(X, batch_size = 128) count+=1 length =" ]
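The script above writes the pooled labels and features to <subject>_ef.h5 before uploading the file to the 'bsmn-data' bucket. A minimal sketch for reading that file back with h5py follows; the helper name load_features and the example paths are illustrative, not part of the original pipeline.

import os
import h5py
import numpy as np

def load_features(subject, basepath='.'):
    # Return the labels Y and the feature matrix Z written by the script above.
    with h5py.File(os.path.join(basepath, subject + '_ef.h5'), 'r') as hf:
        Y = np.array(hf['Y'])
        Z = np.array(hf['Z'])
    return Y, Z

# Example (hypothetical subject identifier and path):
# Y, Z = load_features('SUBJECT_ID', basepath='/home/ubuntu')
# print(Y.shape, Z.shape)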
[ "r0 def d2kepler_dchi2(chi, alpha, r0, vr0, mu, dt): ''' Second", "before breaking :return: (km) final position 3-vector, (km/s) final velocity", "anomaly, modified for use in numerical solvers. ''' z =", "f(E, e, Me)/fp(E, e) iters = 0 while abs(ratio) >", "velocity magnitude vr0 = np.dot(v_0, r_0)/r0 # (km/s) initial radial", "for numerical method (default 1e-7 is IEEE 745 single precision)", "using above methods r_n, v_n = solve_kepler_chi(r_0, v_0, dt, Earth,", "from textbook correct_r_1 = np.array([26338, -128750, -29656]) # (km) final", "form containing Eccentric Anomaly (E), eccentricity (e), and Mean Anomaly", "Problem 3.20 from Orbital Mechanics for Engineering Students, 4 ed,", "after initial time # given correct answer from textbook correct_r_1", "np.array([26338, -128750, -29656]) # (km) final position vector correct_v_1 =", "initial state to solve for r, v as 3-vectors :param", "import newton, laguerre from lagrange import calc_f, calc_fd, calc_g, calc_gd", "method not in VALID_METHODS: print(f'Method \\'{method}\\' is not valid, must", "alpha*chi**2 return (r0*vr0/np.sqrt(mu))*chi**2*C(z) + \\ (1 - alpha*r0)*chi**3*S(z) + \\", "alpha*r0)*chi**2*C(z) + r0 def d2kepler_dchi2(chi, alpha, r0, vr0, mu, dt):", "dt): ''' Kepler's Equation of the universal anomaly, modified for", "position 3-vector :param v_0: `iterable` (km/s) initial velocity 3-vector :param", "S from CelestialBody import BODIES from numerical import newton, laguerre", "Equation :param tol: `float` (--) decimal tolerance for numerical method", "numerical.py def f(E, e, Me): return E - e*np.sin(E) -", "z = alpha*chi**2 return (r0*vr0/np.sqrt(mu))*chi**2*C(z) + \\ (1 - alpha*r0)*chi**3*S(z)", "S_ = S(z) return (r0*vr0/np.sqrt(mu))*(1 - 3*z*S_ + z*(C(z) -", "3.20 from Orbital Mechanics for Engineering Students, 4 ed, Curtis.", "''' Solve Kepler's Equation in the form containing Eccentric Anomaly", "mu = body.mu # (km**3/s**2) gravitational parameter of the specified", "(--) maximum number of iterations in numerical method before breaking", "''' # given starting information Earth = BODIES['Earth'] # `CelestialBody`", "method='newton') r_l, v_l = solve_kepler_chi(r_0, v_0, dt, Earth, method='laguerre') #", "Kepler's Equation of the universal anomaly chi using the specified", "e, Me)/fp(E, e) iters += 1 E -= ratio converged", "return E - e*np.sin(E) - Me def fp(E, e): return", "(km) initial position vector v_0 = np.array([0.9, -3.4, -1.5]) #", "''' Derivative of Kepler's Equation of the universal anomaly, modified", "ratio ratio = f(E, e, Me)/fp(E, e) iters += 1", "chi0 = np.sqrt(mu)*np.abs(alpha)*dt if method not in VALID_METHODS: print(f'Method \\'{method}\\'", "4 ed, Curtis. ''' # given starting information Earth =", "- 3*z*S_ + z*(C(z) - 3*S_)) + \\ chi*(1 -", "Orbital Mechanics for Engineering Students, 4 ed, Curtis. ''' #", "fd*r_0 + gd*v_0 return r_1, v_1 def solve_kepler_E(e, Me, tol=1e-7,", "Curtis. ''' # given starting information Earth = BODIES['Earth'] #", "use of one of the numerical methods in numerical.py def", "initial velocity vector dt = 2*60*60 # (s) time of", "e/2 if Me < np.pi else Me - e/2 ratio", "Anomaly (E), eccentricity (e), and Mean Anomaly of Ellipse (Me).", "Engineering Students, 4 ed, Curtis. 
''' # given starting information", ":return: (km) final position 3-vector, (km/s) final velocity 3-vector '''", "correct_v_1 = np.array([0.86280, -3.2116, -1.4613]) # (km/s) final velocity vector", "inverse of semi-major axis chi0 = np.sqrt(mu)*np.abs(alpha)*dt if method not", "Engineering Students, 4 ed, Curtis. ''' # TODO: have this", "(--) the celestial body to use for orbital parameters :param", "Me < np.pi else Me - e/2 ratio = f(E,", "= np.array([0.86280, -3.2116, -1.4613]) # (km/s) final velocity vector #", "dt): ''' Second derivative of Kepler's Equation of the universal", "Curtis. ''' # TODO: have this function make use of", "(--) Earth and all the Earth things r_0 = np.array([20000,", "(km/s) initial radial velocity magnitude alpha = 2/r0 - v0**2/mu", "return (r0*vr0/np.sqrt(mu))*chi*(1 - alpha*chi**2*S(z)) + \\ (1 - alpha*r0)*chi**2*C(z) +", "return (r0*vr0/np.sqrt(mu))*chi**2*C(z) + \\ (1 - alpha*r0)*chi**3*S(z) + \\ r0*chi", "e) iters += 1 E -= ratio converged = np.abs(ratio)", ":param dt: `float` (s) time after initial state to solve", "`float` (s) time after initial state to solve for r,", "vr0, mu, dt): ''' Derivative of Kepler's Equation of the", "alpha*chi**2 return (r0*vr0/np.sqrt(mu))*chi*(1 - alpha*chi**2*S(z)) + \\ (1 - alpha*r0)*chi**2*C(z)", "return (r0*vr0/np.sqrt(mu))*(1 - 3*z*S_ + z*(C(z) - 3*S_)) + \\", "3.4 from Orbital Mechanics for Engineering Students, 4 ed, Curtis.", "-1.4613]) # (km/s) final velocity vector # solve using above", "(r0*vr0/np.sqrt(mu))*chi*(1 - alpha*chi**2*S(z)) + \\ (1 - alpha*r0)*chi**2*C(z) + r0", "3*z*S_ + z*(C(z) - 3*S_)) + \\ chi*(1 - z*S_)*(1", "np.dot(v_0, r_0)/r0 # (km/s) initial radial velocity magnitude alpha =", "+ gd*v_0 return r_1, v_1 def solve_kepler_E(e, Me, tol=1e-7, max_iters=100):", "return 1 - e*np.cos(E) E = Me + e/2 if", "alpha*r0) def solve_kepler_chi(r_0, v_0, dt, body=BODIES['Earth'], method='laguerre', tol=1e-7, max_iters=100): '''", "chi, _, _ = newton(chi0, kepler_chi, dkepler_dchi, alpha, r0, vr0,", "position vector v_0 = np.array([0.9, -3.4, -1.5]) # (km/s) initial", "test(): ''' Test the functionality of solve_kepler_chi and solve_kepler_laguerre using", "'laguerre' chi, _, _ = laguerre(chi0, kepler_chi, dkepler_dchi, d2kepler_dchi2, alpha,", "f*r_0 + g*v_0 r1 = np.linalg.norm(r_1) fd = calc_fd(mu, r1,", "r_n, v_n = solve_kepler_chi(r_0, v_0, dt, Earth, method='newton') r_l, v_l", "r0, vr0, mu, dt): ''' Derivative of Kepler's Equation of", "Anomaly of Ellipse (Me). Uses Algorithm 3.1 from Orbital Mechanics", "def fp(E, e): return 1 - e*np.cos(E) E = Me", "atol=1) and np.allclose(v_n, correct_v_1, atol=1e-4) laguerre_valid = np.allclose(r_l, correct_r_1, atol=1)", "Me + e/2 if Me < np.pi else Me -", "alpha, r0, vr0, mu, dt) elif method == 'newton': chi,", "= fd*r_0 + gd*v_0 return r_1, v_1 def solve_kepler_E(e, Me,", "which numerical method to use to solve Kepler's Equation :param", "eccentricity (e), and Mean Anomaly of Ellipse (Me). 
Uses Algorithm", "= np.linalg.norm(r_1) fd = calc_fd(mu, r1, r0, alpha, chi) gd", "= np.array([0.9, -3.4, -1.5]) # (km/s) initial velocity vector dt", "d2kepler_dchi2(chi, alpha, r0, vr0, mu, dt): ''' Second derivative of", "r0, alpha) g = calc_g(dt, mu, chi, alpha) r_1 =", "of {VALID_METHODS}.\\nDefaulting to laguerre method.') chi, _, _ = laguerre(chi0,", "np.linalg.norm(v_0) # (km/s) initial velocity magnitude vr0 = np.dot(v_0, r_0)/r0", "\\ (1 - alpha*r0)*chi**2*C(z) + r0 def d2kepler_dchi2(chi, alpha, r0,", "np.allclose(r_l, correct_r_1, atol=1) and np.allclose(v_l, correct_v_1, atol=1e-4) return all([newton_valid, laguerre_valid])", "alpha = 2/r0 - v0**2/mu # (1/km) inverse of semi-major", "`CelestialBody` (--) Earth and all the Earth things r_0 =", "= np.dot(v_0, r_0)/r0 # (km/s) initial radial velocity magnitude alpha", "= 2*60*60 # (s) time of interest after initial time", "the specified numerical method. Applies Algorithm 3.4 from Orbital Mechanics", "- e*np.sin(E) - Me def fp(E, e): return 1 -", "2*60*60 # (s) time of interest after initial time #", "f(E, e, Me)/fp(E, e) iters += 1 E -= ratio", "atol=1) and np.allclose(v_l, correct_v_1, atol=1e-4) return all([newton_valid, laguerre_valid]) if __name__", "parameters :param method: `str` (--) which numerical method to use", ":param v_0: `iterable` (km/s) initial velocity 3-vector :param dt: `float`", "Derivative of Kepler's Equation of the universal anomaly, modified for", "if Me < np.pi else Me - e/2 ratio =", "method: `str` (--) which numerical method to use to solve", "position 3-vector, (km/s) final velocity 3-vector ''' VALID_METHODS = ('laguerre',", "1e-7 is IEEE 745 single precision) :param max_iters: `int` (--)", "= laguerre(chi0, kepler_chi, dkepler_dchi, d2kepler_dchi2, alpha, r0, vr0, mu, dt)", "iters, converged def test(): ''' Test the functionality of solve_kepler_chi", "= BODIES['Earth'] # `CelestialBody` (--) Earth and all the Earth", "Earth, method='newton') r_l, v_l = solve_kepler_chi(r_0, v_0, dt, Earth, method='laguerre')", "dt) f = calc_f(chi, r0, alpha) g = calc_g(dt, mu,", "(km/s) initial velocity vector dt = 2*60*60 # (s) time", ":param body: `CelestialBody` (--) the celestial body to use for", "tol return E, iters, converged def test(): ''' Test the", "= np.array([20000, -105000, -19000]) # (km) initial position vector v_0", "`int` (--) maximum number of iterations in numerical method before", "dt): ''' Derivative of Kepler's Equation of the universal anomaly,", "tolerance for numerical method (default 1e-7 is IEEE 745 single", "laguerre_valid = np.allclose(r_l, correct_r_1, atol=1) and np.allclose(v_l, correct_v_1, atol=1e-4) return", "r0, alpha, chi) gd = calc_gd(chi, r1, alpha) v_1 =", "TODO: have this function make use of one of the", "method to use to solve Kepler's Equation :param tol: `float`", "one of the numerical methods in numerical.py def f(E, e,", "def dkepler_dchi(chi, alpha, r0, vr0, mu, dt): ''' Derivative of", "(Me). 
Uses Algorithm 3.1 from Orbital Mechanics for Engineering Students,", "(km) final position vector correct_v_1 = np.array([0.86280, -3.2116, -1.4613]) #", "(r0*vr0/np.sqrt(mu))*chi**2*C(z) + \\ (1 - alpha*r0)*chi**3*S(z) + \\ r0*chi -", "solve_kepler_chi(r_0, v_0, dt, Earth, method='laguerre') # check correctness # tolerance", "''' Solve Kepler's Equation of the universal anomaly chi using", "# (km) initial position magnitude v0 = np.linalg.norm(v_0) # (km/s)", "solve using above methods r_n, v_n = solve_kepler_chi(r_0, v_0, dt,", "alpha*chi**2*S(z)) + \\ (1 - alpha*r0)*chi**2*C(z) + r0 def d2kepler_dchi2(chi,", "and np.allclose(v_l, correct_v_1, atol=1e-4) return all([newton_valid, laguerre_valid]) if __name__ ==", "_, _ = newton(chi0, kepler_chi, dkepler_dchi, alpha, r0, vr0, mu,", "chi using the specified numerical method. Applies Algorithm 3.4 from", "== 'newton': chi, _, _ = newton(chi0, kepler_chi, dkepler_dchi, alpha,", "Me def fp(E, e): return 1 - e*np.cos(E) E =", "= np.allclose(r_n, correct_r_1, atol=1) and np.allclose(v_n, correct_v_1, atol=1e-4) laguerre_valid =", "gravitational parameter of the specified primary body r0 = np.linalg.norm(r_0)", "answer from textbook correct_r_1 = np.array([26338, -128750, -29656]) # (km)", "(1 - alpha*r0)*chi**2*C(z) + r0 def d2kepler_dchi2(chi, alpha, r0, vr0,", "initial position magnitude v0 = np.linalg.norm(v_0) # (km/s) initial velocity", "(km**3/s**2) gravitational parameter of the specified primary body r0 =", "else: # method == 'laguerre' chi, _, _ = laguerre(chi0,", "solve for r, v as 3-vectors :param body: `CelestialBody` (--)", "def f(E, e, Me): return E - e*np.sin(E) - Me", "Test the functionality of solve_kepler_chi and solve_kepler_laguerre using Problem 3.20", "of given answers newton_valid = np.allclose(r_n, correct_r_1, atol=1) and np.allclose(v_n,", "as np from stumpff import C, S from CelestialBody import", "chi*(1 - z*S_)*(1 - alpha*r0) def solve_kepler_chi(r_0, v_0, dt, body=BODIES['Earth'],", "+ g*v_0 r1 = np.linalg.norm(r_1) fd = calc_fd(mu, r1, r0,", "ed, Curtis. 
''' # given starting information Earth = BODIES['Earth']", "solve_kepler_laguerre using Problem 3.20 from Orbital Mechanics for Engineering Students,", "abs(ratio) > tol and iters < max_iters: E -= ratio", "alpha, r0, vr0, mu, dt): ''' Second derivative of Kepler's", "r_l, v_l = solve_kepler_chi(r_0, v_0, dt, Earth, method='laguerre') # check", "(r0*vr0/np.sqrt(mu))*(1 - 3*z*S_ + z*(C(z) - 3*S_)) + \\ chi*(1", "- v0**2/mu # (1/km) inverse of semi-major axis chi0 =", "kepler_chi(chi, alpha, r0, vr0, mu, dt): ''' Kepler's Equation of", "precision) :param max_iters: `int` (--) maximum number of iterations in", "interest after initial time # given correct answer from textbook", "based on significant figures of given answers newton_valid = np.allclose(r_n,", "methods in numerical.py def f(E, e, Me): return E -", "one of {VALID_METHODS}.\\nDefaulting to laguerre method.') chi, _, _ =", "z = alpha*chi**2 return (r0*vr0/np.sqrt(mu))*chi*(1 - alpha*chi**2*S(z)) + \\ (1", "(km) initial position magnitude v0 = np.linalg.norm(v_0) # (km/s) initial", "alpha, r0, vr0, mu, dt) f = calc_f(chi, r0, alpha)", "np.linalg.norm(r_1) fd = calc_fd(mu, r1, r0, alpha, chi) gd =", "fp(E, e): return 1 - e*np.cos(E) E = Me +", "of Kepler's Equation of the universal anomaly, modified for use", "vector dt = 2*60*60 # (s) time of interest after", "maximum number of iterations in numerical method before breaking :return:", "newton_valid = np.allclose(r_n, correct_r_1, atol=1) and np.allclose(v_n, correct_v_1, atol=1e-4) laguerre_valid", "final velocity 3-vector ''' VALID_METHODS = ('laguerre', 'newton') mu =", "iters < max_iters: E -= ratio ratio = f(E, e,", "(1/km) inverse of semi-major axis chi0 = np.sqrt(mu)*np.abs(alpha)*dt if method", "Me): return E - e*np.sin(E) - Me def fp(E, e):", "final velocity vector # solve using above methods r_n, v_n", "chi) gd = calc_gd(chi, r1, alpha) v_1 = fd*r_0 +", "if method not in VALID_METHODS: print(f'Method \\'{method}\\' is not valid,", "Earth, method='laguerre') # check correctness # tolerance based on significant", "1 E -= ratio converged = np.abs(ratio) <= tol return", "# (km/s) initial velocity vector dt = 2*60*60 # (s)", "= calc_f(chi, r0, alpha) g = calc_g(dt, mu, chi, alpha)", "+ \\ (1 - alpha*r0)*chi**2*C(z) + r0 def d2kepler_dchi2(chi, alpha,", "from Orbital Mechanics for Engineering Students, 4 ed, Curtis. :param", "the form containing Eccentric Anomaly (E), eccentricity (e), and Mean", "solvers. ''' z = alpha*chi**2 return (r0*vr0/np.sqrt(mu))*chi*(1 - alpha*chi**2*S(z)) +", "alpha, r0, vr0, mu, dt): ''' Derivative of Kepler's Equation", "answers newton_valid = np.allclose(r_n, correct_r_1, atol=1) and np.allclose(v_n, correct_v_1, atol=1e-4)", "''' Kepler's Equation of the universal anomaly, modified for use", "z*(C(z) - 3*S_)) + \\ chi*(1 - z*S_)*(1 - alpha*r0)", "method. 
Applies Algorithm 3.4 from Orbital Mechanics for Engineering Students,", "calc_f, calc_fd, calc_g, calc_gd def kepler_chi(chi, alpha, r0, vr0, mu,", "gd = calc_gd(chi, r1, alpha) v_1 = fd*r_0 + gd*v_0", "velocity 3-vector :param dt: `float` (s) time after initial state", "Earth = BODIES['Earth'] # `CelestialBody` (--) Earth and all the", "def kepler_chi(chi, alpha, r0, vr0, mu, dt): ''' Kepler's Equation", "= 2/r0 - v0**2/mu # (1/km) inverse of semi-major axis", "from stumpff import C, S from CelestialBody import BODIES from", "time of interest after initial time # given correct answer", "method='laguerre', tol=1e-7, max_iters=100): ''' Solve Kepler's Equation of the universal", "all the Earth things r_0 = np.array([20000, -105000, -19000]) #", "correct_r_1 = np.array([26338, -128750, -29656]) # (km) final position vector", "import BODIES from numerical import newton, laguerre from lagrange import", "r_0)/r0 # (km/s) initial radial velocity magnitude alpha = 2/r0", "3-vectors :param body: `CelestialBody` (--) the celestial body to use", "# (km**3/s**2) gravitational parameter of the specified primary body r0", "in numerical.py def f(E, e, Me): return E - e*np.sin(E)", "''' VALID_METHODS = ('laguerre', 'newton') mu = body.mu # (km**3/s**2)", "of the universal anomaly, modified for use in numerical solvers.", "# (km) final position vector correct_v_1 = np.array([0.86280, -3.2116, -1.4613])", ":param method: `str` (--) which numerical method to use to", "np.allclose(v_n, correct_v_1, atol=1e-4) laguerre_valid = np.allclose(r_l, correct_r_1, atol=1) and np.allclose(v_l,", "iterations in numerical method before breaking :return: (km) final position", "Orbital Mechanics for Engineering Students, 4 ed, Curtis. :param r_0:", "as 3-vectors :param body: `CelestialBody` (--) the celestial body to", "`float` (--) decimal tolerance for numerical method (default 1e-7 is", "(default 1e-7 is IEEE 745 single precision) :param max_iters: `int`", "# (km/s) initial velocity magnitude vr0 = np.dot(v_0, r_0)/r0 #", "not valid, must be one of {VALID_METHODS}.\\nDefaulting to laguerre method.')", "solve_kepler_chi(r_0, v_0, dt, body=BODIES['Earth'], method='laguerre', tol=1e-7, max_iters=100): ''' Solve Kepler's", "tol and iters < max_iters: E -= ratio ratio =", "# given correct answer from textbook correct_r_1 = np.array([26338, -128750,", "the specified primary body r0 = np.linalg.norm(r_0) # (km) initial", "d2kepler_dchi2, alpha, r0, vr0, mu, dt) elif method == 'newton':", "(e), and Mean Anomaly of Ellipse (Me). Uses Algorithm 3.1", "alpha) g = calc_g(dt, mu, chi, alpha) r_1 = f*r_0", "ed, Curtis. :param r_0: `iterable` (km) initial position 3-vector :param", "correct_v_1, atol=1e-4) return all([newton_valid, laguerre_valid]) if __name__ == '__main__': print(test())", "= ('laguerre', 'newton') mu = body.mu # (km**3/s**2) gravitational parameter", "E -= ratio converged = np.abs(ratio) <= tol return E,", "dt) else: # method == 'laguerre' chi, _, _ =", "correct_r_1, atol=1) and np.allclose(v_l, correct_v_1, atol=1e-4) return all([newton_valid, laguerre_valid]) if", "(km) initial position 3-vector :param v_0: `iterable` (km/s) initial velocity", "make use of one of the numerical methods in numerical.py", "Students, 4 ed, Curtis. 
''' # TODO: have this function", "(s) time after initial state to solve for r, v", "dt = 2*60*60 # (s) time of interest after initial", "alpha) r_1 = f*r_0 + g*v_0 r1 = np.linalg.norm(r_1) fd", "numerical methods in numerical.py def f(E, e, Me): return E", "of the universal anomaly chi using the specified numerical method.", "''' z = alpha*chi**2 return (r0*vr0/np.sqrt(mu))*chi*(1 - alpha*chi**2*S(z)) + \\", "solvers. ''' z = alpha*chi**2 return (r0*vr0/np.sqrt(mu))*chi**2*C(z) + \\ (1", "for Engineering Students, 4 ed, Curtis. ''' # given starting", "containing Eccentric Anomaly (E), eccentricity (e), and Mean Anomaly of", "e) iters = 0 while abs(ratio) > tol and iters", "dkepler_dchi, d2kepler_dchi2, alpha, r0, vr0, mu, dt) f = calc_f(chi,", "correct answer from textbook correct_r_1 = np.array([26338, -128750, -29656]) #", "v0 = np.linalg.norm(v_0) # (km/s) initial velocity magnitude vr0 =", "v0**2/mu # (1/km) inverse of semi-major axis chi0 = np.sqrt(mu)*np.abs(alpha)*dt", "on significant figures of given answers newton_valid = np.allclose(r_n, correct_r_1,", "Solve Kepler's Equation in the form containing Eccentric Anomaly (E),", "745 single precision) :param max_iters: `int` (--) maximum number of", "alpha, r0, vr0, mu, dt): ''' Kepler's Equation of the", "solvers. ''' z = alpha*chi**2 S_ = S(z) return (r0*vr0/np.sqrt(mu))*(1", "alpha, chi) gd = calc_gd(chi, r1, alpha) v_1 = fd*r_0", "v_l = solve_kepler_chi(r_0, v_0, dt, Earth, method='laguerre') # check correctness", "+ \\ (1 - alpha*r0)*chi**3*S(z) + \\ r0*chi - np.sqrt(mu)*dt", "z = alpha*chi**2 S_ = S(z) return (r0*vr0/np.sqrt(mu))*(1 - 3*z*S_", "`CelestialBody` (--) the celestial body to use for orbital parameters", "of semi-major axis chi0 = np.sqrt(mu)*np.abs(alpha)*dt if method not in", "be one of {VALID_METHODS}.\\nDefaulting to laguerre method.') chi, _, _", "time # given correct answer from textbook correct_r_1 = np.array([26338,", "r0 = np.linalg.norm(r_0) # (km) initial position magnitude v0 =", "the numerical methods in numerical.py def f(E, e, Me): return", "import calc_f, calc_fd, calc_g, calc_gd def kepler_chi(chi, alpha, r0, vr0,", "and Mean Anomaly of Ellipse (Me). 
Uses Algorithm 3.1 from", "def solve_kepler_E(e, Me, tol=1e-7, max_iters=100): ''' Solve Kepler's Equation in", "given starting information Earth = BODIES['Earth'] # `CelestialBody` (--) Earth", "check correctness # tolerance based on significant figures of given", "solve Kepler's Equation :param tol: `float` (--) decimal tolerance for", "calc_gd def kepler_chi(chi, alpha, r0, vr0, mu, dt): ''' Kepler's", "np.array([20000, -105000, -19000]) # (km) initial position vector v_0 =", "+ \\ chi*(1 - z*S_)*(1 - alpha*r0) def solve_kepler_chi(r_0, v_0,", "VALID_METHODS: print(f'Method \\'{method}\\' is not valid, must be one of", "method == 'newton': chi, _, _ = newton(chi0, kepler_chi, dkepler_dchi,", "final position vector correct_v_1 = np.array([0.86280, -3.2116, -1.4613]) # (km/s)", "ratio = f(E, e, Me)/fp(E, e) iters += 1 E", "('laguerre', 'newton') mu = body.mu # (km**3/s**2) gravitational parameter of", "Kepler's Equation :param tol: `float` (--) decimal tolerance for numerical", "3-vector, (km/s) final velocity 3-vector ''' VALID_METHODS = ('laguerre', 'newton')", "solve_kepler_chi and solve_kepler_laguerre using Problem 3.20 from Orbital Mechanics for", "valid, must be one of {VALID_METHODS}.\\nDefaulting to laguerre method.') chi,", "from numerical import newton, laguerre from lagrange import calc_f, calc_fd,", "v_n = solve_kepler_chi(r_0, v_0, dt, Earth, method='newton') r_l, v_l =", "Applies Algorithm 3.4 from Orbital Mechanics for Engineering Students, 4", "the universal anomaly chi using the specified numerical method. Applies", "= solve_kepler_chi(r_0, v_0, dt, Earth, method='laguerre') # check correctness #", "{VALID_METHODS}.\\nDefaulting to laguerre method.') chi, _, _ = laguerre(chi0, kepler_chi,", "`iterable` (km) initial position 3-vector :param v_0: `iterable` (km/s) initial", "alpha) v_1 = fd*r_0 + gd*v_0 return r_1, v_1 def", "Algorithm 3.4 from Orbital Mechanics for Engineering Students, 4 ed,", "numerical method (default 1e-7 is IEEE 745 single precision) :param", "in numerical method before breaking :return: (km) final position 3-vector,", "iters += 1 E -= ratio converged = np.abs(ratio) <=", "mu, dt): ''' Kepler's Equation of the universal anomaly, modified", "axis chi0 = np.sqrt(mu)*np.abs(alpha)*dt if method not in VALID_METHODS: print(f'Method", "# (1/km) inverse of semi-major axis chi0 = np.sqrt(mu)*np.abs(alpha)*dt if", "3-vector ''' VALID_METHODS = ('laguerre', 'newton') mu = body.mu #", "else Me - e/2 ratio = f(E, e, Me)/fp(E, e)", "r1, r0, alpha, chi) gd = calc_gd(chi, r1, alpha) v_1", "-19000]) # (km) initial position vector v_0 = np.array([0.9, -3.4,", "methods r_n, v_n = solve_kepler_chi(r_0, v_0, dt, Earth, method='newton') r_l,", "Earth things r_0 = np.array([20000, -105000, -19000]) # (km) initial", "in VALID_METHODS: print(f'Method \\'{method}\\' is not valid, must be one", "anomaly chi using the specified numerical method. Applies Algorithm 3.4", "for orbital parameters :param method: `str` (--) which numerical method", "dt, Earth, method='newton') r_l, v_l = solve_kepler_chi(r_0, v_0, dt, Earth,", "primary body r0 = np.linalg.norm(r_0) # (km) initial position magnitude", "- np.sqrt(mu)*dt def dkepler_dchi(chi, alpha, r0, vr0, mu, dt): '''", "# solve using above methods r_n, v_n = solve_kepler_chi(r_0, v_0,", "Mechanics for Engineering Students, 4 ed, Curtis. 
:param r_0: `iterable`", "textbook correct_r_1 = np.array([26338, -128750, -29656]) # (km) final position", "\\ chi*(1 - z*S_)*(1 - alpha*r0) def solve_kepler_chi(r_0, v_0, dt,", "Mechanics for Engineering Students, 4 ed, Curtis. ''' # TODO:", "radial velocity magnitude alpha = 2/r0 - v0**2/mu # (1/km)", "IEEE 745 single precision) :param max_iters: `int` (--) maximum number", "r0, vr0, mu, dt) elif method == 'newton': chi, _,", "is not valid, must be one of {VALID_METHODS}.\\nDefaulting to laguerre", "laguerre(chi0, kepler_chi, dkepler_dchi, d2kepler_dchi2, alpha, r0, vr0, mu, dt) f", "tol=1e-7, max_iters=100): ''' Solve Kepler's Equation in the form containing", "3-vector :param v_0: `iterable` (km/s) initial velocity 3-vector :param dt:", "solve_kepler_chi(r_0, v_0, dt, Earth, method='newton') r_l, v_l = solve_kepler_chi(r_0, v_0,", "Eccentric Anomaly (E), eccentricity (e), and Mean Anomaly of Ellipse", "Kepler's Equation in the form containing Eccentric Anomaly (E), eccentricity", "(E), eccentricity (e), and Mean Anomaly of Ellipse (Me). Uses", "and iters < max_iters: E -= ratio ratio = f(E,", "parameter of the specified primary body r0 = np.linalg.norm(r_0) #", "universal anomaly, modified for use in numerical solvers. ''' z", "dkepler_dchi, alpha, r0, vr0, mu, dt) else: # method ==", "from Orbital Mechanics for Engineering Students, 4 ed, Curtis. '''", "numerical import newton, laguerre from lagrange import calc_f, calc_fd, calc_g,", "# check correctness # tolerance based on significant figures of", "2/r0 - v0**2/mu # (1/km) inverse of semi-major axis chi0", "# method == 'laguerre' chi, _, _ = laguerre(chi0, kepler_chi,", "- alpha*r0)*chi**2*C(z) + r0 def d2kepler_dchi2(chi, alpha, r0, vr0, mu,", "(km/s) initial velocity 3-vector :param dt: `float` (s) time after", "to laguerre method.') chi, _, _ = laguerre(chi0, kepler_chi, dkepler_dchi,", "gd*v_0 return r_1, v_1 def solve_kepler_E(e, Me, tol=1e-7, max_iters=100): '''", "< max_iters: E -= ratio ratio = f(E, e, Me)/fp(E,", "body=BODIES['Earth'], method='laguerre', tol=1e-7, max_iters=100): ''' Solve Kepler's Equation of the", "from lagrange import calc_f, calc_fd, calc_g, calc_gd def kepler_chi(chi, alpha,", "have this function make use of one of the numerical", "Solve Kepler's Equation of the universal anomaly chi using the", "+ e/2 if Me < np.pi else Me - e/2", "vector v_0 = np.array([0.9, -3.4, -1.5]) # (km/s) initial velocity", "= np.array([26338, -128750, -29656]) # (km) final position vector correct_v_1", "Equation of the universal anomaly, modified for use in numerical", "Second derivative of Kepler's Equation of the universal anomaly, modified", "vr0, mu, dt) elif method == 'newton': chi, _, _", "this function make use of one of the numerical methods", "import numpy as np from stumpff import C, S from", "3-vector :param dt: `float` (s) time after initial state to", "np from stumpff import C, S from CelestialBody import BODIES", "'newton') mu = body.mu # (km**3/s**2) gravitational parameter of the", "= calc_g(dt, mu, chi, alpha) r_1 = f*r_0 + g*v_0", "S(z) return (r0*vr0/np.sqrt(mu))*(1 - 3*z*S_ + z*(C(z) - 3*S_)) +", "is IEEE 745 single precision) :param max_iters: `int` (--) maximum", "decimal tolerance for numerical method (default 1e-7 is IEEE 745", "velocity 3-vector ''' VALID_METHODS = ('laguerre', 'newton') mu = body.mu", "starting information Earth = BODIES['Earth'] # `CelestialBody` (--) Earth and", "vr0 = np.dot(v_0, r_0)/r0 # (km/s) initial radial velocity magnitude", "= calc_fd(mu, r1, r0, alpha, chi) gd = 
calc_gd(chi, r1,", "np.array([0.9, -3.4, -1.5]) # (km/s) initial velocity vector dt =", "in numerical solvers. ''' z = alpha*chi**2 return (r0*vr0/np.sqrt(mu))*chi*(1 -", "universal anomaly chi using the specified numerical method. Applies Algorithm", "final position 3-vector, (km/s) final velocity 3-vector ''' VALID_METHODS =", "kepler_chi, dkepler_dchi, alpha, r0, vr0, mu, dt) else: # method", "r0*chi - np.sqrt(mu)*dt def dkepler_dchi(chi, alpha, r0, vr0, mu, dt):", "max_iters: `int` (--) maximum number of iterations in numerical method", "the Earth things r_0 = np.array([20000, -105000, -19000]) # (km)", "magnitude v0 = np.linalg.norm(v_0) # (km/s) initial velocity magnitude vr0", "v_1 def solve_kepler_E(e, Me, tol=1e-7, max_iters=100): ''' Solve Kepler's Equation", "e*np.sin(E) - Me def fp(E, e): return 1 - e*np.cos(E)", "_ = laguerre(chi0, kepler_chi, dkepler_dchi, d2kepler_dchi2, alpha, r0, vr0, mu,", "v_1 = fd*r_0 + gd*v_0 return r_1, v_1 def solve_kepler_E(e,", "converged def test(): ''' Test the functionality of solve_kepler_chi and", "calc_g(dt, mu, chi, alpha) r_1 = f*r_0 + g*v_0 r1", "mu, dt) elif method == 'newton': chi, _, _ =", "calc_gd(chi, r1, alpha) v_1 = fd*r_0 + gd*v_0 return r_1,", "use to solve Kepler's Equation :param tol: `float` (--) decimal", "and np.allclose(v_n, correct_v_1, atol=1e-4) laguerre_valid = np.allclose(r_l, correct_r_1, atol=1) and", "numerical solvers. ''' z = alpha*chi**2 return (r0*vr0/np.sqrt(mu))*chi*(1 - alpha*chi**2*S(z))", "atol=1e-4) laguerre_valid = np.allclose(r_l, correct_r_1, atol=1) and np.allclose(v_l, correct_v_1, atol=1e-4)", "kepler_chi, dkepler_dchi, d2kepler_dchi2, alpha, r0, vr0, mu, dt) f =", "body: `CelestialBody` (--) the celestial body to use for orbital", "Curtis. :param r_0: `iterable` (km) initial position 3-vector :param v_0:", "f(E, e, Me): return E - e*np.sin(E) - Me def", "(km/s) final velocity 3-vector ''' VALID_METHODS = ('laguerre', 'newton') mu", "= S(z) return (r0*vr0/np.sqrt(mu))*(1 - 3*z*S_ + z*(C(z) - 3*S_))", "3.1 from Orbital Mechanics for Engineering Students, 4 ed, Curtis.", "for use in numerical solvers. ''' z = alpha*chi**2 S_", "from CelestialBody import BODIES from numerical import newton, laguerre from", "converged = np.abs(ratio) <= tol return E, iters, converged def", "_, _ = laguerre(chi0, kepler_chi, dkepler_dchi, d2kepler_dchi2, alpha, r0, vr0,", "np.allclose(v_l, correct_v_1, atol=1e-4) return all([newton_valid, laguerre_valid]) if __name__ == '__main__':", "numerical solvers. ''' z = alpha*chi**2 return (r0*vr0/np.sqrt(mu))*chi**2*C(z) + \\", "laguerre from lagrange import calc_f, calc_fd, calc_g, calc_gd def kepler_chi(chi,", "of iterations in numerical method before breaking :return: (km) final", "max_iters=100): ''' Solve Kepler's Equation in the form containing Eccentric", "vr0, mu, dt) else: # method == 'laguerre' chi, _,", "E -= ratio ratio = f(E, e, Me)/fp(E, e) iters", "mu, dt) f = calc_f(chi, r0, alpha) g = calc_g(dt,", "e, Me)/fp(E, e) iters = 0 while abs(ratio) > tol", "r_1 = f*r_0 + g*v_0 r1 = np.linalg.norm(r_1) fd =", "3*S_)) + \\ chi*(1 - z*S_)*(1 - alpha*r0) def solve_kepler_chi(r_0,", "- z*S_)*(1 - alpha*r0) def solve_kepler_chi(r_0, v_0, dt, body=BODIES['Earth'], method='laguerre',", "position vector correct_v_1 = np.array([0.86280, -3.2116, -1.4613]) # (km/s) final", "Mechanics for Engineering Students, 4 ed, Curtis. 
''' # given", "Earth and all the Earth things r_0 = np.array([20000, -105000,", "vector correct_v_1 = np.array([0.86280, -3.2116, -1.4613]) # (km/s) final velocity", "v_0: `iterable` (km/s) initial velocity 3-vector :param dt: `float` (s)", "r_0: `iterable` (km) initial position 3-vector :param v_0: `iterable` (km/s)", "initial position vector v_0 = np.array([0.9, -3.4, -1.5]) # (km/s)", "r, v as 3-vectors :param body: `CelestialBody` (--) the celestial", "for Engineering Students, 4 ed, Curtis. :param r_0: `iterable` (km)", "state to solve for r, v as 3-vectors :param body:", "= np.linalg.norm(v_0) # (km/s) initial velocity magnitude vr0 = np.dot(v_0,", "BODIES['Earth'] # `CelestialBody` (--) Earth and all the Earth things", "Uses Algorithm 3.1 from Orbital Mechanics for Engineering Students, 4", "r0, vr0, mu, dt): ''' Second derivative of Kepler's Equation", "iters = 0 while abs(ratio) > tol and iters <", "r0, vr0, mu, dt): ''' Kepler's Equation of the universal", "method (default 1e-7 is IEEE 745 single precision) :param max_iters:", "1 - e*np.cos(E) E = Me + e/2 if Me", "- alpha*r0)*chi**3*S(z) + \\ r0*chi - np.sqrt(mu)*dt def dkepler_dchi(chi, alpha,", "magnitude alpha = 2/r0 - v0**2/mu # (1/km) inverse of", "0 while abs(ratio) > tol and iters < max_iters: E", "_ = newton(chi0, kepler_chi, dkepler_dchi, alpha, r0, vr0, mu, dt)", "> tol and iters < max_iters: E -= ratio ratio", "alpha*chi**2 S_ = S(z) return (r0*vr0/np.sqrt(mu))*(1 - 3*z*S_ + z*(C(z)", "magnitude vr0 = np.dot(v_0, r_0)/r0 # (km/s) initial radial velocity", "derivative of Kepler's Equation of the universal anomaly, modified for", "laguerre method.') chi, _, _ = laguerre(chi0, kepler_chi, dkepler_dchi, d2kepler_dchi2,", "single precision) :param max_iters: `int` (--) maximum number of iterations", "semi-major axis chi0 = np.sqrt(mu)*np.abs(alpha)*dt if method not in VALID_METHODS:", "# tolerance based on significant figures of given answers newton_valid", "mu, dt): ''' Derivative of Kepler's Equation of the universal", "of solve_kepler_chi and solve_kepler_laguerre using Problem 3.20 from Orbital Mechanics", "\\'{method}\\' is not valid, must be one of {VALID_METHODS}.\\nDefaulting to", "''' # TODO: have this function make use of one", "# (km/s) final velocity vector # solve using above methods", "r0, vr0, mu, dt) f = calc_f(chi, r0, alpha) g", "-= ratio ratio = f(E, e, Me)/fp(E, e) iters +=", "method='laguerre') # check correctness # tolerance based on significant figures", "return r_1, v_1 def solve_kepler_E(e, Me, tol=1e-7, max_iters=100): ''' Solve", "'newton': chi, _, _ = newton(chi0, kepler_chi, dkepler_dchi, alpha, r0,", "def test(): ''' Test the functionality of solve_kepler_chi and solve_kepler_laguerre", "-105000, -19000]) # (km) initial position vector v_0 = np.array([0.9,", "of Ellipse (Me). Uses Algorithm 3.1 from Orbital Mechanics for", "while abs(ratio) > tol and iters < max_iters: E -=", "using Problem 3.20 from Orbital Mechanics for Engineering Students, 4", "numerical method before breaking :return: (km) final position 3-vector, (km/s)", "Equation in the form containing Eccentric Anomaly (E), eccentricity (e),", "use in numerical solvers. 
''' z = alpha*chi**2 return (r0*vr0/np.sqrt(mu))*chi*(1", "E - e*np.sin(E) - Me def fp(E, e): return 1", "body r0 = np.linalg.norm(r_0) # (km) initial position magnitude v0", "(s) time of interest after initial time # given correct", "-128750, -29656]) # (km) final position vector correct_v_1 = np.array([0.86280,", "vr0, mu, dt) f = calc_f(chi, r0, alpha) g =", "= newton(chi0, kepler_chi, dkepler_dchi, alpha, r0, vr0, mu, dt) else:", "dt, Earth, method='laguerre') # check correctness # tolerance based on", "Engineering Students, 4 ed, Curtis. :param r_0: `iterable` (km) initial", "- alpha*r0) def solve_kepler_chi(r_0, v_0, dt, body=BODIES['Earth'], method='laguerre', tol=1e-7, max_iters=100):", "= np.sqrt(mu)*np.abs(alpha)*dt if method not in VALID_METHODS: print(f'Method \\'{method}\\' is", "of one of the numerical methods in numerical.py def f(E,", "initial position 3-vector :param v_0: `iterable` (km/s) initial velocity 3-vector", "in numerical solvers. ''' z = alpha*chi**2 return (r0*vr0/np.sqrt(mu))*chi**2*C(z) +", "dt) elif method == 'newton': chi, _, _ = newton(chi0,", "= f*r_0 + g*v_0 r1 = np.linalg.norm(r_1) fd = calc_fd(mu,", "# `CelestialBody` (--) Earth and all the Earth things r_0", "-= ratio converged = np.abs(ratio) <= tol return E, iters,", "v_0, dt, Earth, method='laguerre') # check correctness # tolerance based", "def d2kepler_dchi2(chi, alpha, r0, vr0, mu, dt): ''' Second derivative", "Students, 4 ed, Curtis. :param r_0: `iterable` (km) initial position", "== 'laguerre' chi, _, _ = laguerre(chi0, kepler_chi, dkepler_dchi, d2kepler_dchi2,", ":param max_iters: `int` (--) maximum number of iterations in numerical", "np.sqrt(mu)*dt def dkepler_dchi(chi, alpha, r0, vr0, mu, dt): ''' Derivative", "ratio converged = np.abs(ratio) <= tol return E, iters, converged", "= np.abs(ratio) <= tol return E, iters, converged def test():", "functionality of solve_kepler_chi and solve_kepler_laguerre using Problem 3.20 from Orbital", "Equation of the universal anomaly chi using the specified numerical", "(1 - alpha*r0)*chi**3*S(z) + \\ r0*chi - np.sqrt(mu)*dt def dkepler_dchi(chi,", "# (km/s) initial radial velocity magnitude alpha = 2/r0 -", "and all the Earth things r_0 = np.array([20000, -105000, -19000])", "g*v_0 r1 = np.linalg.norm(r_1) fd = calc_fd(mu, r1, r0, alpha,", "- e/2 ratio = f(E, e, Me)/fp(E, e) iters =", "''' z = alpha*chi**2 return (r0*vr0/np.sqrt(mu))*chi**2*C(z) + \\ (1 -", "# (s) time of interest after initial time # given", "''' Second derivative of Kepler's Equation of the universal anomaly,", "= solve_kepler_chi(r_0, v_0, dt, Earth, method='newton') r_l, v_l = solve_kepler_chi(r_0,", "= calc_gd(chi, r1, alpha) v_1 = fd*r_0 + gd*v_0 return", "tol=1e-7, max_iters=100): ''' Solve Kepler's Equation of the universal anomaly", "figures of given answers newton_valid = np.allclose(r_n, correct_r_1, atol=1) and", "vr0, mu, dt): ''' Second derivative of Kepler's Equation of", "''' z = alpha*chi**2 S_ = S(z) return (r0*vr0/np.sqrt(mu))*(1 -", "of interest after initial time # given correct answer from", "and solve_kepler_laguerre using Problem 3.20 from Orbital Mechanics for Engineering", "correct_r_1, atol=1) and np.allclose(v_n, correct_v_1, atol=1e-4) laguerre_valid = np.allclose(r_l, correct_r_1,", "number of iterations in numerical method before breaking :return: (km)", "r1 = np.linalg.norm(r_1) fd = calc_fd(mu, r1, r0, alpha, chi)", "numerical solvers. ''' z = alpha*chi**2 S_ = S(z) return", "modified for use in numerical solvers. 
''' z = alpha*chi**2", "= body.mu # (km**3/s**2) gravitational parameter of the specified primary", "calc_f(chi, r0, alpha) g = calc_g(dt, mu, chi, alpha) r_1", "e/2 ratio = f(E, e, Me)/fp(E, e) iters = 0", "= np.allclose(r_l, correct_r_1, atol=1) and np.allclose(v_l, correct_v_1, atol=1e-4) return all([newton_valid,", "`iterable` (km/s) initial velocity 3-vector :param dt: `float` (s) time", "lagrange import calc_f, calc_fd, calc_g, calc_gd def kepler_chi(chi, alpha, r0,", "r1, alpha) v_1 = fd*r_0 + gd*v_0 return r_1, v_1", "in numerical solvers. ''' z = alpha*chi**2 S_ = S(z)", "E = Me + e/2 if Me < np.pi else", "max_iters: E -= ratio ratio = f(E, e, Me)/fp(E, e)", "4 ed, Curtis. ''' # TODO: have this function make", ":param tol: `float` (--) decimal tolerance for numerical method (default", "the celestial body to use for orbital parameters :param method:", "(km/s) final velocity vector # solve using above methods r_n,", "dt, body=BODIES['Earth'], method='laguerre', tol=1e-7, max_iters=100): ''' Solve Kepler's Equation of", "celestial body to use for orbital parameters :param method: `str`", "body.mu # (km**3/s**2) gravitational parameter of the specified primary body", "import C, S from CelestialBody import BODIES from numerical import", "after initial state to solve for r, v as 3-vectors", "return E, iters, converged def test(): ''' Test the functionality", "ratio = f(E, e, Me)/fp(E, e) iters = 0 while", "(--) which numerical method to use to solve Kepler's Equation", "<= tol return E, iters, converged def test(): ''' Test", "+ r0 def d2kepler_dchi2(chi, alpha, r0, vr0, mu, dt): '''", "def solve_kepler_chi(r_0, v_0, dt, body=BODIES['Earth'], method='laguerre', tol=1e-7, max_iters=100): ''' Solve", "vector # solve using above methods r_n, v_n = solve_kepler_chi(r_0,", "(km/s) initial velocity magnitude vr0 = np.dot(v_0, r_0)/r0 # (km/s)", "numerical method. Applies Algorithm 3.4 from Orbital Mechanics for Engineering", "to solve Kepler's Equation :param tol: `float` (--) decimal tolerance", "ed, Curtis. ''' # TODO: have this function make use", "Kepler's Equation of the universal anomaly, modified for use in", "things r_0 = np.array([20000, -105000, -19000]) # (km) initial position", "d2kepler_dchi2, alpha, r0, vr0, mu, dt) f = calc_f(chi, r0,", "e, Me): return E - e*np.sin(E) - Me def fp(E,", "np.abs(ratio) <= tol return E, iters, converged def test(): '''", "chi, alpha) r_1 = f*r_0 + g*v_0 r1 = np.linalg.norm(r_1)", "VALID_METHODS = ('laguerre', 'newton') mu = body.mu # (km**3/s**2) gravitational", "given correct answer from textbook correct_r_1 = np.array([26338, -128750, -29656])", "4 ed, Curtis. :param r_0: `iterable` (km) initial position 3-vector", "to use to solve Kepler's Equation :param tol: `float` (--)", "specified numerical method. Applies Algorithm 3.4 from Orbital Mechanics for", "correct_v_1, atol=1e-4) laguerre_valid = np.allclose(r_l, correct_r_1, atol=1) and np.allclose(v_l, correct_v_1,", "numerical method to use to solve Kepler's Equation :param tol:", "use in numerical solvers. ''' z = alpha*chi**2 S_ =", "of the specified primary body r0 = np.linalg.norm(r_0) # (km)", "alpha, r0, vr0, mu, dt) else: # method == 'laguerre'", "use in numerical solvers. ''' z = alpha*chi**2 return (r0*vr0/np.sqrt(mu))*chi**2*C(z)", "method == 'laguerre' chi, _, _ = laguerre(chi0, kepler_chi, dkepler_dchi,", "velocity vector dt = 2*60*60 # (s) time of interest", "for Engineering Students, 4 ed, Curtis. 
''' # TODO: have", "mu, chi, alpha) r_1 = f*r_0 + g*v_0 r1 =", "vr0, mu, dt): ''' Kepler's Equation of the universal anomaly,", "# TODO: have this function make use of one of", "of the numerical methods in numerical.py def f(E, e, Me):", "elif method == 'newton': chi, _, _ = newton(chi0, kepler_chi,", "np.linalg.norm(r_0) # (km) initial position magnitude v0 = np.linalg.norm(v_0) #", "Me - e/2 ratio = f(E, e, Me)/fp(E, e) iters", "(--) decimal tolerance for numerical method (default 1e-7 is IEEE", "correctness # tolerance based on significant figures of given answers", "solve_kepler_E(e, Me, tol=1e-7, max_iters=100): ''' Solve Kepler's Equation in the", "= f(E, e, Me)/fp(E, e) iters += 1 E -=", "significant figures of given answers newton_valid = np.allclose(r_n, correct_r_1, atol=1)", "\\ r0*chi - np.sqrt(mu)*dt def dkepler_dchi(chi, alpha, r0, vr0, mu,", "-1.5]) # (km/s) initial velocity vector dt = 2*60*60 #", "chi, _, _ = laguerre(chi0, kepler_chi, dkepler_dchi, d2kepler_dchi2, alpha, r0,", "v_0, dt, Earth, method='newton') r_l, v_l = solve_kepler_chi(r_0, v_0, dt,", "numpy as np from stumpff import C, S from CelestialBody", "dt: `float` (s) time after initial state to solve for", "Algorithm 3.1 from Orbital Mechanics for Engineering Students, 4 ed,", "calc_fd, calc_g, calc_gd def kepler_chi(chi, alpha, r0, vr0, mu, dt):", "time after initial state to solve for r, v as", "Ellipse (Me). Uses Algorithm 3.1 from Orbital Mechanics for Engineering", "# given starting information Earth = BODIES['Earth'] # `CelestialBody` (--)", "for r, v as 3-vectors :param body: `CelestialBody` (--) the", "mu, dt): ''' Second derivative of Kepler's Equation of the", "tol: `float` (--) decimal tolerance for numerical method (default 1e-7", "z*S_)*(1 - alpha*r0) def solve_kepler_chi(r_0, v_0, dt, body=BODIES['Earth'], method='laguerre', tol=1e-7,", "newton, laguerre from lagrange import calc_f, calc_fd, calc_g, calc_gd def", "dkepler_dchi, d2kepler_dchi2, alpha, r0, vr0, mu, dt) elif method ==", "velocity magnitude alpha = 2/r0 - v0**2/mu # (1/km) inverse", "not in VALID_METHODS: print(f'Method \\'{method}\\' is not valid, must be", "g = calc_g(dt, mu, chi, alpha) r_1 = f*r_0 +", "Students, 4 ed, Curtis. ''' # given starting information Earth", "\\ (1 - alpha*r0)*chi**3*S(z) + \\ r0*chi - np.sqrt(mu)*dt def", "the universal anomaly, modified for use in numerical solvers. '''", "= 0 while abs(ratio) > tol and iters < max_iters:", "np.array([0.86280, -3.2116, -1.4613]) # (km/s) final velocity vector # solve", "= f(E, e, Me)/fp(E, e) iters = 0 while abs(ratio)", "E, iters, converged def test(): ''' Test the functionality of", "orbital parameters :param method: `str` (--) which numerical method to", "velocity vector # solve using above methods r_n, v_n =", "alpha*r0)*chi**3*S(z) + \\ r0*chi - np.sqrt(mu)*dt def dkepler_dchi(chi, alpha, r0,", "Mean Anomaly of Ellipse (Me). 
Uses Algorithm 3.1 from Orbital", "np.allclose(r_n, correct_r_1, atol=1) and np.allclose(v_n, correct_v_1, atol=1e-4) laguerre_valid = np.allclose(r_l,", "position magnitude v0 = np.linalg.norm(v_0) # (km/s) initial velocity magnitude", "initial time # given correct answer from textbook correct_r_1 =", "< np.pi else Me - e/2 ratio = f(E, e,", "+= 1 E -= ratio converged = np.abs(ratio) <= tol", "e*np.cos(E) E = Me + e/2 if Me < np.pi", "tolerance based on significant figures of given answers newton_valid =", "v_0, dt, body=BODIES['Earth'], method='laguerre', tol=1e-7, max_iters=100): ''' Solve Kepler's Equation", "+ \\ r0*chi - np.sqrt(mu)*dt def dkepler_dchi(chi, alpha, r0, vr0,", "stumpff import C, S from CelestialBody import BODIES from numerical", "- alpha*chi**2*S(z)) + \\ (1 - alpha*r0)*chi**2*C(z) + r0 def", "np.sqrt(mu)*np.abs(alpha)*dt if method not in VALID_METHODS: print(f'Method \\'{method}\\' is not", "method.') chi, _, _ = laguerre(chi0, kepler_chi, dkepler_dchi, d2kepler_dchi2, alpha,", "in the form containing Eccentric Anomaly (E), eccentricity (e), and", "-3.2116, -1.4613]) # (km/s) final velocity vector # solve using", "to use for orbital parameters :param method: `str` (--) which", "# (km) initial position vector v_0 = np.array([0.9, -3.4, -1.5])", "Me)/fp(E, e) iters = 0 while abs(ratio) > tol and", "-29656]) # (km) final position vector correct_v_1 = np.array([0.86280, -3.2116,", "use for orbital parameters :param method: `str` (--) which numerical", "v as 3-vectors :param body: `CelestialBody` (--) the celestial body", "specified primary body r0 = np.linalg.norm(r_0) # (km) initial position", "fd = calc_fd(mu, r1, r0, alpha, chi) gd = calc_gd(chi,", "f = calc_f(chi, r0, alpha) g = calc_g(dt, mu, chi,", "C, S from CelestialBody import BODIES from numerical import newton,", "breaking :return: (km) final position 3-vector, (km/s) final velocity 3-vector", "using the specified numerical method. Applies Algorithm 3.4 from Orbital", "-3.4, -1.5]) # (km/s) initial velocity vector dt = 2*60*60", "= Me + e/2 if Me < np.pi else Me", "initial velocity 3-vector :param dt: `float` (s) time after initial", "- Me def fp(E, e): return 1 - e*np.cos(E) E", "+ z*(C(z) - 3*S_)) + \\ chi*(1 - z*S_)*(1 -", "calc_g, calc_gd def kepler_chi(chi, alpha, r0, vr0, mu, dt): '''", "print(f'Method \\'{method}\\' is not valid, must be one of {VALID_METHODS}.\\nDefaulting", "`str` (--) which numerical method to use to solve Kepler's", "for use in numerical solvers. 
''' z = alpha*chi**2 return", "v_0 = np.array([0.9, -3.4, -1.5]) # (km/s) initial velocity vector", "newton(chi0, kepler_chi, dkepler_dchi, alpha, r0, vr0, mu, dt) else: #", "''' Test the functionality of solve_kepler_chi and solve_kepler_laguerre using Problem", "(km) final position 3-vector, (km/s) final velocity 3-vector ''' VALID_METHODS", "initial velocity magnitude vr0 = np.dot(v_0, r_0)/r0 # (km/s) initial", "above methods r_n, v_n = solve_kepler_chi(r_0, v_0, dt, Earth, method='newton')", "= alpha*chi**2 return (r0*vr0/np.sqrt(mu))*chi**2*C(z) + \\ (1 - alpha*r0)*chi**3*S(z) +", "calc_fd(mu, r1, r0, alpha, chi) gd = calc_gd(chi, r1, alpha)", "- e*np.cos(E) E = Me + e/2 if Me <", "the functionality of solve_kepler_chi and solve_kepler_laguerre using Problem 3.20 from", "r_1, v_1 def solve_kepler_E(e, Me, tol=1e-7, max_iters=100): ''' Solve Kepler's", "dkepler_dchi(chi, alpha, r0, vr0, mu, dt): ''' Derivative of Kepler's", "e): return 1 - e*np.cos(E) E = Me + e/2", "information Earth = BODIES['Earth'] # `CelestialBody` (--) Earth and all", "Me, tol=1e-7, max_iters=100): ''' Solve Kepler's Equation in the form", "BODIES from numerical import newton, laguerre from lagrange import calc_f,", "Me)/fp(E, e) iters += 1 E -= ratio converged =", "to solve for r, v as 3-vectors :param body: `CelestialBody`", "CelestialBody import BODIES from numerical import newton, laguerre from lagrange", "mu, dt) else: # method == 'laguerre' chi, _, _", "must be one of {VALID_METHODS}.\\nDefaulting to laguerre method.') chi, _,", "np.pi else Me - e/2 ratio = f(E, e, Me)/fp(E,", "laguerre(chi0, kepler_chi, dkepler_dchi, d2kepler_dchi2, alpha, r0, vr0, mu, dt) elif", "r_0 = np.array([20000, -105000, -19000]) # (km) initial position vector", "- 3*S_)) + \\ chi*(1 - z*S_)*(1 - alpha*r0) def", "kepler_chi, dkepler_dchi, d2kepler_dchi2, alpha, r0, vr0, mu, dt) elif method", "method before breaking :return: (km) final position 3-vector, (km/s) final", "r0, vr0, mu, dt) else: # method == 'laguerre' chi,", "= np.linalg.norm(r_0) # (km) initial position magnitude v0 = np.linalg.norm(v_0)", "given answers newton_valid = np.allclose(r_n, correct_r_1, atol=1) and np.allclose(v_n, correct_v_1,", "body to use for orbital parameters :param method: `str` (--)", "initial radial velocity magnitude alpha = 2/r0 - v0**2/mu #", "function make use of one of the numerical methods in", "max_iters=100): ''' Solve Kepler's Equation of the universal anomaly chi", ":param r_0: `iterable` (km) initial position 3-vector :param v_0: `iterable`", "= alpha*chi**2 return (r0*vr0/np.sqrt(mu))*chi*(1 - alpha*chi**2*S(z)) + \\ (1 -", "= alpha*chi**2 S_ = S(z) return (r0*vr0/np.sqrt(mu))*(1 - 3*z*S_ +" ]
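solve_kepler_chi above delegates its root finding to newton and laguerre from a local numerical.py that is not included here. As a reference point, a minimal Newton-Raphson routine that matches the call site newton(chi0, kepler_chi, dkepler_dchi, alpha, r0, vr0, mu, dt) and its three return values could look like the sketch below; the actual implementation in numerical.py may differ.

def newton(x0, f, fprime, *args, tol=1e-7, max_iters=100):
    """Newton-Raphson iteration; returns (root, iterations, converged).

    Sketch only: the signature mirrors how solve_kepler_chi calls it, with the
    extra positional arguments passed straight through to f and fprime.
    """
    x = x0
    for i in range(1, max_iters + 1):
        ratio = f(x, *args) / fprime(x, *args)
        x -= ratio
        if abs(ratio) <= tol:
            return x, i, True
    return x, max_iters, False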
[ "'PGAA setup with XYZOmega sample table' group = 'basic' sysconfig", "'pilz', 'detector', 'collimation', ] devices = dict( mcasink = device('nicos_mlz.pgaa.devices.MCASink',", "settypes = {'point'}, detectors = ['_60p', 'LEGe'], ), chnsink =", "includes = [ 'system', 'reactor', 'nl4b', 'pressure', 'sampletable', 'pilz', 'detector',", "device('nicos_mlz.pgaa.devices.MCASink', settypes = {'point'}, detectors = ['_60p', 'LEGe'], ), chnsink", "), ) startupcode = \"\"\" SetDetectors('_60p', 'LEGe') SetEnvironment(chamber_pressure) printinfo(\"============================================================\") printinfo(\"Welcome", "'collimation', ] devices = dict( mcasink = device('nicos_mlz.pgaa.devices.MCASink', settypes =", "] devices = dict( mcasink = device('nicos_mlz.pgaa.devices.MCASink', settypes = {'point'},", "= {'point'}, detectors = ['_60p', 'LEGe'], ), csvsink = device('nicos_mlz.pgaa.devices.CSVDataSink',", ") startupcode = \"\"\" SetDetectors('_60p', 'LEGe') SetEnvironment(chamber_pressure) printinfo(\"============================================================\") printinfo(\"Welcome to", "), chnsink = device('nicos_mlz.pgaa.devices.CHNSink', settypes = {'point'}, detectors = ['_60p',", "['_60p', 'LEGe'], ), chnsink = device('nicos_mlz.pgaa.devices.CHNSink', settypes = {'point'}, detectors", "'livesink'] ) includes = [ 'system', 'reactor', 'nl4b', 'pressure', 'sampletable',", "= {'point'}, ), ) startupcode = \"\"\" SetDetectors('_60p', 'LEGe') SetEnvironment(chamber_pressure)", "= dict( datasinks = ['mcasink', 'chnsink', 'csvsink', 'livesink'] ) includes", "description = 'PGAA setup with XYZOmega sample table' group =", "{'point'}, detectors = ['_60p', 'LEGe'], ), csvsink = device('nicos_mlz.pgaa.devices.CSVDataSink', settypes", "['mcasink', 'chnsink', 'csvsink', 'livesink'] ) includes = [ 'system', 'reactor',", "'LEGe'], ), csvsink = device('nicos_mlz.pgaa.devices.CSVDataSink', settypes = {'point'}, ), )", "csvsink = device('nicos_mlz.pgaa.devices.CSVDataSink', settypes = {'point'}, ), ) startupcode =", "dict( datasinks = ['mcasink', 'chnsink', 'csvsink', 'livesink'] ) includes =", "XYZOmega sample table' group = 'basic' sysconfig = dict( datasinks", "group = 'basic' sysconfig = dict( datasinks = ['mcasink', 'chnsink',", "[ 'system', 'reactor', 'nl4b', 'pressure', 'sampletable', 'pilz', 'detector', 'collimation', ]", "\"\"\" SetDetectors('_60p', 'LEGe') SetEnvironment(chamber_pressure) printinfo(\"============================================================\") printinfo(\"Welcome to the NICOS PGAI", "'LEGe') SetEnvironment(chamber_pressure) printinfo(\"============================================================\") printinfo(\"Welcome to the NICOS PGAI demo setup.\")", "setup with XYZOmega sample table' group = 'basic' sysconfig =", "detectors = ['_60p', 'LEGe'], ), chnsink = device('nicos_mlz.pgaa.devices.CHNSink', settypes =", "{'point'}, detectors = ['_60p', 'LEGe'], ), chnsink = device('nicos_mlz.pgaa.devices.CHNSink', settypes", "device('nicos_mlz.pgaa.devices.CHNSink', settypes = {'point'}, detectors = ['_60p', 'LEGe'], ), csvsink", "devices = dict( mcasink = device('nicos_mlz.pgaa.devices.MCASink', settypes = {'point'}, detectors", "'reactor', 'nl4b', 'pressure', 'sampletable', 'pilz', 'detector', 'collimation', ] devices =", "'pressure', 'sampletable', 'pilz', 'detector', 'collimation', ] devices = dict( mcasink", "), csvsink = device('nicos_mlz.pgaa.devices.CSVDataSink', settypes = {'point'}, ), ) startupcode", "dict( mcasink = device('nicos_mlz.pgaa.devices.MCASink', 
settypes = {'point'}, detectors = ['_60p',", "mcasink = device('nicos_mlz.pgaa.devices.MCASink', settypes = {'point'}, detectors = ['_60p', 'LEGe'],", "datasinks = ['mcasink', 'chnsink', 'csvsink', 'livesink'] ) includes = [", "'basic' sysconfig = dict( datasinks = ['mcasink', 'chnsink', 'csvsink', 'livesink']", "startupcode = \"\"\" SetDetectors('_60p', 'LEGe') SetEnvironment(chamber_pressure) printinfo(\"============================================================\") printinfo(\"Welcome to the", "['_60p', 'LEGe'], ), csvsink = device('nicos_mlz.pgaa.devices.CSVDataSink', settypes = {'point'}, ),", "= [ 'system', 'reactor', 'nl4b', 'pressure', 'sampletable', 'pilz', 'detector', 'collimation',", "table' group = 'basic' sysconfig = dict( datasinks = ['mcasink',", "'chnsink', 'csvsink', 'livesink'] ) includes = [ 'system', 'reactor', 'nl4b',", "= device('nicos_mlz.pgaa.devices.CHNSink', settypes = {'point'}, detectors = ['_60p', 'LEGe'], ),", "{'point'}, ), ) startupcode = \"\"\" SetDetectors('_60p', 'LEGe') SetEnvironment(chamber_pressure) printinfo(\"============================================================\")", "SetEnvironment(chamber_pressure) printinfo(\"============================================================\") printinfo(\"Welcome to the NICOS PGAI demo setup.\") printinfo(\"============================================================\")", "sample table' group = 'basic' sysconfig = dict( datasinks =", "with XYZOmega sample table' group = 'basic' sysconfig = dict(", "'detector', 'collimation', ] devices = dict( mcasink = device('nicos_mlz.pgaa.devices.MCASink', settypes", "= device('nicos_mlz.pgaa.devices.MCASink', settypes = {'point'}, detectors = ['_60p', 'LEGe'], ),", "= 'basic' sysconfig = dict( datasinks = ['mcasink', 'chnsink', 'csvsink',", "= device('nicos_mlz.pgaa.devices.CSVDataSink', settypes = {'point'}, ), ) startupcode = \"\"\"", "sysconfig = dict( datasinks = ['mcasink', 'chnsink', 'csvsink', 'livesink'] )", "chnsink = device('nicos_mlz.pgaa.devices.CHNSink', settypes = {'point'}, detectors = ['_60p', 'LEGe'],", "settypes = {'point'}, detectors = ['_60p', 'LEGe'], ), csvsink =", "detectors = ['_60p', 'LEGe'], ), csvsink = device('nicos_mlz.pgaa.devices.CSVDataSink', settypes =", "printinfo(\"============================================================\") printinfo(\"Welcome to the NICOS PGAI demo setup.\") printinfo(\"============================================================\") \"\"\"", "device('nicos_mlz.pgaa.devices.CSVDataSink', settypes = {'point'}, ), ) startupcode = \"\"\" SetDetectors('_60p',", "= ['mcasink', 'chnsink', 'csvsink', 'livesink'] ) includes = [ 'system',", "'system', 'reactor', 'nl4b', 'pressure', 'sampletable', 'pilz', 'detector', 'collimation', ] devices", "'sampletable', 'pilz', 'detector', 'collimation', ] devices = dict( mcasink =", "= ['_60p', 'LEGe'], ), chnsink = device('nicos_mlz.pgaa.devices.CHNSink', settypes = {'point'},", "= \"\"\" SetDetectors('_60p', 'LEGe') SetEnvironment(chamber_pressure) printinfo(\"============================================================\") printinfo(\"Welcome to the NICOS", "'LEGe'], ), chnsink = device('nicos_mlz.pgaa.devices.CHNSink', settypes = {'point'}, detectors =", "SetDetectors('_60p', 'LEGe') SetEnvironment(chamber_pressure) printinfo(\"============================================================\") printinfo(\"Welcome to the NICOS PGAI demo", "= {'point'}, detectors = ['_60p', 'LEGe'], ), chnsink = device('nicos_mlz.pgaa.devices.CHNSink',", "'nl4b', 'pressure', 'sampletable', 'pilz', 
'detector', 'collimation', ] devices = dict(", "= dict( mcasink = device('nicos_mlz.pgaa.devices.MCASink', settypes = {'point'}, detectors =", ") includes = [ 'system', 'reactor', 'nl4b', 'pressure', 'sampletable', 'pilz',", "= 'PGAA setup with XYZOmega sample table' group = 'basic'", "= ['_60p', 'LEGe'], ), csvsink = device('nicos_mlz.pgaa.devices.CSVDataSink', settypes = {'point'},", "'csvsink', 'livesink'] ) includes = [ 'system', 'reactor', 'nl4b', 'pressure',", "settypes = {'point'}, ), ) startupcode = \"\"\" SetDetectors('_60p', 'LEGe')" ]
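The second list's fragments overlap enough to cover what appears to be a complete NICOS setup file; pieced back together it reads as below. The livesink entry in datasinks is defined elsewhere (presumably in an included setup), and the ordering of the top-level keys is a guess:

description = 'PGAA setup with XYZOmega sample table'
group = 'basic'

sysconfig = dict(
    datasinks = ['mcasink', 'chnsink', 'csvsink', 'livesink']
)

includes = [
    'system', 'reactor', 'nl4b', 'pressure',
    'sampletable', 'pilz', 'detector', 'collimation',
]

devices = dict(
    mcasink = device('nicos_mlz.pgaa.devices.MCASink',
        settypes = {'point'},
        detectors = ['_60p', 'LEGe'],
    ),
    chnsink = device('nicos_mlz.pgaa.devices.CHNSink',
        settypes = {'point'},
        detectors = ['_60p', 'LEGe'],
    ),
    csvsink = device('nicos_mlz.pgaa.devices.CSVDataSink',
        settypes = {'point'},
    ),
)

startupcode = """
SetDetectors('_60p', 'LEGe')
SetEnvironment(chamber_pressure)
printinfo("============================================================")
printinfo("Welcome to the NICOS PGAI demo setup.")
printinfo("============================================================")
"""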
[ "_test_pool3d(relay.nn.avg_pool3d, padding=(2, 0, 0, 2, 0, 0), out_shape=(1, 3, 18,", "y = opfunc(x, pool_size=(2, 2, 2), strides=(2, 2, 2), padding=padding)", "\"int8\")) y = relay.nn.conv2d(x, w, out_dtype=\"int32\") assert \"out_dtype=\\\"int32\\\"\" in y.astext()", "\"sp\", [8, 1]], \\ [\"tile_ow\", \"sp\", [1, 8]], \\ [\"reorder_0\",", "== relay.TensorType( (2, 10, 3), \"float32\") # infer by shape", "depthwise conv2d dshape = (1, 32, 18, 18) kshape =", "15) dshape = (n, ic, ih, iw) x = relay.var(\"x\",", "dshape = (1, 3, 18, 18) kshape = (10, 3,", "== relay.TensorType((3, 24), \"float32\") x = relay.var(\"x\", relay.TensorType((d1, 2, d3,", "= opfunc(x, pool_size=(2,), strides=(2,), padding=(0, 0)) func = relay.Function([x], y)", "= run_infer_type(y) assert yy.checked_type == relay.TensorType( (n, 2, 222, 222),", "1, 2, 3, 4, 5, 8, 6, 7]], \\ [\"reorder_1\",", "kernel = np.random.uniform(-scale, scale, size=kshape).astype(dtype) ref_res = topi.testing.conv2d_nchw_python( data.astype(out_dtype), kernel.astype(out_dtype),", "alpha=.00001 beta=0.75 z = relay.nn.lrn(x, size=size, axis=axis, bias=bias, alpha=alpha, beta=beta)", "def test_batch_flatten(): t1 = relay.TensorType((5, 10, 5)) x = relay.Var(\"x\",", "relay.var(\"x\", relay.TensorType(data_shape, input_dtype)) else: raise ValueError('Not supported') if kernel_layout ==", "\"pad_width=\" in y.astext() yy = run_infer_type(y) assert yy.checked_type == relay.TensorType((3,", "\"int32\") def test_conv3d_run(): def run_test_conv3d(dtype, out_dtype, scale, dshape, kshape, padding=(1,", "relay.var(\"x\", shape=dshape) y = relay.nn.avg_pool2d(x, pool_size=(kh, kw), strides=(sw, sw), padding=(ph,", "2) ph, pw = (2, 2) n = 1 (ic,", "strides=(2, 2, 2), padding=padding) func = relay.Function([x], y) # check", "2.0 (the # \"License\"); you may not use this file", "relay.var(\"w\", relay.TensorType((2, 10, 3), \"int8\")) y = relay.nn.conv1d(x, w, out_dtype=\"int32\")", "relay.Function([x, weight], y) wdata = np.random.rand(*kernel_shape) * 10 parameters =", "is HWOI and kernel_layout is HWIO y = relay.nn.conv2d_transpose(x, w,", "2, 2), (2, 2, 2), padding, out_shape, pool_type, False) for", "mod, params = tvm.relay.build(mod, target=\"llvm -device=arm_cpu\") # depthwise conv2d dshape", "w = relay.var(\"w\", shape=kshape, dtype=dtype) y = relay.nn.conv2d(x, w, padding=padding,", "h, w = tvm.size_var(\"n\"), tvm.size_var(\"c\"), 224, 224 x = relay.var(\"x\",", "= (1,) + ishape x = relay.var(\"x\", shape=dshape) y =", "400), \"float32\") def _test_pool2d(opfunc, reffunc): n, c, h, w =", "data = np.random.uniform(size=dshape).astype(dtype) if method == \"nearest_neighbor\": ref = topi.testing.upsampling3d_python(data,", "test assembly contains *pmadd* instructions targets = [\"llvm -mcpu=skylake-avx512\", \"llvm", "= run_infer_type(y) assert yy.checked_type == relay.TensorType( (n, 32, 222, 222),", "for ic in [1, 4, 6]: asm = _compile(ic=ic, oc=16,", "\"float32\")) y = relay.nn.upsampling3d(x, scale_d=2, scale_h=2, scale_w=2, layout=\"NCDHW\", method=\"trilinear\") yy", "scale_w=scale_w,\\ layout=layout, method=method,\\ coordinate_transformation_mode=coordinate_transformation_mode) yy = run_infer_type(y) assert yy.checked_type ==", "d_np for target, ctx in ctx_list(): intrp1 = relay.create_executor(\"graph\", ctx=ctx,", "ctx=ctx, target=target) op_res1 = intrp1.evaluate(func)(data) tvm.testing.assert_allclose(op_res1.asnumpy(), ref_res, rtol=1e-5, atol=1e-5) _test_pool1d(relay.nn.max_pool1d)", "x = relay.var(\"x\", relay.TensorType((n,) + 
ishape, dtype)) y = relay.nn.upsampling3d(x,", "relay.var(\"x\", shape=dshape) y = relay.nn.upsampling3d(x, scale_d=scale_d, scale_h=scale_h, scale_w=scale_w,\\ layout=layout, method=method,\\", "y = relay.nn.conv1d(x, wt, kernel_size=3, padding=(1, 1), channels=16, data_layout=\"NWC\", out_dtype=\"int32\")", "of w, mixed precision n, h, w, c = tvm.size_var(\"n\"),", "= {\"weight\": tvm.nd.array(wdata.astype(weight_dtype))} with relay.build_config(opt_level=3): graph, lib, params = relay.build(func,", "\\ [\"reorder_1\", \"re\", [0, 1, 2, 3, 6, 4, 5]],", "= relay.Function([x, w], y) data = np.random.uniform(-scale, scale, size=dshape).astype(dtype) kernel", "y = relay.nn.upsampling3d(x, scale_d=2, scale_h=2, scale_w=2, layout=\"NCDHW\", method=\"trilinear\") yy =", "data = np.random.rand(5, 10, 5).astype(t1.dtype) ref_res = batch_flatten(data) for target,", "True) _test_upsampling(\"NHWC\", \"nearest_neighbor\") _test_upsampling(\"NHWC\", \"bilinear\", True) def _test_upsampling3d(layout, method, coordinate_transformation_mode=\"half_pixel\"):", "dtypes n, h, w, ch, cw = 1, 64, 64,", "run_test_conv1d(dtype, out_dtype, scale, dshape, kshape, padding=(1, 1), fref=None, dilation=1, except_targets=None,", "3 ,3)) def test_conv3d_ndhwc_run(): def run_test_conv3d(dtype, out_dtype, scale, dshape, kshape,", "test_conv2d_infer_type(): # symbolic in batch dimension n, c, h, w", "str(opfunc) else 'avg' y = opfunc(x, pool_size=(2,), strides=(2,), padding=(0, 0))", "relay.var(\"w\", relay.ty.TensorType((32, 32, 3, 3), \"int16\")) y = relay.nn.bitserial_conv2d( x,", "= (3, 28, 28) (oc, oh, ow) = (3, 15,", "1, 1), channels=16, data_layout=\"NDHWC\", out_dtype=\"int32\") yy = run_infer_type(y) assert yy.checked_type", "shape=(n, c , h, w)) y = relay.nn.lrn(x, size=10, axis=2,", "c, d, h, w), \"int8\")) w = relay.var(\"w\", relay.TensorType((2, 10,", "layout) for target, ctx in ctx_list(): executor = relay.create_executor(\"graph\", ctx=ctx,", "kernel = np.random.uniform(-scale, scale, size=kshape).astype(dtype) ref_res = topi.testing.conv1d_ncw_python( data.astype(out_dtype), kernel.astype(out_dtype),", "ctx=ctx, target=target) op_res1 = intrp1.evaluate(func)(data) tvm.testing.assert_allclose(op_res1.asnumpy(), ref_res, rtol=1e-5, atol=1e-5) _test_pool3d(relay.nn.max_pool3d)", "data_layout=\"NHWC\", kernel_layout='HWIO', dtypes=dtypes) assert _has_fast_int8_instructions(asm, target) # Check that both", "= autotvm.task.space.SplitEntity([-1, 1]) cfg['auto_unroll_max_setp'] = autotvm.task.space.OtherOptionEntity(1500) cfg['unroll_explicit'] = autotvm.task.space.OtherOptionEntity(1) self.memory[key]", "padding=(1,), output_padding=(2,)) func = relay.Function([x, w], y) dtype = \"float32\"", "method=\"bilinear\") yy = run_infer_type(y) assert yy.checked_type == relay.TensorType((n, c, 200,", "_has_fast_int8_instructions(asm, target) for ic in [1, 4, 6]: asm =", "def run_test_conv2d(dtype, out_dtype, scale, dshape, kshape, padding=(1, 1), fref=None, groups=1,", "= run_infer_type(y) assert yy.checked_type == relay.TensorType( (n, 2, 224, 224),", "28, 28) x = relay.var(\"x\", shape=dshape) y = opfunc(x, pool_size=(2,", "y = opfunc(x, pool_size=(2, 2), strides=(2, 2), padding=(0, 0)) func", "0), out_shape=(1, 3, 18, 16, 16)) _test_pool3d(relay.nn.avg_pool3d, padding=(0, 3, 0,", "+ 1] return np.reshape(data, (shape[0], target_dim)) def test_batch_flatten(): t1 =", "= topi.testing.pool1d_ncw_python(data, (2,), (2,), (0, 0), (1, 3, 16), pool_type,", "relay.nn.conv2d(x, w, padding=padding, dilation=dilation, 
groups=groups, **attrs) func = relay.Function([x, w],", "224, 224) kshape = (10, 3, 3, 3) run_test_conv2d(\"float32\", \"float32\",", "See the License for the # specific language governing permissions", "== relay.TensorType( (4, 8, 3, 3, 4, 4), \"int8\") #", "h, w = tvm.size_var(\"n\"), tvm.size_var(\"c\"), tvm.size_var(\"h\"), tvm.size_var(\"w\") scale = tvm.const(2.0,", "= (64, 1, 3, 3) weight = relay.var(\"weight\", relay.TensorType(kernel_shape, weight_dtype))", "np.mean) _test_pool2d_int(relay.nn.avg_pool2d, np.mean, 'int32') _test_pool2d_int(relay.nn.avg_pool2d, np.mean, 'uint16') _test_global_pool2d(relay.nn.global_max_pool2d, np.max) _test_global_pool2d(relay.nn.global_avg_pool2d,", "relay.var(\"w\", relay.TensorType((2, 10, 3, 3), \"int8\")) y = relay.nn.conv2d(x, w,", "18, 18) kshape = (10, 3, 3, 3) run_test_conv2d(\"float32\", \"float32\",", "= tvm.contrib.graph_runtime.create(graph, lib, ctx) module.set_input('x', tvm.nd.array(data)) module.set_input(**params) module.run() op_res1 =", "ref_res = topi.testing.conv3d_ncdhw_python( data.astype(out_dtype), dkernel.astype(out_dtype), 1, padding, groups=groups) else: ref_res", "padding=(1, 1, 1), channels=10, kernel_size=(3, 3 ,3), except_targets=[\"cuda\"]) def test_conv2d_transpose_infer_type():", "op_res1 = intrp1.evaluate(func)(data) tvm.testing.assert_allclose(op_res1.asnumpy(), ref_res, rtol=1e-5, atol=1e-5) def _test_global_pool2d(opfunc, reffunc):", "channels to check int8 robustness # Output channels should be", "= get_shape() x = relay.var(\"x\", relay.TensorType((n,) + ishape, dtype)) y", "= relay.var(\"w\", dtype=dtype) y = relay.nn.conv2d(x, w, padding=padding, dilation=dilation, groups=groups,", "10, 12), \"float32\") assert yy.args[1].checked_type == relay.TensorType( (10, 15, 3,", "3) run_test_conv2d(\"float32\", \"float32\", 1, dshape, kshape, padding=(1, 1), channels=32, groups=32,", "10, 3) oshape_nhwc = (1, 37, 37, 10) x =", "relay.TensorType((n + 2, 6, 9, w + 8), \"float32\") def", "int(round(w*scale_w))) else: return (d, h, w, c), (int(round(d*scale_d)), int(round(h*scale_h)),\\ int(round(w*scale_w)),", "n, c, w = 4, 32, 224 x = relay.var(\"x\",", "intrinisic is not present in the assembly. assert not _has_fast_int8_instructions(asm,", "1), except_targets=None, **attrs): if except_targets is None: except_targets = []", "in batch dimension n, c, w = tvm.var(\"n\"), 10, 224", "iw+2*pw)).astype(dtype) no_zero = (range(n), range(ic), (range(ph, ih+ph)), (range(pw, iw+pw))) pad_np[np.ix_(*no_zero)]", "(3, 10, 3, 3) oshape = (1, 10, 37, 37)", "(n, 15, 10, 12), \"float32\") assert yy.args[1].checked_type == relay.TensorType( (10,", "tvm.testing.assert_allclose(op_res1.asnumpy(), ref_res, rtol=1e-5, atol=1e-5) def test_conv2d_transpose_nhwc_run(): dshape_nhwc = (1, 18,", "that fast instructions can be picked up. for target in", "padding=(1, 1, 1), fref=None, groups=1, dilation=(1, 1, 1), except_targets=None, **attrs):", "10) run_test_conv3d(\"float32\", \"float32\", 1, dshape, kshape, padding=(1, 1, 1), channels=10,", "yy.checked_type == relay.TensorType( (n, 2, 222, 222), \"int32\") # infer", "\\ \"Output shape mismatch. 
expected {}, actual {}\".format(out_shape, f_out_shape) data", "20)) _test_pool3d(relay.nn.avg_pool3d) _test_pool3d(relay.nn.avg_pool3d, padding=(2, 0, 0, 2, 0, 0), out_shape=(1,", "from tvm import autotvm from tvm import relay from tvm.relay", "dtype = \"float32\" x = relay.var(\"x\", relay.TensorType(shape, dtype)) eps=0.001 axis=1", "1], \"float32\"], \\ {\"i\": 743640, \"t\": \"contrib_spatial_pack\", \"c\": null, \\", "axis=1 z = relay.nn.l2_normalize(x, eps=0.001, axis=[axis]) yy = run_infer_type(z) assert", "Check that a vectorized instruction is generated for older Intel", "2), padding=padding) func = relay.Function([x], y) # check output shape", "_test_pool3d(relay.nn.max_pool3d, padding=(0, 3, 0, 0, 3, 0), out_shape=(1, 3, 16,", "kernel_size=3, padding=(1, 1), channels=2) yy = run_infer_type(y) assert yy.checked_type ==", "tvm.size_var(\"n\"), tvm.size_var(\"c\") x = relay.var(\"x\", relay.TensorType((n, c, 100, 100, 200),", "yy = run_infer_type(y) assert yy.checked_type == relay.TensorType( (n, 32, 222,", "(1, 10, 37, 37) x = relay.var(\"x\", shape=dshape) w =", "= relay.Function([x], y) # check output shape f_out_shape = tuple(map(lambda", "channels=64, groups=32, kernel_size=(3 ,3), except_targets=['cuda']) # normal conv2d dshape =", "return (h, w, c), (int(round(h*scale_h)), int(round(w*scale_w)), c) ishape, oshape =", "atol=1e-5) def _test_pool2d_int(opfunc, reffunc, dtype): n, c, h, w =", "\"float32\")) y = opfunc(x, pool_size=(1,)) assert \"pool_size=\" in y.astext() yy", "10, 3), \"float32\") # infer by shape of w, mixed", "f_out_shape, \\ \"Output shape mismatch. expected {}, actual {}\".format(out_shape, f_out_shape)", "c , h, w)) shape = (1, 5, 10, 10)", "in batch dimension n, c, d, h, w = tvm.size_var(\"n\"),", "\"bilinear\", True) _test_upsampling(\"NHWC\", \"nearest_neighbor\") _test_upsampling(\"NHWC\", \"bilinear\", True) def _test_upsampling3d(layout, method,", "target=target) op_res1 = intrp1.evaluate(func)(data) tvm.testing.assert_allclose(op_res1.asnumpy(), ref_res, rtol=1e-5, atol=1e-5) def _test_pool2d_int(opfunc,", "the assembly. 
assert not _has_fast_int8_instructions(asm, target) # Check that a", "def get_shape(): if layout == \"NCHW\": return (c, h, w),", "x = relay.var(\"x\", relay.TensorType((n, c, 100, 100, 200), \"float32\")) y", "intrp = relay.create_executor(\"graph\", ctx=ctx, target=target) op_res = intrp.evaluate(func)(data) np.testing.assert_allclose(op_res.asnumpy(), ref_res,", "1), data_layout=data_layout, kernel_layout=kernel_layout, out_dtype=output_dtype) func = relay.Function([x, weight], y) wdata", "= tvm.size_var(\"n\"), 10, 224, 224 x = relay.var(\"x\", relay.TensorType((n, c,", "c_np def test_conv1d_transpose_ncw_run(): dshape = (1, 3, 18) kshape =", "shape of w, mixed precision n, c, d, h, w", "x = relay.var(\"x\", relay.ty.TensorType((n, c, d, h, w), \"float32\")) w", "in ctx_list(): intrp1 = relay.create_executor(\"graph\", ctx=ctx, target=target) op_res1 = intrp1.evaluate(func)(data)", "kshape, padding=(1, 1), fref=None, groups=1, dilation=(1, 1), except_targets=None, **attrs): if", "scale_w), layout) else: ref = topi.testing.bilinear_resize_python(data, (int(round(h*scale_h)), int(round(w*scale_w))), layout) for", "(2, 2), (3, 3), (4, 4))) yy = run_infer_type(y) assert", "self.memory[key] cfg = autotvm.task.space.FallbackConfigEntity() cfg.template_key = 'winograd' cfg.is_fallback = False", "test_conv2d_transpose_nhwc_run(): dshape_nhwc = (1, 18, 18, 3) kshape_hwoi = (3,", "groups=32, kernel_size=(3 ,3), except_targets=['cuda']) # normal conv2d dshape = (1,", "n, c, h, w = 1, 2, 3, 4 t", "c//4, h, w, 4, 4), \"int8\")) wt = relay.var(\"w\") y", "groups=1, dilation=(1, 1), **attrs): x = relay.var(\"x\", shape=dshape, dtype=dtype) w", "# extended winograd: stride 1, padding N, kernel NxN kshape", "= relay.nn.conv1d(x, w, kernel_size=3, padding=(1, 1), channels=2) yy = run_infer_type(y)", "= (1, 3, 18) kshape = (10, 3, 3) run_test_conv1d(\"float32\",", "np.random.uniform(size=dshape).astype(dtype) kernel = np.random.uniform(size=kshape).astype(dtype) c_np = topi.testing.conv1d_transpose_ncw_python( data, kernel, 2,", ",3), except_targets=['cuda']) # normal conv2d dshape = (1, 3, 224,", "group conv2d dshape = (1, 32, 18, 18) kshape =", "w), \"float32\")) w = relay.var(\"w\") y = relay.nn.conv3d(x, w, kernel_size=(3,", "= run_infer_type(y) assert yy.checked_type == relay.TensorType((n + 2, 6, 9,", "NxN kshape = (192, 80, 7, 7) run_test_conv2d_cuda(\"float32\", \"float32\", 1,", "x = relay.var(\"x\", shape=dshape) w = relay.var(\"w\") y = relay.nn.conv2d_transpose(x,", "dtypes=dtypes) assert _has_fast_int8_instructions(asm, target) for ic in [1, 4, 6]:", "10, 3) oshape = (1, 10, 37) x = relay.var(\"x\",", "= relay.var(\"w\", shape=kshape, dtype=dtype) y = relay.nn.conv2d(x, w, padding=padding, dilation=dilation,", "func = relay.Function([x, w], y) data = np.random.uniform(-scale, scale, size=dshape).astype(dtype)", "yy = run_infer_type(y) assert yy.checked_type == relay.TensorType( (n, 15, 10,", "1) d_np = np.zeros(shape=oshape_nhwc) d_np[:,0:c_np.shape[1],0:c_np.shape[2],:] = c_np def test_conv1d_transpose_ncw_run(): dshape", "yy = run_infer_type(y) assert yy.checked_type == relay.TensorType((n, c, 200, 400),", "target_dim = 1 for i in range(len(shape) - 1): target_dim", "n, c , h, w = tvm.size_var(\"n\"), tvm.size_var(\"c\"), tvm.size_var(\"h\"), tvm.size_var(\"w\")", "\"nearest_neighbor\") _test_upsampling3d(\"NCDHW\", \"trilinear\", \"align_corners\") _test_upsampling3d(\"NDHWC\", \"nearest_neighbor\") _test_upsampling3d(\"NDHWC\", \"trilinear\", \"align_corners\") def", "10, 224, 224 x = 
relay.var(\"x\", relay.ty.TensorType((n, c, h, w),", "kshape = (10, 3, 3, 3) run_test_conv2d(\"float32\", \"float32\", 1, dshape,", "x = relay.var(\"x\", relay.TensorType((d1, d2, d3, d4), \"float32\")) y =", "2), (3, 3), (4, 4))) \"pad_width=\" in y.astext() yy =", "yy.args[1].checked_type == relay.TensorType( (10, 15, 3, 3), \"float32\") # infer", "llvm_version >= 8: dtypes = ('uint8', 'int8', 'int32') # Sweep", "\\ [1, 1], [1, 1], [1, 1], \"float32\"], {}, \\", "c, h, w), \"float32\")) y = relay.nn.pad(t, ((1, 1), (2,", "tvm.contrib import util import topi.testing def test_conv1d_infer_type(): # symbolic in", "relay.nn.pad(t, ((1, 1), (2, 2), (3, 3), (4, 4))) \"pad_width=\"", "def compile_test_conv2d_arm_cpu(dtype, out_dtype, scale, dshape, kshape, padding=(1, 1), groups=1, dilation=(1,", "= relay.nn.batch_flatten(x) yy = run_infer_type(y) assert yy.checked_type == relay.TensorType((3, 24),", "\"method=\\\"BINLINEAR\\\"\" in y.astext() yy = run_infer_type(y) assert yy.checked_type == relay.TensorType((n,", "def _test_pool2d_int(opfunc, reffunc, dtype): n, c, h, w = tvm.size_var(\"n\"),", "3), \"int8\")) y = relay.nn.conv1d(x, w, out_dtype=\"int32\") assert \"out_dtype=\\\"int32\\\"\" in", "print('Compiling...') graph_json, mod, params = tvm.relay.build(mod, target=\"llvm -device=arm_cpu\") # depthwise", "= _compile(ic=8, oc=oc, target=target, data_layout=\"NHWC\", kernel_layout='HWIO', dtypes=dtypes) assert _has_fast_int8_instructions(asm, target)", "3), \"int16\")) y = relay.nn.bitserial_conv2d( x, w, kernel_size=(3, 3), padding=(0,", "See the NOTICE file # distributed with this work for", "continue params = {'w': tvm.nd.array(kernel)} graph, lib, params = relay.build_module.build(mod,", "tvm.size_var(\"n\"), 10, 10, 12 x = relay.var(\"x\", relay.TensorType((n, h, w,", "224 x = relay.var(\"x\", relay.TensorType((n, c, h, w), \"float32\")) y", "= func mod = relay.transform.InferType()(mod) data = np.random.uniform(-scale, scale, size=dshape).astype(dtype)", "ctx_list(): if target in except_targets: continue intrp1 = relay.create_executor(\"graph\", ctx=ctx,", "73, 73) kshape = (192, 80, 3, 3) run_test_conv2d_cuda(\"float32\", \"float32\",", "32, 224, 224, 224 x = relay.var(\"x\", relay.TensorType((n, d, h,", "weight_dtype, output_dtype = dtypes n, h, w, ch, cw =", "= run_infer_type(y) assert yy.checked_type == relay.TensorType((n, c, 200, 400), \"float32\")", "n, c, d, h, w = 4, 32, 224, 224,", "3, 0), out_shape=(1, 3, 16, 19, 16)) _test_pool3d(relay.nn.max_pool3d, padding=(0, 0,", "range(oh): for j in range(ow): pad_count = np.sum(pad_np[:, :, i*sh:i*sh+kh,", "= relay.var(\"w\") y = relay.nn.conv1d(x, w, kernel_size=3, padding=(1, 1), channels=2)", "Apache License, Version 2.0 (the # \"License\"); you may not", "test_pad_infer_type() test_pad_run() test_conv2d_transpose_infer_type() test_conv2d_transpose_nchw_run() test_conv2d_transpose_nhwc_run() test_conv1d_transpose_ncw_run() test_conv1d_run() test_conv2d_run() test_conv2d_winograd() test_conv3d_run()", "_test_pool2d(relay.nn.avg_pool2d, np.mean) _test_pool2d_int(relay.nn.avg_pool2d, np.mean, 'int32') _test_pool2d_int(relay.nn.avg_pool2d, np.mean, 'uint16') _test_global_pool2d(relay.nn.global_max_pool2d, np.max)", "1, padding N, kernel 3x3 run_test_conv2d_cuda(\"float32\", \"float32\", 1, dshape, kshape,", "[1, 1], [1, 1], \"float32\"], \\ {\"i\": 743640, \"t\": \"contrib_spatial_pack\",", "224), \"float32\") assert yy.args[1].checked_type == relay.TensorType( (2, 10, 3), \"float32\")", "3, 3) 
compile_test_conv2d_arm_cpu(\"float32\", \"float32\", 1, dshape, kshape, padding=(1, 1), channels=512,", "= np.random.uniform(size=dshape).astype(dtype) ref_res = topi.testing.pool3d_ncdhw_python(data, (2, 2, 2), (2, 2,", "h, w), \"float32\")) w = relay.var(\"w\") y = relay.nn.conv3d(x, w,", "strides=(2, 2), padding=(1, 1), output_padding=(2, 2), data_layout=\"NHWC\", kernel_layout=\"HWIO\") func =", "= relay.nn.pad(t, ((1, 1), (2, 2), (3, 3), (4, 4)))", "targets: if llvm_version >= 8: with relay.build_config(opt_level=3): graph, lib, params", "2), padding=(0, 0)) func = relay.Function([x], y) data = np.random.uniform(size=dshape).astype(dtype)", "def run_test_conv2d_cuda(dtype, out_dtype, scale, dshape, kshape, padding=(1, 1), groups=1, dilation=(1,", "= np.zeros(shape=oshape_nhwc) d_np[:,0:c_np.shape[1],0:c_np.shape[2],:] = c_np def test_conv1d_transpose_ncw_run(): dshape = (1,", "dshape, kshape, padding=(1, 1), channels=10, kernel_size=(3 ,3)) kshape = (10,", "axis=(2,3)) b_np[:,:,i,j] = np.sum(pad_np[:, :, i*sh:i*sh+kh, j*sw:j*sw+kw], axis=(2,3)) / np.maximum(pad_count,", "yy.args[1].checked_type == relay.TensorType( (2, 10, 3, 3), \"float32\") # infer", "\\ \"e\": [[\"tile_co\", \"sp\", [32, 16]], [\"tile_oh\", \"sp\", [8, 1]],", "w), \"float32\")) w = relay.var(\"w\") y = relay.nn.conv2d(x, w, kernel_size=(3,", "224 x = relay.var(\"x\", relay.TensorType((n, c, h, w), \"uint8\")) w", "module.set_input(**params) module.run() op_res1 = module.get_output(0) tvm.testing.assert_allclose(op_res1.asnumpy(), ref_res, rtol=1e-3, atol=1e-3) #", "relay.TensorType((n, c, h, w), \"float32\")) y = opfunc(x) yy =", "== relay.TensorType((n + 2, 6, 9, w + 8), \"float32\")", "layout here - HWOI c_np = topi.testing.conv2d_transpose_nhwc_python(data, kernel, 'HWOI', 2,", "import tvm from tvm import autotvm from tvm import relay", "size=dshape).astype(dtype) kernel = np.random.uniform(-scale, scale, size=kshape).astype(dtype) ref_res = topi.testing.conv1d_ncw_python( data.astype(out_dtype),", "32, 32, \"float32\"], \\ [512, 1, 3, 3, \"float32\"], [1,", "atol=1e-5) _test_run('float32') _test_run('int32') def test_lrn(): n, c , h, w", "= (1, 5, 224, 224, 6) kshape = (3, 3,", "a different layout n, c, h, w = 4, 32,", "7, 7) x = relay.var(\"x\", shape=dshape) y = relay.nn.pad(x, ((1,", "oshape = get_shape() x = relay.var(\"x\", relay.TensorType((n,) + ishape, dtype))", "2, 3, 6, 4, 5]], \\ [\"ann_reduce\", \"an\", [\"unroll\", \"none\"]],", "relay.create_executor(\"graph\", ctx=ctx, target=target) op_res1 = intrp1.evaluate(func)(data) tvm.testing.assert_allclose(op_res1.asnumpy(), ref_res, rtol=1e-5, atol=1e-5)", "kshape, padding=(1, 1), channels=10, kernel_size=3, dilation=3) def test_conv2d_infer_type(): # symbolic", "layout=\"NCHW\", method=\"bilinear\") \"method=\\\"BINLINEAR\\\"\" in y.astext() yy = run_infer_type(y) assert yy.checked_type", "c, h, w = 4, 32, 224, 224 x =", "w, padding=padding, dilation=dilation, groups=groups, data_layout=\"NDHWC\", kernel_layout=\"DHWIO\", **attrs) func = relay.Function([x,", "3) run_test_conv2d(\"float32\", \"float32\", 1, dshape, kshape, padding=(1, 1), channels=32, groups=8,", "i*sh:i*sh+kh, j*sw:j*sw+kw], axis=(2,3)) / np.maximum(pad_count, 1) ref_res = np.maximum(b_np, 0.0)", "use this file except in compliance # with the License.", "\"int32\" dshape = (1, 3, 28, 28) x = relay.var(\"x\",", "because we default to NCHWc layout. 
target = \"llvm -mcpu=core-avx2\"", "w, kernel_size=(3, 3), padding=(1, 1), channels=2) yy = run_infer_type(y) assert", "test with ambiguous batch. n, c, h, w = tvm.size_var(\"n\"),", "d, h, w, 16), \"int32\") def test_conv3d_run(): def run_test_conv3d(dtype, out_dtype,", "kshape = (32, 1, 3, 3) run_test_conv2d(\"float32\", \"float32\", 1, dshape,", "'max' in str(opfunc) else 'avg' y = opfunc(x, pool_size=(2, 2,", "np.random.uniform(size=dshape).astype(dtype) ref_res = reffunc(data.reshape(1, 3, 14, 2, 14, 2), axis=(3,", "relay.TensorType((n, c, h, w), \"int8\")) w = relay.var(\"w\", relay.TensorType((2, 10,", "{}\".format(out_shape, f_out_shape) data = np.random.uniform(size=dshape).astype(dtype) ref_res = topi.testing.pool3d_ncdhw_python(data, (2, 2,", "x = relay.Var(\"x\", t1) func = relay.Function([x], relay.nn.batch_flatten(x)) data =", "16, 32, 32 scale_h = 2.0 scale_w = 2.0 dtype", "inference. o, i, h, w = 32, 32, 128, 128", "dshape = (n, ic, ih, iw) x = relay.var(\"x\", shape=dshape)", "= relay.var(\"x\", relay.TensorType((n, c, d, h, w), \"uint8\")) w =", "None: ref_res = topi.testing.conv3d_ndhwc_python( data.astype(out_dtype), dkernel.astype(out_dtype), 1, padding) else: ref_res", "pool_size=(2, 2), strides=(2, 2), padding=(0, 0)) func = relay.Function([x], y)", "_test_global_pool2d(relay.nn.global_max_pool2d, np.max) _test_global_pool2d(relay.nn.global_avg_pool2d, np.mean) def test_pool1d(): def _test_pool1d(opfunc): n, c,", "3), (4, 4))) yy = run_infer_type(y) assert yy.checked_type == relay.TensorType((n", "_test_upsampling(\"NCHW\", \"bilinear\", True) _test_upsampling(\"NHWC\", \"nearest_neighbor\") _test_upsampling(\"NHWC\", \"bilinear\", True) def _test_upsampling3d(layout,", "tvm.testing.assert_allclose(op_res1.asnumpy(), ref_res, rtol=1e-5, atol=1e-5) def test_upsampling_infer_type(): n, c , h,", "a multiple of 4 internally. 
for ic in [1, 4,", "np.random.uniform(-scale, scale, size=dshape).astype(dtype) kernel = np.random.uniform(-scale, scale, size=kshape).astype(dtype) ref_res =", "fref is None: ref_res = topi.testing.conv3d_ndhwc_python( data.astype(out_dtype), dkernel.astype(out_dtype), 1, padding)", "transform from tvm.relay.testing import ctx_list, run_infer_type from tvm.contrib import util", "(1, 3, 32, 32, 32) x = relay.var(\"x\", shape=dshape) pool_type", "data.astype(out_dtype), kernel.astype(out_dtype), 1, padding, groups=groups) with WinogradFallback(), relay.build_config(opt_level=3): for target,", "c, d, h, w), \"float32\")) y = opfunc(x, pool_size=(1, 1,", "_test_upsampling3d(\"NCDHW\", \"trilinear\", \"align_corners\") _test_upsampling3d(\"NDHWC\", \"nearest_neighbor\") _test_upsampling3d(\"NDHWC\", \"trilinear\", \"align_corners\") def test_conv2d_int8_intrinsics():", "= tvm.size_var(\"n\"), 32, 224, 224 x = relay.var(\"x\", relay.ty.TensorType((n, c,", "fref is None: ref_res = topi.testing.conv3d_ncdhw_python( data.astype(out_dtype), dkernel.astype(out_dtype), 1, padding,", "[1, 512, 32, 32], \"float32\"], \\ [\"TENSOR\", [512, 1, 3,", "3, 3, 3), \"int8\")) y = relay.nn.conv3d(x, w, out_dtype=\"int32\") assert", "(4, 4))) func = relay.Function([x], y) data = np.random.uniform(size=dshape).astype(dtype) ref_res", "2, 3, 4, 5, 8, 6, 7]], \\ [\"reorder_1\", \"re\",", "dshape = (1, 80, 73, 73) kshape = (192, 80,", "2, 2), padding=padding) func = relay.Function([x], y) # check output", "test_conv3d_ndhwc_run(): def run_test_conv3d(dtype, out_dtype, scale, dshape, kshape, padding=(1, 1, 1),", "y = relay.nn.pad(x, ((1, 1), (2, 2), (3, 3), (4,", "ishape, oshape = get_shape() x = relay.var(\"x\", relay.TensorType((n,) + ishape,", "pw), count_include_pad=False) func = relay.Function([x], y) dtype = \"float32\" a_np", "yy.args[1].checked_type == relay.TensorType( (4, 8, 3, 3, 4, 4), \"int8\")", "yy.checked_type == relay.TensorType((n,) + oshape, dtype) dshape = (1,) +", "y = relay.nn.conv2d(x, w, padding=padding, dilation=dilation, groups=groups, **attrs) func =", "data, kernel, 2, 1) d_np = np.zeros(shape=oshape) d_np[:,:,0:c_np.shape[2]] = c_np", "relay.TensorType((d1, 2, d3, 3), \"float32\")) y = relay.nn.batch_flatten(x) yy =", "relay.Function([x], y) data = np.random.uniform(size=dshape).astype(dtype) ref_res = reffunc(data.reshape(1, 3, 14,", "= \"float32\" data = np.random.uniform(size=dshape_nhwc).astype(dtype) kernel = np.random.uniform(size=kshape_hwoi).astype(dtype) # use", "executor = relay.create_executor(\"graph\", ctx=ctx, target=target) out = executor.evaluate(func)(data) tvm.testing.assert_allclose(out.asnumpy(), ref,", "atol=1e-5) def test_upsampling(): _test_upsampling(\"NCHW\", \"nearest_neighbor\") _test_upsampling(\"NCHW\", \"bilinear\", True) _test_upsampling(\"NHWC\", \"nearest_neighbor\")", "'int32' data_shape = (1, 64, 56, 56) x = relay.var(\"x\",", "18, 3) kshape_hwoi = (3, 3, 10, 3) oshape_nhwc =", "tvm.size_var(\"n\"), 16, 32, 32 scale_h = 2.0 scale_w = 2.0", "_compile(ic=17, oc=29, target=target, data_layout=\"NCHW\", kernel_layout='OIHW', dtypes=dtypes) assert _has_fast_int8_instructions(asm, target) asm", "1), fref=None, groups=1, dilation=(1, 1), except_targets=None, **attrs): if except_targets is", "oc=29, target=target, data_layout=\"NHWC\", kernel_layout='HWIO', dtypes=dtypes) assert _has_fast_int8_instructions(asm, target) # Check", "np.zeros(shape=oshape) d_np[:,:,0:c_np.shape[2]] = c_np ref_res = d_np for target, ctx", "= relay.nn.lrn(x, size=10, axis=2, bias=0.5, 
alpha=.00001, beta=0.75) \"alpha=\" in y.astext()", "work for additional information # regarding copyright ownership. The ASF", "w = relay.var(\"w\", relay.TensorType((2, 10, 3), \"int8\")) y = relay.nn.conv1d(x,", "relay.TensorType((n, c, 1, 1), \"float32\") # test execution dtype =", "**attrs) func = relay.Function([x, w], y) mod = relay.Module() mod['main']", "kernel_size=3, dilation=3) def test_conv2d_infer_type(): # symbolic in batch dimension n,", "np.random.uniform(size=dshape).astype(dtype) ref_res = topi.testing.pool3d_ncdhw_python(data, (2, 2, 2), (2, 2, 2),", "= tvm.const(2.0, \"float64\") x = relay.var(\"x\", relay.TensorType((n, c, d, h,", "int8 x int8 goes through legalization so that fast instructions", "relay.TensorType(shape, dtype) func = relay.Function([x], z) x_data = np.random.uniform(low=-1, high=1,", "test_flatten_infer_type() test_pad_infer_type() test_pad_run() test_conv2d_transpose_infer_type() test_conv2d_transpose_nchw_run() test_conv2d_transpose_nhwc_run() test_conv1d_transpose_ncw_run() test_conv1d_run() test_conv2d_run() test_conv2d_winograd()", "np.random.rand(*kernel_shape) * 10 parameters = {\"weight\": tvm.nd.array(wdata.astype(weight_dtype))} targets = [\"llvm", "\"t\": \"contrib_spatial_pack\", \"c\": null, \\ \"e\": [[\"tile_co\", \"sp\", [32, 16]],", "3, 16, 16, 16)): n, c, d, h, w =", "# software distributed under the License is distributed on an", "pool_size=(2, 2, 2), strides=(2, 2, 2), padding=padding) func = relay.Function([x],", "graph, lib, params = relay.build(func, target, params=parameters) def test_bitserial_conv2d_infer_type(): #", "\"SAME\")) # depthwise conv2d for arm_cpu dshape = (1, 512,", "relay.TensorType((n, 1, 1, c), \"float32\") n, c, h, w =", "relay.Function([x], y) data = np.random.uniform(size=dshape).astype(dtype) ref_res = np.pad(data, ((1, 1),", "def test_conv2d_int8_intrinsics(): def _compile(ic, oc, target, data_layout, kernel_layout, dtypes): input_dtype,", "the License. You may obtain a copy of the License", "w + 8), \"float32\") def test_pad_run(): def _test_run(dtype): dshape =", "{}, \\ [\"depthwise_conv2d_nchw\", [1, 512, 32, 32, \"float32\"], \\ [512,", "generated when datatypes are not HW supported. 
dtypes = ('uint8',", "a vectorized instruction is generated for older Intel # generations,", "(n, 2, 222, 222), \"int32\") # Infer with a different", "ow)).astype(dtype) for i in range(oh): for j in range(ow): pad_count", "32, 32, 32) x = relay.var(\"x\", shape=dshape) pool_type = 'max'", "high=1, size=shape).astype(dtype) ref_res = topi.testing.l2_normalize_python(x_data, eps, axis) for target, ctx", "20]: asm = _compile(ic=8, oc=oc, target=target, data_layout=\"NCHW\", kernel_layout='OIHW', dtypes=dtypes) assert", "3, 3, 3) run_test_conv2d(\"float32\", \"float32\", 1, dshape, kshape, padding=(1, 1),", "run_test_conv2d(\"float32\", \"float32\", 1, dshape, kshape, padding=(1, 1), channels=64, groups=32, kernel_size=(3", "3), padding=(1, 1), channels=16, data_layout=\"NCHW4n4c\", kernel_layout=\"OIHW4o4i\", out_dtype=\"int32\") yy = run_infer_type(y)", "19, 16)) _test_pool3d(relay.nn.avg_pool3d, padding=(0, 0, 4, 0, 0, 4), out_shape=(1,", "axis, bias, alpha, beta) for target, ctx in ctx_list(): intrp1", "= relay.var(\"x\", relay.ty.TensorType((o, i, h, w), \"int16\")) y = relay.nn.bitpack(x,", "mixed precision n, c, h, w = tvm.size_var(\"n\"), 10, 224,", ",3)) # mixed precision run_test_conv2d(\"int8\", \"int32\", 1, dshape, kshape, padding=(1,", "= (10, 3, 3) run_test_conv1d(\"float32\", \"float32\", 1, dshape, kshape, padding=(1,", "y = relay.nn.conv2d(x, wt, kernel_size=(3, 3), padding=(1, 1), channels=16, data_layout=\"NCHW4n4c\",", "3, 224) kshape = (10, 3, 3) run_test_conv1d(\"float32\", \"float32\", 1,", "= run_infer_type(y) assert yy.checked_type == relay.TensorType( (n, 15, 15, 11),", "h, w = tvm.size_var(\"n\"), 32, 224, 224 x = relay.var(\"x\",", "distributed on an # \"AS IS\" BASIS, WITHOUT WARRANTIES OR", "y) data = np.random.uniform(size=dshape).astype(dtype) if method == \"nearest_neighbor\": ref =", "x = relay.var(\"x\", relay.ty.TensorType((n, c, h, w), \"float32\")) w =", "(h, w, c), (int(round(h*scale_h)), int(round(w*scale_w)), c) ishape, oshape = get_shape()", "dshape, kshape, padding=(1, 1), channels=32, groups=32, kernel_size=(3 ,3), fref=lambda x,", "12), \"float32\") # some symbolic values n, c, h, w", "d2, d3, d4 = tvm.size_var(\"d1\"), tvm.size_var(\"d2\"), tvm.size_var(\"d3\"), tvm.size_var(\"d4\") x =", "2, 222, 222), \"int32\") # Infer with a different layout", "8, 6, 7]], \\ [\"reorder_1\", \"re\", [0, 1, 2, 3,", "= run_infer_type(y) assert yy.checked_type == relay.TensorType((n, 10, 224), \"float32\") #", "19, 16)) _test_pool3d(relay.nn.max_pool3d, padding=(0, 0, 4, 0, 0, 4), out_shape=(1,", "= np.random.rand(5, 10, 5).astype(t1.dtype) ref_res = batch_flatten(data) for target, ctx", "\"float32\")) w = relay.var(\"w\") y = relay.nn.conv1d(x, w, kernel_size=3, padding=(1,", "topi.testing.conv3d_ndhwc_python( data.astype(out_dtype), dkernel.astype(out_dtype), 1, padding) else: ref_res = fref(data.astype(out_dtype), dkernel.astype(out_dtype))", "relay.var(\"x\", relay.TensorType(shape, dtype)) size=5 axis=1 bias=0.5 alpha=.00001 beta=0.75 z =", "relay.var(\"x\", relay.TensorType((n,) + ishape, dtype)) y = relay.nn.upsampling(x, scale_h=scale_h, scale_w=scale_w,", "test_avg_pool2d_no_count_pad() test_lrn() test_l2_normalize() test_conv1d_infer_type() test_conv2d_infer_type() test_conv3d_infer_type() test_bitpack_infer_type() test_upsampling_infer_type() test_upsampling3d_infer_type() test_flatten_infer_type()", "= autotvm.task.space.SplitEntity([-1, 1, 1, 1]) cfg['tile_x'] = autotvm.task.space.SplitEntity([-1, 1, 1,", "z) x_data = np.random.uniform(low=-1, high=1, 
size=shape).astype(dtype) ref_res = topi.testing.lrn_python(x_data, size,", "relay.TensorType((n, c, w), \"uint8\")) w = relay.var(\"w\", relay.TensorType((2, 10, 3),", "# use true kshape layout here - HWOI c_np =", "= (1, 32, 18, 18) kshape = (32, 4, 3,", "mixed precision n, c, d, h, w = tvm.size_var(\"n\"), 10,", "= tvm.relay.build(mod, target=\"llvm -device=arm_cpu\") # depthwise conv2d dshape = (1,", "1], [1, 1], [1, 1], \"float32\"], {}, \\ [\"depthwise_conv2d_nchw\", [1,", "= run_infer_type(y) assert yy.checked_type == relay.TensorType( (n, w, 16), \"int32\")", "10, 224, 224), dtype) # test execution dtype = \"int32\"", "kshape = (3, 3, 3, 6, 10) run_test_conv3d(\"float32\", \"float32\", 1,", "that a vectorized instruction is generated for older Intel #", "atol=1e-5) # normal conv3d dshape = (1, 3, 5, 224,", "x = relay.var(\"x\", relay.TensorType(shape, dtype)) size=5 axis=1 bias=0.5 alpha=.00001 beta=0.75", "(4, 8, 3, 3, 4, 4), \"int8\") # Infer with", "10, 5, 224, 224), \"float32\") # test execution dtype =", "shape=dshape) w = relay.var(\"w\") y = relay.nn.conv1d_transpose(x, w, channels=10, kernel_size=(3,),", "multiple of 16 internally. for oc in [4, 16, 20]:", "this work for additional information # regarding copyright ownership. The", "c, h, w), \"int16\")) w = relay.var(\"w\", relay.ty.TensorType((32, 32, 3,", "ref_res = topi.testing.l2_normalize_python(x_data, eps, axis) for target, ctx in ctx_list():", "dtypes for input and weight. n, c, d, h, w", "the NOTICE file # distributed with this work for additional", "2.0 dtype = \"float32\" def get_shape(): if layout == \"NCDHW\":", "# Check that int8 x int8 goes through legalization so", "== 'NHWC': data_shape = (n, h, w, ic) x =", "keepdims=True) for target, ctx in ctx_list(): intrp1 = relay.create_executor(\"graph\", ctx=ctx,", "743640, \"t\": \"contrib_spatial_pack\", \"c\": null, \\ \"e\": [[\"tile_co\", \"sp\", [32,", "2, 2), strides=(2, 2, 2), padding=padding) func = relay.Function([x], y)", "n = 1 (ic, ih, iw) = (3, 28, 28)", "run_infer_type from tvm.contrib import util import topi.testing def test_conv1d_infer_type(): #", "infer by shape of w, mixed precision n, h, w,", "5, 10, 10) o_shape = (1, 500) dtype = \"float32\"", "= intrp1.evaluate(func)(data, kernel) tvm.testing.assert_allclose(op_res1.asnumpy(), ref_res, rtol=1e-5, atol=1e-5) def test_conv2d_transpose_nhwc_run(): dshape_nhwc", "bias=0.5 alpha=.00001 beta=0.75 z = relay.nn.lrn(x, size=size, axis=axis, bias=bias, alpha=alpha,", "for target in targets: if llvm_version >= 8: dtypes =", "run_infer_type(y) assert yy.checked_type == relay.TensorType((3, 24), \"float32\") x = relay.var(\"x\",", "padding=padding, dilation=dilation, groups=groups, **attrs) func = relay.Function([x, w], y) mod", "73) kshape = (192, 80, 3, 3) run_test_conv2d_cuda(\"float32\", \"float32\", 1,", "= relay.var(\"x\", relay.ty.TensorType((n, c, h, w), \"int16\")) w = relay.var(\"w\",", "input and weight. 
n, c, w = tvm.var(\"n\"), 10, 224", "\"float32\" x = relay.var(\"x\", relay.TensorType(shape, dtype)) z = relay.nn.batch_flatten(x) yy", "0), out_shape=(1, 3, 16, 16, 16)): n, c, d, h,", "yy = run_infer_type(y) assert yy.checked_type == relay.TensorType( (n, 2, 222),", "(n, 2, 222, 222, 222), \"int32\") # Infer with NDHWC", "kernel_layout, dtypes): input_dtype, weight_dtype, output_dtype = dtypes n, h, w,", "= relay.nn.conv1d(x, wt, kernel_size=3, padding=(1, 1), channels=16, data_layout=\"NWC\", out_dtype=\"int32\") yy", "224, 224 x = relay.var(\"x\", relay.TensorType((n, c, h, w), \"int8\"))", "relay.ty.TensorType((n, c, w), \"float32\")) w = relay.var(\"w\") y = relay.nn.conv1d(x,", "_has_fast_int8_instructions(asm, target) asm = _compile(ic=17, oc=29, target=target, data_layout=\"NHWC\", kernel_layout='HWIO', dtypes=dtypes)", "= np.sum(pad_np[:, :, i*sh:i*sh+kh, j*sw:j*sw+kw], axis=(2,3)) / np.maximum(pad_count, 1) ref_res", "def _test_global_pool2d(opfunc, reffunc): n, c, h, w = tvm.size_var(\"n\"), tvm.size_var(\"c\"),", "output_padding=(2, 2)) func = relay.Function([x, w], y) dtype = \"float32\"", "= \"int32\" dshape = (1, 3, 28, 28) x =", "x = relay.var(\"x\", relay.ty.TensorType((n, c, h, w), \"int16\")) w =", "np.max) _test_global_pool2d(relay.nn.global_avg_pool2d, np.mean) def test_pool1d(): def _test_pool1d(opfunc): n, c, w", "\"align_corners\") _test_upsampling3d(\"NDHWC\", \"nearest_neighbor\") _test_upsampling3d(\"NDHWC\", \"trilinear\", \"align_corners\") def test_conv2d_int8_intrinsics(): def _compile(ic,", "arm_cpu dshape = (1, 512, 32, 32) kshape = (512,", "= relay.nn.conv2d_transpose(x, w, kernel_size=(3, 3), padding=(1, 1), channels=15) assert \"channels=15\"", "op_res1 = intrp1.evaluate(func)(data) tvm.testing.assert_allclose(op_res1.asnumpy(), ref_res, rtol=1e-5, atol=1e-5) def _test_pool2d_int(opfunc, reffunc,", "224, 224 x = relay.var(\"x\", relay.TensorType((n, h, w, c), \"float32\"))", "f_out_shape = tuple(map(lambda x: int(x), run_infer_type(func).ret_type.shape)) assert out_shape == f_out_shape,", "older Intel # generations, because we default to NCHWc layout.", "= np.random.uniform(size=dshape).astype(dtype) if method == \"nearest_neighbor\": ref = topi.testing.upsampling_python(data, (scale_h,", "/ np.maximum(pad_count, 1) ref_res = np.maximum(b_np, 0.0) data = a_np", "= np.zeros(shape=(n, ic, ih+2*ph, iw+2*pw)).astype(dtype) no_zero = (range(n), range(ic), (range(ph,", "224, 224 x = relay.var(\"x\", relay.TensorType((n//4, c//4, h, w, 4,", "224, 224, 224 x = relay.var(\"x\", relay.TensorType((n, c, d, h,", "c, 1, 1), \"float32\") # test execution dtype = \"float32\"", "200), \"float32\")) y = relay.nn.upsampling(x, scale_h=2, scale_w=2, layout=\"NCHW\", method=\"bilinear\") yy", "c), (int(round(h*scale_h)), int(round(w*scale_w)), c) ishape, oshape = get_shape() x =", "can be picked up. 
for target in targets: if llvm_version", "[\"data_pad_inline\", \"ot\", 4], [\"data_vec_inline\", \"ot\", 1], \\ [\"conv_inline\", \"ot\", 0]]}],", "run_infer_type(y) assert yy.checked_type == relay.TensorType((n, 10, 5, 224, 224), \"float32\")", "(n, ic, h, w) x = relay.var(\"x\", relay.TensorType(data_shape, input_dtype)) elif", "= (1, 18, 18, 3) kshape_hwoi = (3, 3, 10,", "= opfunc(x) yy = run_infer_type(y) assert yy.checked_type == relay.TensorType((n, c,", "kernel_size=(3,3), strides=(2,2), padding=(1,1), output_padding=(2, 2)) func = relay.Function([x, w], y)", "y = relay.nn.upsampling(x, scale_h=2, scale_w=2, layout=\"NCHW\", method=\"bilinear\") \"method=\\\"BINLINEAR\\\"\" in y.astext()", "ic, ih, iw)).astype(dtype) pad_np = np.zeros(shape=(n, ic, ih+2*ph, iw+2*pw)).astype(dtype) no_zero", "1, dshape, kshape, padding=(1, 1), channels=10, kernel_size=(3 ,3)) # mixed", "[\"data_vec_inline\", \"ot\", 1], \\ [\"conv_inline\", \"ot\", 0]]}], \"r\": [[0.0002933163], \\", "dtype=dtype) w = relay.var(\"w\", shape=kshape, dtype=dtype) y = relay.nn.conv2d(x, w,", "= autotvm.task.space.SplitEntity([-1, 1, 1, 1]) cfg['tile_rc'] = autotvm.task.space.SplitEntity([-1, 1]) cfg['auto_unroll_max_setp']", "w), \"uint8\")) w = relay.var(\"w\", relay.TensorType((2, 10, 3, 3, 3),", "tvm.nd.array(wdata.astype(weight_dtype))} with relay.build_config(opt_level=3): graph, lib, params = relay.build(func, target, params=parameters)", "\"channels=15\" in y.astext() yy = run_infer_type(y) assert yy.checked_type == relay.TensorType(", "= (1, 3, 18, 18) kshape = (3, 10, 3,", "0, 3, 0), out_shape=(1, 3, 16, 19, 16)) _test_pool3d(relay.nn.max_pool3d, padding=(0,", "assembly def _has_fast_int8_instructions(asm, target): if 'skylake-avx512' in target: return \"pmaddubs\"", "1]) cfg['auto_unroll_max_setp'] = autotvm.task.space.OtherOptionEntity(1500) cfg['unroll_explicit'] = autotvm.task.space.OtherOptionEntity(1) self.memory[key] = cfg", "18) kshape = (10, 3, 3, 3) run_test_conv2d(\"float32\", \"float32\", 1,", "dshape = (1, 3, 32, 32, 32) x = relay.var(\"x\",", "oc, target, data_layout, kernel_layout, dtypes): input_dtype, weight_dtype, output_dtype = dtypes", "dshape = (4, 10, 7, 7) x = relay.var(\"x\", shape=dshape)", "ref_res = batch_flatten(data) for target, ctx in ctx_list(): intrp =", "w), \"int8\")) w = relay.var(\"w\", relay.TensorType((2, 10, 3, 3), \"int8\"))", "kshape, padding=(1, 1), fref=None, dilation=1, except_targets=None, **attrs): if except_targets is", "padding=(0, 0, 0, 0, 0, 0), out_shape=(1, 3, 16, 16,", "int8 robustness # Input channels should be a multiple of", "tvm.var(\"n\"), 10, 224 x = relay.var(\"x\", relay.ty.TensorType((n, c, w), \"float32\"))", "relay.Function([x], z) x_data = np.random.uniform(low=-1, high=1, size=shape).astype(dtype) ref_res = topi.testing.l2_normalize_python(x_data,", "channels=32, groups=8, kernel_size=(3 ,3), except_targets=['cuda']) # also group conv2d dshape", "relay.var(\"x\", relay.TensorType((d1, 2, d3, 3), \"float32\")) y = relay.nn.batch_flatten(x) yy", "padding=(1, 1), fref=None, groups=1, dilation=(1, 1), except_targets=None, **attrs): if except_targets", "instructions can be picked up. 
for target in targets: if", "w = relay.var(\"w\", relay.ty.TensorType((32, 32, 3, 3), \"int16\")) y =", "np.random.uniform(low=-1, high=1, size=shape).astype(dtype) ref_res = topi.testing.lrn_python(x_data, size, axis, bias, alpha,", "relay.var(\"x\", shape=dshape, dtype=dtype) w = relay.var(\"w\", dtype=dtype) y = relay.nn.conv1d(x,", "dshape, kshape, padding=(1, 1, 1), channels=10, kernel_size=(3, 3 ,3)) def", "d, h, w = tvm.size_var(\"n\"), 10, 5, 224, 224 x", "= relay.nn.batch_flatten(x) yy = run_infer_type(y) assert yy.checked_type == relay.TensorType((d1, ((2*d3)*3)),", "h, w, 16), \"int32\") def test_conv2d_run(): def run_test_conv2d(dtype, out_dtype, scale,", "= run_infer_type(y) assert yy.checked_type == relay.TensorType( (1, 4, 224, 224,", "== relay.TensorType( (n, 2, 224), \"float32\") assert yy.args[1].checked_type == relay.TensorType(", "c_np = topi.testing.conv2d_transpose_nhwc_python(data, kernel, 'HWOI', 2, 1) d_np = np.zeros(shape=oshape_nhwc)", "relay.TensorType((n, c, h, w), \"float32\")) y = opfunc(x, pool_size=(1, 1))", "3) oshape = (1, 10, 37) x = relay.var(\"x\", shape=dshape)", "# test execution dtype = \"float32\" dshape = (1, 3,", "28, 28) (oc, oh, ow) = (3, 15, 15) dshape", "tvm from tvm import autotvm from tvm import relay from", "18, 16, 16)) _test_pool3d(relay.nn.avg_pool3d, padding=(0, 3, 0, 0, 3, 0),", "(2, 2) n = 1 (ic, ih, iw) = (3,", "\"nearest_neighbor\") _test_upsampling3d(\"NDHWC\", \"trilinear\", \"align_corners\") def test_conv2d_int8_intrinsics(): def _compile(ic, oc, target,", "relay.nn.conv1d(x, w, out_dtype=\"int32\") assert \"out_dtype=\\\"int32\\\"\" in y.astext() yy = run_infer_type(y)", "relay.var(\"w\") # kshape and kernel_layout should have swapped IO. #", "for arm_cpu dshape = (1, 512, 32, 32) kshape =", "intrp2.evaluate(func)(x_data) tvm.testing.assert_allclose(op_res2.asnumpy(), ref_res, rtol=1e-5) def test_pad_infer_type(): # entirely concrete case", "def _test_upsampling3d(layout, method, coordinate_transformation_mode=\"half_pixel\"): n, c, d, h, w =", "x = relay.var(\"x\", relay.TensorType((n, c, d, h, w), \"float32\")) y", "kernel_layout='HWIO', dtypes=dtypes) assert _has_fast_int8_instructions(asm, target) # Ensure that code is", "def test_bitpack_infer_type(): # Test axis packing shape inference. o, i,", "w, (1, 1), \"SAME\")) # depthwise conv2d for arm_cpu dshape", "# under the License. 
\"\"\" Support level2 operator test cases.", "[[0.0002933163], \\ 0, 3.1976189613342285, 1570811630.6058347], \"v\": 0.1}' temp = util.tempdir()", "lib.get_source(\"asm\") return assembly def _has_fast_int8_instructions(asm, target): if 'skylake-avx512' in target:", "relay.nn.conv3d(x, wt, kernel_size=(3, 3, 3), padding=(1, 1, 1), channels=16, data_layout=\"NDHWC\",", "size=kshape).astype(dtype) ref_res = topi.testing.conv2d_nchw_python( data.astype(out_dtype), kernel.astype(out_dtype), 1, padding, groups=groups) with", "run_test_conv3d(\"float32\", \"float32\", 1, dshape, kshape, padding=(1, 1, 1), channels=10, kernel_size=(3,", "224, 224 x = relay.var(\"x\", relay.TensorType((n, h, w, c), \"int8\"))", "\"float32\") shape = (1, 5, 10, 10) o_shape = (1,", "= relay.var(\"x\", relay.TensorType((n, d, h, w, c), \"int8\")) wt =", "dilation=dilation, groups=groups, **attrs) func = relay.Function([x, w], y) data =", "== relay.TensorType( (n, 15, 10, 12), \"float32\") assert yy.args[1].checked_type ==", "[512, 1, 3, 3, \"float32\"], [1, 1], [1, 1], [1,", "shape=dshape, dtype=dtype) w = relay.var(\"w\", dtype=dtype) y = relay.nn.conv1d(x, w,", "1), channels=10, kernel_size=(1 ,3)) # dilated conv2d dshape = (1,", "np import tvm from tvm import autotvm from tvm import", "func = relay.Function([x, w], y) mod = relay.Module() mod['main'] =", "run_infer_type(y) assert yy.checked_type == relay.TensorType((n,) + oshape, dtype) dshape =", "oshape, dtype) dshape = (1,) + ishape x = relay.var(\"x\",", "dshape = (1, 32, 18, 18) kshape = (32, 1,", "3, 3, 3), \"float32\") # infer by shape of w,", "dshape = (1, 512, 32, 32) kshape = (512, 1,", "np.random.rand(*kernel_shape) * 10 parameters = {\"weight\": tvm.nd.array(wdata.astype(weight_dtype))} with relay.build_config(opt_level=3): graph,", "y) data = np.random.uniform(size=dshape).astype(dtype) ref_res = topi.testing.pool1d_ncw_python(data, (2,), (2,), (0,", "x, w, kernel_size=(3, 3), padding=(0, 0), channels=32) yy = run_infer_type(y)", "y = relay.nn.conv3d(x, wt, kernel_size=(3, 3, 3), padding=(1, 1, 1),", "batch_flatten(data): shape = data.shape target_dim = 1 for i in", "= relay.nn.upsampling3d(x, scale_d=scale_d, scale_h=scale_h, scale_w=scale_w,\\ layout=layout, method=method,\\ coordinate_transformation_mode=coordinate_transformation_mode) func =", "padding 1, kernel 3x3 dshape = (1, 80, 73, 73)", "3), padding=(1, 1), channels=2) yy = run_infer_type(y) assert yy.checked_type ==", "autotvm.task.space.SplitEntity([-1, 1, 1, 1]) cfg['tile_y'] = autotvm.task.space.SplitEntity([-1, 1, 1, 1])", "eps, axis) for target, ctx in ctx_list(): intrp1 = relay.create_executor(\"graph\",", "data.shape target_dim = 1 for i in range(len(shape) - 1):", "self.memory[key] = cfg return cfg def run_test_conv2d_cuda(dtype, out_dtype, scale, dshape,", "from tvm.contrib import util import topi.testing def test_conv1d_infer_type(): # symbolic", "3, 224, 224) kshape = (10, 3, 3, 3) run_test_conv2d(\"float32\",", "scale_h=2, scale_w=2, layout=\"NCHW\", method=\"bilinear\") yy = run_infer_type(y) assert yy.checked_type ==", "\\ [\"reorder_0\", \"re\", [0, 1, 2, 3, 4, 5, 8,", "1), dilation=(1, 1), data_layout=data_layout, kernel_layout=kernel_layout, out_dtype=output_dtype) func = relay.Function([x, weight],", "32], \"float32\"], \\ [\"TENSOR\", [512, 1, 3, 3], \"float32\"], \\", "extended winograd: stride 1, padding N, kernel 3x3 run_test_conv2d_cuda(\"float32\", \"float32\",", "# Infer with NHWC n, c, h, w = 4,", "y = relay.nn.conv1d(x, w, kernel_size=3, padding=(1, 1), 
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements.  See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership.  The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License.  You may obtain a copy of the License at
#
#   http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied.  See the License for the
# specific language governing permissions and limitations
# under the License.
""" Support level2 operator test cases.
"""
import numpy as np
import tvm
from tvm import autotvm
from tvm import relay
from tvm.relay.testing import ctx_list, run_infer_type
from tvm.contrib import util
import topi.testing

def test_conv1d_infer_type():
    # symbolic in batch dimension
    n, c, w = tvm.var("n"), 10, 224
    x = relay.var("x", relay.TensorType((n, c, w), "float32"))
    w = relay.var("w")
    y = relay.nn.conv1d(x, w,
                        kernel_size=3,
                        padding=(1, 1),
                        channels=2)
    yy = run_infer_type(y)
    assert yy.checked_type == relay.TensorType(
        (n, 2, 224), "float32")
    assert yy.args[1].checked_type == relay.TensorType(
        (2, 10, 3), "float32")

    # infer by shape of w, mixed precision
    n, c, w = tvm.var("n"), 10, 224
    x = relay.var("x", relay.TensorType((n, c, w), "uint8"))
    w = relay.var("w", relay.TensorType((2, 10, 3), "int8"))
    y = relay.nn.conv1d(x, w, out_dtype="int32")
    assert "out_dtype=\"int32\"" in y.astext()
    yy = run_infer_type(y)
    assert yy.checked_type == relay.TensorType(
        (n, 2, 222), "int32")

    # Infer with NWC
    n, c, w = 4, 32, 224
    x = relay.var("x", relay.TensorType((n, w, c), "int8"))
    wt = relay.var("w")
    y = relay.nn.conv1d(x, wt,
                        kernel_size=3,
                        padding=(1, 1),
                        channels=16,
                        data_layout="NWC",
                        out_dtype="int32")
    yy = run_infer_type(y)
    assert yy.checked_type == relay.TensorType(
        (n, w, 16), "int32")


def test_conv2d_infer_type():
    # symbolic in batch dimension
    n, c, h, w = tvm.size_var("n"), 10, 224, 224
    x = relay.var("x", relay.ty.TensorType((n, c, h, w), "float32"))
    w = relay.var("w")
    y = relay.nn.conv2d(x, w,
                        kernel_size=(3, 3),
                        padding=(1, 1),
                        channels=2)
    yy = run_infer_type(y)
    assert yy.checked_type == relay.TensorType(
        (n, 2, 224, 224), "float32")
    assert yy.args[1].checked_type == relay.TensorType(
        (2, 10, 3, 3), "float32")

    # infer by shape of w, mixed precision
    n, c, h, w = tvm.size_var("n"), 10, 224, 224
    x = relay.var("x", relay.TensorType((n, c, h, w), "int8"))
    w = relay.var("w", relay.TensorType((2, 10, 3, 3), "int8"))
    y = relay.nn.conv2d(x, w, out_dtype="int32")
    assert "out_dtype=\"int32\"" in y.astext()
    yy = run_infer_type(y)
    assert yy.checked_type == relay.TensorType(
        (n, 2, 222, 222), "int32")

    # infer shape in case of different dtypes for input and weight.
    n, c, h, w = tvm.size_var("n"), 10, 224, 224
    x = relay.var("x", relay.TensorType((n, c, h, w), "uint8"))
    w = relay.var("w", relay.TensorType((2, 10, 3, 3), "int8"))
    y = relay.nn.conv2d(x, w, out_dtype="int32")
    assert "out_dtype=\"int32\"" in y.astext()
    yy = run_infer_type(y)
    assert yy.checked_type == relay.TensorType(
        (n, 2, 222, 222), "int32")

    # Infer with NHWC
    n, c, h, w = 4, 32, 224, 224
    x = relay.var("x", relay.TensorType((n, h, w, c), "int8"))
    wt = relay.var("w")
    y = relay.nn.conv2d(x, wt,
                        kernel_size=(3, 3),
                        padding=(1, 1),
                        channels=16,
                        data_layout="NHWC",
                        out_dtype="int32")
    yy = run_infer_type(y)
    assert yy.checked_type == relay.TensorType(
        (n, h, w, 16), "int32")
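
# Added worked check (hypothetical helper, not in the original file):
# the shapes asserted above follow the standard convolution output-size
# formula, reproduced here for clarity.
def _example_conv_out_dim(size, kernel, pad, stride=1, dilation=1):
    # floor((size + 2*pad - dilation*(kernel-1) - 1) / stride) + 1
    return (size + 2 * pad - dilation * (kernel - 1) - 1) // stride + 1

# padding 1 with a 3x3 kernel preserves the 224 spatial extent ...
assert _example_conv_out_dim(224, 3, 1) == 224
# ... while no padding shrinks it to the 222 asserted above
assert _example_conv_out_dim(224, 3, 0) == 222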
tvm.size_var(\"d4\") x", "yy = run_infer_type(y) assert yy.checked_type == relay.TensorType( (n, d, h,", "0, 0, 0, 0, 0), out_shape=(1, 3, 16, 16, 16)):", "kshape = (3, 10, 3, 3) oshape = (1, 10,", "dtypes=fast_int8_dtypes) # Check that vector int mult and add instructions", "tvm.size_var(\"n\"), 10, 224, 224, 224 x = relay.var(\"x\", relay.TensorType((n, c,", "method, coordinate_transformation_mode=\"half_pixel\"): n, c, d, h, w = tvm.size_var(\"n\"), 8,", "assert yy.checked_type == relay.TensorType((n, 10, 224, 224), \"float32\") # test", "= relay.Function([x], y) data = np.random.uniform(size=dshape).astype(dtype) if method == \"nearest_neighbor\":", "'uint8' weight_dtype = 'int8' output_dtype = 'int32' data_shape = (1,", "w), dtype)) y = opfunc(x, pool_size=(1, 1)) assert \"pool_size=\" in", "\"\"\" import numpy as np import tvm from tvm import", "3), padding=(1, 1), channels=16, data_layout=\"NHWC\", out_dtype=\"int32\") yy = run_infer_type(y) assert", "data_layout=\"NCHW\", kernel_layout='OIHW', dtypes=dtypes) assert _has_fast_int8_instructions(asm, target) for oc in [4,", "3, 18) kshape = (3, 10, 3) oshape = (1,", "h, w), (c, int(round(d*scale_d)), int(round(h*scale_h)),\\ int(round(w*scale_w))) else: return (d, h,", "# KIND, either express or implied. See the License for", "== \"__main__\": test_pool1d() test_pool2d() test_pool3d() test_avg_pool2d_no_count_pad() test_lrn() test_l2_normalize() test_conv1d_infer_type() test_conv2d_infer_type()", "= lib.get_source(\"asm\") return assembly def _has_fast_int8_instructions(asm, target): if 'skylake-avx512' in", "target=target) op_res1 = intrp1.evaluate(func)(data) tvm.testing.assert_allclose(op_res1.asnumpy(), ref_res, rtol=1e-5, atol=1e-5) _test_run('float32') _test_run('int32')", "(10, 3, 3) run_test_conv1d(\"float32\", \"float32\", 1, dshape, kshape, padding=(1, 1),", "data = np.random.random_integers(low=-128, high=128, size=dshape) ref_res = reffunc(data.reshape(1,3,14,2,14,2), axis=(3,5)).astype(dtype) for", "run_infer_type(y) assert yy.checked_type == relay.TensorType( (32, 2, 128, 128, 1),", "up. for target in targets: if llvm_version >= 8: dtypes", "dshape = (1, 3, 18, 18) kshape = (3, 10,", "tvm.size_var(\"n\"), tvm.size_var(\"c\"),\\ tvm.size_var(\"d\"), tvm.size_var(\"h\"), tvm.size_var(\"w\") scale = tvm.const(2.0, \"float64\") x", "bias, alpha, beta) for target, ctx in ctx_list(): intrp1 =", "input_dtype)) kernel_shape = (64, 1, 3, 3) weight = relay.var(\"weight\",", "execution dtype = \"float32\" dshape = (1, 3, 28, 28)", "224), \"float32\") assert yy.args[1].checked_type == relay.TensorType( (2, 10, 3, 3,", "atol=1e-5) def _test_global_pool2d(opfunc, reffunc): n, c, h, w = tvm.size_var(\"n\"),", "case of different dtypes for input and weight. 
n, c,", "37, 37, 10) x = relay.var(\"x\", shape=dshape_nhwc) w = relay.var(\"w\")", "atol=1e-5) def test_upsampling_infer_type(): n, c , h, w = tvm.size_var(\"n\"),", "target=target) out = executor.evaluate(func)(data) tvm.testing.assert_allclose(out.asnumpy(), ref, rtol=1e-5, atol=1e-5) def test_upsampling():", ",3), except_targets=[\"cuda\"]) def test_conv2d_transpose_infer_type(): # symbolic in batch dimension n,", "test_depthwise_conv2d_int8(): input_dtype = 'uint8' weight_dtype = 'int8' output_dtype = 'int32'", "3)) # extended winograd: stride 1, padding N, kernel NxN", "compile conv2d for x86 (skylake, cascadelake) and test assembly contains", "conv3d dshape = (1, 5, 224, 224, 6) kshape =", "18, 16, 16)) _test_pool3d(relay.nn.max_pool3d, padding=(0, 3, 0, 0, 3, 0),", "dtypes=dtypes) assert _has_fast_int8_instructions(asm, target) # Sweep the output channels to", "elif kernel_layout == 'HWIO': kernel_shape = (ch, cw, ic, oc)", "def test_pad_run(): def _test_run(dtype): dshape = (4, 10, 7, 7)", "\"ot\", 0]]}], \"r\": [[0.0002933163], \\ 0, 3.1976189613342285, 1570811630.6058347], \"v\": 0.1}'", "extended winograd: stride 1, padding N, kernel NxN kshape =", "= topi.testing.conv3d_ncdhw_python( data.astype(out_dtype), dkernel.astype(out_dtype), 1, padding, groups=groups) else: ref_res =", "x = relay.var(\"x\", shape=dshape_nhwc) w = relay.var(\"w\") # kshape and", "rtol=1e-5, atol=1e-5) def test_pool2d(): _test_pool2d(relay.nn.max_pool2d, np.max) _test_pool2d(relay.nn.avg_pool2d, np.mean) _test_pool2d_int(relay.nn.avg_pool2d, np.mean,", "in str(opfunc) else 'avg' y = opfunc(x, pool_size=(2,), strides=(2,), padding=(0,", "if 'skylake-avx512' in target: return \"pmaddubs\" in asm elif 'cascadelake'", "= relay.nn.conv2d(x, w, padding=padding, dilation=dilation, groups=groups, **attrs) func = relay.Function([x,", "relay.TensorType((n, h, w, c), \"int8\")) wt = relay.var(\"w\") y =", "run_infer_type(y) assert yy.checked_type == relay.TensorType( (n, d, h, w, 16),", "axis=axis, bias=bias, alpha=alpha, beta=beta) yy = run_infer_type(z) assert yy.checked_type ==", "2.0 dtype = \"float32\" def get_shape(): if layout == \"NCHW\":", "method=\"trilinear\") yy = run_infer_type(y) assert yy.checked_type == relay.TensorType((n, c, 200,", "= intrp.evaluate(func)(data) np.testing.assert_allclose(op_res.asnumpy(), ref_res, rtol=0.01) def _test_upsampling(layout, method, align_corners=False): n,", "strides=(2,2), padding=(1,1), output_padding=(2, 2)) func = relay.Function([x, w], y) dtype", "You may obtain a copy of the License at #", "# test execution dtype = \"int32\" dshape = (1, 3,", "layout n, c, h, w = 4, 32, 224, 224", "10 parameters = {\"weight\": tvm.nd.array(wdata.astype(weight_dtype))} targets = [\"llvm -mcpu=skylake-avx512\", \"llvm", "tvm.size_var(\"n\"), 10, 5, 224, 224 x = relay.var(\"x\", relay.TensorType((n, c,", "assert yy.checked_type == relay.TensorType((3, 24), \"float32\") x = relay.var(\"x\", relay.TensorType((d1,", "x = relay.var(\"x\", shape=(n, c , h, w)) y =", "instructions targets = [\"llvm -mcpu=skylake-avx512\", \"llvm -mcpu=cascadelake\"] llvm_version = tvm.codegen.llvm_version_major()", "shape of w, mixed precision n, c, w = tvm.var(\"n\"),", "swapped IO. 
# kshape is HWOI and kernel_layout is HWIO", "dtype=dtype) w = relay.var(\"w\", dtype=dtype) y = relay.nn.conv1d(x, w, padding=padding,", "ref_res, rtol=1e-5, atol=1e-5) def test_pool2d(): _test_pool2d(relay.nn.max_pool2d, np.max) _test_pool2d(relay.nn.avg_pool2d, np.mean) _test_pool2d_int(relay.nn.avg_pool2d,", "= np.random.uniform(size=dshape).astype(dtype) if method == \"nearest_neighbor\": ref = topi.testing.upsampling3d_python(data, (scale_d,", "assert yy.checked_type == relay.TensorType((d1, ((2*d3)*3)), \"float32\") shape = (1, 5,", "[1, 4, 6]: asm = _compile(ic=ic, oc=16, target=target, data_layout=\"NCHW\", kernel_layout='OIHW',", "kernel) tvm.testing.assert_allclose(op_res1.asnumpy(), ref_res, rtol=1e-5, atol=1e-5) # normal conv3d dshape =", "dilation) if fref is None: ref_res = topi.testing.conv3d_ncdhw_python( data.astype(out_dtype), dkernel.astype(out_dtype),", "pad_np = np.zeros(shape=(n, ic, ih+2*ph, iw+2*pw)).astype(dtype) no_zero = (range(n), range(ic),", "tvm.codegen.llvm_version_major() for target in targets: if llvm_version >= 8: with", "w = tvm.size_var(\"n\"), 16, 32, 32 scale_h = 2.0 scale_w", "graph, lib, params = relay.build(func, target, params=parameters) assembly = lib.get_source(\"asm\")", "out = executor.evaluate(func)(data) tvm.testing.assert_allclose(out.asnumpy(), ref, rtol=1e-5, atol=1e-5) def test_upsampling3d(): _test_upsampling3d(\"NCDHW\",", "20]: asm = _compile(ic=8, oc=oc, target=target, data_layout=\"NHWC\", kernel_layout='HWIO', dtypes=dtypes) assert", "n, c, h, w = tvm.size_var(\"n\"), 10, 10, 12 x", "w), \"uint8\")) w = relay.var(\"w\", relay.TensorType((2, 10, 3, 3), \"int8\"))", "(512, 1, 3, 3) compile_test_conv2d_arm_cpu(\"float32\", \"float32\", 1, dshape, kshape, padding=(1,", "w), \"float32\")) w = relay.var(\"w\") y = relay.nn.conv1d(x, w, kernel_size=3,", "yy.checked_type == relay.TensorType( (n, 2, 224), \"float32\") assert yy.args[1].checked_type ==", "_has_fast_int8_instructions(asm, target) # Ensure that code is generated when datatypes", "data.astype(out_dtype), kernel.astype(out_dtype), 1, padding, dilation) for target, ctx in ctx_list():", "= relay.var(\"w\") y = relay.nn.conv2d(x, wt, kernel_size=(3, 3), padding=(1, 1),", "OF ANY # KIND, either express or implied. 
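
# Added check (illustrative only, not from the original file): np.pad
# grows each axis by the sum of its (before, after) padding, which is
# where test_pad_run's reference shape comes from.
_pad_demo = np.pad(np.zeros((4, 10, 7, 7), dtype="float32"),
                   ((1, 1), (2, 2), (3, 3), (4, 4)), 'constant')
assert _pad_demo.shape == (6, 14, 13, 15)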

def test_conv2d_transpose_infer_type():
    # symbolic in batch dimension
    n, c, h, w = tvm.size_var("n"), 10, 10, 12
    x = relay.var("x", relay.TensorType((n, c, h, w), "float32"))
    w = relay.var("w", relay.IncompleteType())
    y = relay.nn.conv2d_transpose(x, w,
                                  kernel_size=(3, 3),
                                  padding=(1, 1),
                                  channels=15)
    assert "channels=15" in y.astext()
    yy = run_infer_type(y)
    assert yy.checked_type == relay.TensorType(
        (n, 15, 10, 12), "float32")
    assert yy.args[1].checked_type == relay.TensorType(
        (10, 15, 3, 3), "float32")

    # infer by shape of w, mixed precision
    n, h, w, c = tvm.size_var("n"), 10, 10, 12
    x = relay.var("x", relay.TensorType((n, h, w, c), "float32"))
    w = relay.var("w", relay.TensorType((12, 11, 5, 5), "float32"))
    y = relay.nn.conv2d_transpose(x, w,
                                  output_padding=(1, 1),
                                  channels=11,
                                  data_layout="NHWC")
    yy = run_infer_type(y)
    assert yy.checked_type == relay.TensorType(
        (n, 15, 15, 11), "float32")


def test_conv2d_transpose_nchw_run():
    dshape = (1, 3, 18, 18)
    kshape = (3, 10, 3, 3)
    oshape = (1, 10, 37, 37)
    x = relay.var("x", shape=dshape)
    w = relay.var("w")
    y = relay.nn.conv2d_transpose(x, w,
                                  channels=10, kernel_size=(3, 3), strides=(2, 2),
                                  padding=(1, 1), output_padding=(2, 2))
    func = relay.Function([x, w], y)
    dtype = "float32"
    data = np.random.uniform(size=dshape).astype(dtype)
    kernel = np.random.uniform(size=kshape).astype(dtype)
    c_np = topi.testing.conv2d_transpose_nchw_python(
        data, kernel, 2, 1)
    d_np = np.zeros(shape=oshape)
    d_np[:, :, 0:c_np.shape[2], 0:c_np.shape[3]] = c_np
    ref_res = d_np

    for target, ctx in ctx_list():
        intrp1 = relay.create_executor("graph", ctx=ctx, target=target)
        op_res1 = intrp1.evaluate(func)(data, kernel)
        tvm.testing.assert_allclose(op_res1.asnumpy(), ref_res,
                                    rtol=1e-5, atol=1e-5)


def test_conv2d_int8_intrinsics():
    def _compile(ic, oc, target, data_layout, kernel_layout, dtypes):
        input_dtype, weight_dtype, output_dtype = dtypes

        n, h, w, ch, cw = 1, 64, 64, 3, 3
        if data_layout == 'NCHW':
            data_shape = (n, ic, h, w)
            x = relay.var("x", relay.TensorType(data_shape, input_dtype))
        elif data_layout == 'NHWC':
            data_shape = (n, h, w, ic)
            x = relay.var("x", relay.TensorType(data_shape, input_dtype))
        else:
            raise ValueError('Not supported')

        if kernel_layout == 'OIHW':
            kernel_shape = (oc, ic, ch, cw)
        elif kernel_layout == 'HWIO':
            kernel_shape = (ch, cw, ic, oc)
        else:
            raise ValueError('Not supported')

        weight = relay.var("weight", relay.TensorType(kernel_shape, weight_dtype))
        y = relay.nn.conv2d(x, weight,
                            kernel_size=(ch, cw),
                            channels=oc,
                            padding=(1, 1),
                            dilation=(1, 1),
                            data_layout=data_layout,
                            kernel_layout=kernel_layout,
                            out_dtype=output_dtype)
        func = relay.Function([x, weight], y)
        wdata = np.random.rand(*kernel_shape) * 10
        parameters = {"weight": tvm.nd.array(wdata.astype(weight_dtype))}

        with relay.build_config(opt_level=3):
            graph, lib, params = relay.build(func, target, params=parameters)
        assembly = lib.get_source("asm")
        return assembly

    def _has_fast_int8_instructions(asm, target):
        if 'skylake-avx512' in target:
            return "pmaddubs" in asm
        elif 'cascadelake' in target:
            return "vpdpbusd" in asm
        else:
            assert False, "Target should be Skylake or Cascadelake"

    # compile conv2d for x86 (skylake, cascadelake) and test assembly
    # contains *pmadd* instructions
    targets = ["llvm -mcpu=skylake-avx512", "llvm -mcpu=cascadelake"]
    llvm_version = tvm.codegen.llvm_version_major()
    for target in targets:
        if llvm_version >= 8:
            dtypes = ('uint8', 'int8', 'int32')
            # Sweep the input channels to check int8 robustness
            # Input channels should be a multiple of 4 internally.
            for ic in [1, 4, 6]:
                asm = _compile(ic=ic, oc=16, target=target, data_layout="NCHW",
                               kernel_layout='OIHW', dtypes=dtypes)
                assert _has_fast_int8_instructions(asm, target)

            for ic in [1, 4, 6]:
                asm = _compile(ic=ic, oc=16, target=target, data_layout="NHWC",
                               kernel_layout='HWIO', dtypes=dtypes)
                assert _has_fast_int8_instructions(asm, target)

            # Sweep the output channels to check int8 robustness
            # Output channels should be a multiple of 16 internally.
            for oc in [4, 16, 20]:
                asm = _compile(ic=8, oc=oc, target=target, data_layout="NCHW",
                               kernel_layout='OIHW', dtypes=dtypes)
                assert _has_fast_int8_instructions(asm, target)

            for oc in [4, 16, 20]:
                asm = _compile(ic=8, oc=oc, target=target, data_layout="NHWC",
                               kernel_layout='HWIO', dtypes=dtypes)
                assert _has_fast_int8_instructions(asm, target)

            # Check that both non-divisible oc and ic work
            asm = _compile(ic=17, oc=29, target=target, data_layout="NCHW",
                           kernel_layout='OIHW', dtypes=dtypes)
            assert _has_fast_int8_instructions(asm, target)

            asm = _compile(ic=17, oc=29, target=target, data_layout="NHWC",
                           kernel_layout='HWIO', dtypes=dtypes)
            assert _has_fast_int8_instructions(asm, target)

    # Check that int8 x int8 goes through legalization so that fast
    # instructions can be picked up.
    for target in targets:
        if llvm_version >= 8:
            dtypes = ('int8', 'int8', 'int32')
            asm = _compile(ic=16, oc=32, target=target, data_layout="NCHW",
                           kernel_layout='OIHW', dtypes=dtypes)
            assert _has_fast_int8_instructions(asm, target)

            # Ensure that code is generated when datatypes are not HW supported.
            dtypes = ('uint8', 'uint8', 'int32')
            asm = _compile(ic=16, oc=32, target=target, data_layout="NHWC",
                           kernel_layout='HWIO', dtypes=dtypes)
            # Check that the intrinsic is not present in the assembly.
            assert not _has_fast_int8_instructions(asm, target)

    # Check that a vectorized instruction is generated for older Intel
    # generations, because fast int8 instructions are not available.
    target = "llvm -mcpu=core-avx2"
    fast_int8_dtypes = ('uint8', 'int8', 'int32')
    asm = _compile(ic=16, oc=32, target=target, data_layout="NCHW",
                   kernel_layout='OIHW', dtypes=fast_int8_dtypes)
    # Check that vector int mult and add instructions are generated.
    assert "vpmulld" in asm and "vpadd" in asm


def test_depthwise_conv2d_int8():
    input_dtype = 'uint8'
    weight_dtype = 'int8'
    output_dtype = 'int32'

    data_shape = (1, 64, 56, 56)
    x = relay.var("x", relay.TensorType(data_shape, input_dtype))

    kernel_shape = (64, 1, 3, 3)
    weight = relay.var("weight", relay.TensorType(kernel_shape, weight_dtype))

    y = relay.nn.conv2d(x, weight,
                        kernel_size=(3, 3),
                        groups=64,
                        padding=(1, 1),
                        dilation=(1, 1),
                        out_dtype=output_dtype)
    func = relay.Function([x, weight], y)
    wdata = np.random.rand(*kernel_shape) * 10
    parameters = {"weight": tvm.nd.array(wdata.astype(weight_dtype))}

    targets = ["llvm -mcpu=skylake-avx512", "llvm -mcpu=cascadelake"]
    llvm_version = tvm.codegen.llvm_version_major()
    for target in targets:
        if llvm_version >= 8:
            with relay.build_config(opt_level=3):
                graph, lib, params = relay.build(func, target, params=parameters)
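
# Added illustration (not part of the original tests): the pmaddubs /
# vpdpbusd instructions asserted above multiply-accumulate uint8 x int8
# products into wider integer lanes; this is a plain NumPy emulation of
# one such 4-element dot product, with made-up input values.
_acc_a = np.array([1, 2, 3, 4], dtype="uint8")
_acc_b = np.array([-1, 2, -3, 4], dtype="int8")
_acc = np.sum(_acc_a.astype("int32") * _acc_b.astype("int32"))
assert _acc == 10  # -1 + 4 - 9 + 16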

def _test_pool2d(opfunc, reffunc):
    n, c, h, w = tvm.size_var("n"), 10, 224, 224
    x = relay.var("x", relay.TensorType((n, c, h, w), "float32"))
    y = opfunc(x, pool_size=(1, 1))
    assert "pool_size=" in y.astext()
    yy = run_infer_type(y)
    assert yy.checked_type == relay.TensorType((n, 10, 224, 224), "float32")
    # test execution
    dtype = "float32"
    dshape = (1, 3, 28, 28)
    x = relay.var("x", shape=dshape)
    y = opfunc(x, pool_size=(2, 2), strides=(2, 2), padding=(0, 0))
    func = relay.Function([x], y)
    data = np.random.uniform(size=dshape).astype(dtype)
    ref_res = reffunc(data.reshape(1, 3, 14, 2, 14, 2), axis=(3, 5))
    for target, ctx in ctx_list():
        intrp1 = relay.create_executor("graph", ctx=ctx, target=target)
        op_res1 = intrp1.evaluate(func)(data)
        tvm.testing.assert_allclose(op_res1.asnumpy(), ref_res,
                                    rtol=1e-5, atol=1e-5)


def _test_pool2d_int(opfunc, reffunc, dtype):
    n, c, h, w = tvm.size_var("n"), 10, 224, 224
    x = relay.var("x", relay.TensorType((n, c, h, w), dtype))
    y = opfunc(x, pool_size=(1, 1))
    assert "pool_size=" in y.astext()
    yy = run_infer_type(y)
    assert yy.checked_type == relay.TensorType((n, 10, 224, 224), dtype)
    # test execution
    dshape = (1, 3, 28, 28)
    x = relay.var("x", shape=dshape, dtype=dtype)
    y = opfunc(x, pool_size=(2, 2), strides=(2, 2), padding=(0, 0))
    func = relay.Function([x], y)
    data = np.random.random_integers(low=-128, high=128, size=dshape)
    ref_res = reffunc(data.reshape(1, 3, 14, 2, 14, 2),
                      axis=(3, 5)).astype(dtype)
    for target, ctx in ctx_list():
        intrp1 = relay.create_executor("graph", ctx=ctx, target=target)
        op_res1 = intrp1.evaluate(func)(data)
        tvm.testing.assert_allclose(op_res1.asnumpy(), ref_res,
                                    rtol=1e-5, atol=1e-5)


def test_pool2d():
    _test_pool2d(relay.nn.max_pool2d, np.max)
    _test_pool2d(relay.nn.avg_pool2d, np.mean)
    _test_pool2d_int(relay.nn.avg_pool2d, np.mean, 'int32')


def test_lrn():
    n, c, h, w = tvm.size_var("n"), tvm.size_var("c"), \
        tvm.size_var("h"), tvm.size_var("w")
    x = relay.var("x", shape=(n, c, h, w))
    y = relay.nn.lrn(x, size=10, axis=2, bias=0.5, alpha=.00001, beta=0.75)
    yy = run_infer_type(y)
    assert yy.checked_type == relay.TensorType((n, c, h, w))

    shape = (1, 5, 10, 10)
    dtype = "float32"
    x = relay.var("x", relay.TensorType(shape, dtype))
    size = 5
    axis = 1
    bias = 0.5
    alpha = .00001
    beta = 0.75
    z = relay.nn.lrn(x, size=size, axis=axis, bias=bias, alpha=alpha, beta=beta)
    yy = run_infer_type(z)
    assert yy.checked_type == relay.TensorType(shape, dtype)
    func = relay.Function([x], z)
    x_data = np.random.uniform(low=-1, high=1, size=shape).astype(dtype)
    ref_res = topi.testing.lrn_python(x_data, size, axis, bias, alpha, beta)

    for target, ctx in ctx_list():
        intrp1 = relay.create_executor("graph", ctx=ctx, target=target)
        op_res1 = intrp1.evaluate(func)(x_data)
        tvm.testing.assert_allclose(op_res1.asnumpy(), ref_res, rtol=1e-5)
        intrp2 = relay.create_executor("debug", ctx=ctx, target=target)
        op_res2 = intrp2.evaluate(func)(x_data)
        tvm.testing.assert_allclose(op_res2.asnumpy(), ref_res, rtol=1e-5)
assert \"vpmulld\" in asm and \"vpadd\" in asm def", "= 1 (ic, ih, iw) = (3, 28, 28) (oc,", "\"uint16\") if __name__ == \"__main__\": test_pool1d() test_pool2d() test_pool3d() test_avg_pool2d_no_count_pad() test_lrn()", "dshape, kshape, padding=(1, 1), fref=None, dilation=1, except_targets=None, **attrs): if except_targets", "target=target, data_layout=\"NCHW\", kernel_layout='OIHW', dtypes=dtypes) assert _has_fast_int8_instructions(asm, target) for oc in", "0), out_shape=(1, 3, 16, 19, 16)) _test_pool3d(relay.nn.avg_pool3d, padding=(0, 0, 4,", "(1, 32, 18, 18) kshape = (32, 1, 3, 3)", "= opfunc(x, pool_size=(1,)) assert \"pool_size=\" in y.astext() yy = run_infer_type(y)", "opfunc(x, pool_size=(2, 2, 2), strides=(2, 2, 2), padding=padding) func =", "4))) func = relay.Function([x], y) data = np.random.uniform(size=dshape).astype(dtype) ref_res =", "= tvm.codegen.llvm_version_major() for target in targets: if llvm_version >= 8:", "'avg' y = opfunc(x, pool_size=(2, 2, 2), strides=(2, 2, 2),", "at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable", "under the License. \"\"\" Support level2 operator test cases. \"\"\"", "x = relay.var(\"x\", relay.TensorType((n, w, c), \"int8\")) wt = relay.var(\"w\")", "= (target, workload) if key in self.memory: return self.memory[key] cfg", "channels=10, kernel_size=(3, 3 ,3)) def test_conv3d_ndhwc_run(): def run_test_conv3d(dtype, out_dtype, scale,", "4, 5]], \\ [\"ann_reduce\", \"an\", [\"unroll\", \"none\"]], \\ [\"ann_spatial\", \"an\",", "mixed precision n, h, w, c = tvm.size_var(\"n\"), 10, 10,", "1, 1), fref=None, groups=1, dilation=(1, 1, 1), except_targets=None, **attrs): if", "1), groups=1, dilation=(1, 1), **attrs): x = relay.var(\"x\", shape=dshape, dtype=dtype)", "dtype = \"int32\" dshape = (1, 3, 28, 28) x", "assert yy.checked_type == relay.TensorType((n, c, 1, 1), \"float32\") # test", "run_infer_type(y) assert yy.checked_type == relay.TensorType((n, 10, 224, 224), dtype) #", "1024, 7, 7) x = relay.var(\"x\", shape=dshape) y = opfunc(x)", "(0, 0), (1, 3, 16), pool_type, False) for target, ctx", "layout=layout, method=method, align_corners=align_corners) yy = run_infer_type(y) assert yy.checked_type == relay.TensorType((n,)", "w = relay.var(\"w\") y = relay.nn.conv2d(x, w, kernel_size=(3, 3), padding=(1,", "y.astext() yy = run_infer_type(y) assert yy.checked_type == relay.TensorType((n, 10, 224),", "0, 0, 3, 0), out_shape=(1, 3, 16, 19, 16)) _test_pool3d(relay.nn.avg_pool3d,", "4 t = relay.var(\"t\", relay.TensorType((n, c, h, w), \"float32\")) y", "yy.checked_type == relay.TensorType( (n, d, h, w, 16), \"int32\") def", "intrp2.evaluate(func)(x_data) tvm.testing.assert_allclose(op_res2.asnumpy(), ref_res, rtol=1e-5) def test_l2_normalize(): n, c , h,", "target) # Check that both non-divisible oc and ic work", "'int32') # Sweep the input channels to check int8 robustness", "ref_res, rtol=1e-5, atol=1e-5) def test_conv2d_transpose_nhwc_run(): dshape_nhwc = (1, 18, 18,", "w = relay.var(\"w\") y = relay.nn.conv1d(x, w, kernel_size=3, padding=(1, 1),", "kernel_layout='OIHW', dtypes=dtypes) assert _has_fast_int8_instructions(asm, target) for oc in [4, 16,", "rtol=1e-3, atol=1e-3) # normal winograd: stride 1, padding 1, kernel", "= run_infer_type(y) assert yy.checked_type == relay.TensorType( (n, 2, 224, 224,", "import ctx_list, run_infer_type from tvm.contrib import util import topi.testing def", "224, 224 x = relay.var(\"x\", relay.ty.TensorType((n, c, h, w), \"float32\"))", "y = relay.nn.pad(t, ((1, 1), (2, 2), (3, 
3), (4,", "dshape = (1,) + ishape x = relay.var(\"x\", shape=dshape) y", "dtype) func = relay.Function([x], z) x_data = np.random.uniform(low=-1, high=1, size=shape).astype(dtype)", "tvm.testing.assert_allclose(op_res1.asnumpy(), ref_res, rtol=1e-5, atol=1e-5) # normal conv3d dshape = (1,", "\"float32\")) y = relay.nn.upsampling(x, scale_h=2, scale_w=2, layout=\"NCHW\", method=\"bilinear\") \"method=\\\"BINLINEAR\\\"\" in", "relay.TensorType( (n, d, h, w, 16), \"int32\") def test_conv3d_run(): def", "scale_w=2, layout=\"NCDHW\", method=\"trilinear\") yy = run_infer_type(y) assert yy.checked_type == relay.TensorType((n,", "Check that int8 x int8 goes through legalization so that", "padding=(1, 1), output_padding=(2, 2), data_layout=\"NHWC\", kernel_layout=\"HWIO\") func = relay.Function([x, w],", "dkernel.astype(out_dtype), 1, padding, groups=groups) else: ref_res = fref(data.astype(out_dtype), dkernel.astype(out_dtype)) for", "data_layout=\"NCHW\", kernel_layout='OIHW', dtypes=dtypes) assert _has_fast_int8_instructions(asm, target) for ic in [1,", "for oc in [4, 16, 20]: asm = _compile(ic=8, oc=oc,", "\"re\", [0, 1, 2, 3, 6, 4, 5]], \\ [\"ann_reduce\",", "topi.testing.conv2d_nchw_python( data.astype(out_dtype), kernel.astype(out_dtype), 1, padding, groups=groups) with WinogradFallback(), relay.build_config(opt_level=3): for", "kshape is HWOI and kernel_layout is HWIO y = relay.nn.conv2d_transpose(x,", "6, 10) run_test_conv3d(\"float32\", \"float32\", 1, dshape, kshape, padding=(1, 1, 1),", "x86 (skylake, cascadelake) and test assembly contains *pmadd* instructions targets", "range(ow): pad_count = np.sum(pad_np[:, :, i*sh:i*sh+kh, j*sw:j*sw+kw] > 0, axis=(2,3))", "= relay.nn.conv2d_transpose(x, w, channels=10, kernel_size=(3,3), strides=(2,2), padding=(1,1), output_padding=(2, 2)) func", "= \"float32\" data = np.random.uniform(size=dshape).astype(dtype) kernel = np.random.uniform(size=kshape).astype(dtype) c_np =", "run_test_conv2d(\"float32\", \"float32\", 1, dshape, kshape, padding=(1, 1), channels=10, kernel_size=(3 ,3),", "np.random.rand(5, 10, 5).astype(t1.dtype) ref_res = batch_flatten(data) for target, ctx in", "def _has_fast_int8_instructions(asm, target): if 'skylake-avx512' in target: return \"pmaddubs\" in", "ref_res = topi.testing.conv2d_nchw_python( data.astype(out_dtype), dkernel.astype(out_dtype), 1, padding, groups=groups) else: ref_res", "def test_upsampling(): _test_upsampling(\"NCHW\", \"nearest_neighbor\") _test_upsampling(\"NCHW\", \"bilinear\", True) _test_upsampling(\"NHWC\", \"nearest_neighbor\") _test_upsampling(\"NHWC\",", "relay.TensorType((n, c , h, w)) shape = (1, 5, 10,", "3, tvm.size_var(\"w\") t = relay.var(\"t\", relay.TensorType((n, c, h, w), \"float32\"))", "else: ref = topi.testing.trilinear_resize3d_python(data, (int(round(d*scale_d)),\\ int(round(h*scale_h)),\\ int(round(w*scale_w))), layout) for target,", "w, 16), \"int32\") def test_conv1d_run(): def run_test_conv1d(dtype, out_dtype, scale, dshape,", "10) x = relay.var(\"x\", shape=dshape_nhwc) w = relay.var(\"w\") # kshape", "_test_pool2d(relay.nn.max_pool2d, np.max) _test_pool2d(relay.nn.avg_pool2d, np.mean) _test_pool2d_int(relay.nn.avg_pool2d, np.mean, 'int32') _test_pool2d_int(relay.nn.avg_pool2d, np.mean, 'uint16')", "= module.get_output(0) tvm.testing.assert_allclose(op_res1.asnumpy(), ref_res, rtol=1e-3, atol=1e-3) # normal winograd: stride", "= run_infer_type(y) assert yy.checked_type == relay.TensorType( (n, 2, 222, 222,", "(1, 32, 18, 18) kshape = (64, 1, 3, 3)", "np.max) 
_test_pool2d(relay.nn.avg_pool2d, np.mean) _test_pool2d_int(relay.nn.avg_pool2d, np.mean, 'int32') _test_pool2d_int(relay.nn.avg_pool2d, np.mean, 'uint16') _test_global_pool2d(relay.nn.global_max_pool2d,", "2), padding, out_shape, pool_type, False) for target, ctx in ctx_list():", "\"float32\" data = np.random.uniform(size=dshape).astype(dtype) kernel = np.random.uniform(size=kshape).astype(dtype) c_np = topi.testing.conv1d_transpose_ncw_python(", "ctx in ctx_list(): intrp1 = relay.create_executor(\"graph\", ctx=ctx, target=target) op_res1 =", "run_infer_type(y) assert yy.checked_type == relay.TensorType((n + 2, 6, 9, w", "\"float32\" x = relay.var(\"x\", relay.TensorType(shape, dtype)) size=5 axis=1 bias=0.5 alpha=.00001", "if __name__ == \"__main__\": test_pool1d() test_pool2d() test_pool3d() test_avg_pool2d_no_count_pad() test_lrn() test_l2_normalize()", "= tvm.size_var(\"n\"), tvm.size_var(\"c\") x = relay.var(\"x\", relay.TensorType((n, c, 100, 200),", "def test_conv2d_run(): def run_test_conv2d(dtype, out_dtype, scale, dshape, kshape, padding=(1, 1),", "== \"nearest_neighbor\": ref = topi.testing.upsampling3d_python(data, (scale_d, scale_h, scale_w), layout) else:", "ishape x = relay.var(\"x\", shape=dshape) y = relay.nn.upsampling3d(x, scale_d=scale_d, scale_h=scale_h,", "\"vec\"]], \\ [\"data_pad_inline\", \"ot\", 4], [\"data_vec_inline\", \"ot\", 1], \\ [\"conv_inline\",", "relay.Function([x], y) data = np.random.random_integers(low=-128, high=128, size=dshape) ref_res = reffunc(data.reshape(1,3,14,2,14,2),", "return cfg def run_test_conv2d_cuda(dtype, out_dtype, scale, dshape, kshape, padding=(1, 1),", "relay.transform.InferType()(mod) data = np.random.uniform(-scale, scale, size=dshape).astype(dtype) kernel = np.random.uniform(-scale, scale,", "op_res2 = intrp2.evaluate(func)(x_data) tvm.testing.assert_allclose(op_res2.asnumpy(), ref_res, rtol=1e-5) def test_l2_normalize(): n, c", "oc in [4, 16, 20]: asm = _compile(ic=8, oc=oc, target=target,", "oc=29, target=target, data_layout=\"NHWC\", kernel_layout='HWIO', dtypes=dtypes) assert _has_fast_int8_instructions(asm, target) # Ensure", "assert False, \"Target should be Skylake or Cascadelake\" # compile", "= opfunc(x, pool_size=(2, 2), strides=(2, 2), padding=(0, 0)) func =", "'uint16') _test_global_pool2d(relay.nn.global_max_pool2d, np.max) _test_global_pool2d(relay.nn.global_avg_pool2d, np.mean) def test_pool1d(): def _test_pool1d(opfunc): n,", "_has_fast_int8_instructions(asm, target) # Sweep the output channels to check int8", "as np import tvm from tvm import autotvm from tvm", "# kshape and kernel_layout should have swapped IO. 
# kshape", "scale_w=2, layout=\"NCHW\", method=\"bilinear\") yy = run_infer_type(y) assert yy.checked_type == relay.TensorType((n,", "(3, 3, 10, 3) oshape_nhwc = (1, 37, 37, 10)", "atol=1e-5) def test_flatten_infer_type(): d1, d2, d3, d4 = tvm.size_var(\"d1\"), tvm.size_var(\"d2\"),", "10, 224 x = relay.var(\"x\", relay.TensorType((n, c, w), \"uint8\")) w", "ref_res = np.pad(data, ((1, 1), (2, 2), (3, 3), (4,", "kshape = (32, 4, 3, 3) run_test_conv2d(\"float32\", \"float32\", 1, dshape,", "kernel_size=(3, 3)) run_test_conv2d_cuda(\"float32\", \"float32\", 1, dshape, kshape, padding=(2, 2), channels=192,", "target, ctx in ctx_list(): intrp1 = relay.create_executor(\"graph\", ctx=ctx, target=target) intrp2", "10, 10) dtype = \"float32\" x = relay.var(\"x\", relay.TensorType(shape, dtype))", "3, 16, 19, 16)) _test_pool3d(relay.nn.max_pool3d, padding=(0, 0, 4, 0, 0,", "10, 224 x = relay.var(\"x\", relay.TensorType((n, c, w), \"int8\")) w", "run_infer_type(y) assert yy.checked_type == relay.TensorType((3, 6, 9, 12), \"float32\") #", "relay.var(\"x\", relay.TensorType((n, c, 100, 200), \"float32\")) y = relay.nn.upsampling(x, scale_h=2,", "true kshape layout here - HWOI c_np = topi.testing.conv2d_transpose_nhwc_python(data, kernel,", "_compile(ic=8, oc=oc, target=target, data_layout=\"NCHW\", kernel_layout='OIHW', dtypes=dtypes) assert _has_fast_int8_instructions(asm, target) for", "= 'uint8' weight_dtype = 'int8' output_dtype = 'int32' data_shape =", "c), \"int8\")) wt = relay.var(\"w\") y = relay.nn.conv3d(x, wt, kernel_size=(3,", "op_res1 = intrp1.evaluate(func)(x_data) tvm.testing.assert_allclose(op_res1.asnumpy(), ref_res, rtol=1e-5) op_res2 = intrp2.evaluate(func)(x_data) tvm.testing.assert_allclose(op_res2.asnumpy(),", "224) kshape = (10, 3, 3) run_test_conv1d(\"float32\", \"float32\", 1, dshape,", "Test axis packing shape inference. o, i, h, w =", "= topi.testing.trilinear_resize3d_python(data, (int(round(d*scale_d)),\\ int(round(h*scale_h)),\\ int(round(w*scale_w))), layout) for target, ctx in", "assert yy.checked_type == relay.TensorType((n, c, tvm.expr.Cast(\"int32\", tvm.round(h*scale)), tvm.expr.Cast(\"int32\", tvm.round(w*scale))), \"float32\")", "intrp1.evaluate(func)(data) tvm.testing.assert_allclose(op_res1.asnumpy(), ref_res, rtol=1e-5, atol=1e-5) def test_pool2d(): _test_pool2d(relay.nn.max_pool2d, np.max) _test_pool2d(relay.nn.avg_pool2d,", "coordinate_transformation_mode=coordinate_transformation_mode) yy = run_infer_type(y) assert yy.checked_type == relay.TensorType((n,) + oshape,", "kernel_layout='HWIO', dtypes=dtypes) assert _has_fast_int8_instructions(asm, target) # Check that both non-divisible", "c, d, h, w), \"uint8\")) w = relay.var(\"w\", relay.TensorType((2, 10,", "= np.zeros(shape=oshape) d_np[:,:,0:c_np.shape[2],0:c_np.shape[3]] = c_np ref_res = d_np for target,", "[1, 512, 32, 32, \"float32\"], \\ [512, 1, 3, 3,", "in y.astext() yy = run_infer_type(y) assert yy.checked_type == relay.TensorType((n, 10,", "kshape and kernel_layout should have swapped IO. 
# kshape is", "params = tvm.relay.build(mod, target=\"llvm -device=arm_cpu\") # depthwise conv2d dshape =", "pool_type, False) for target, ctx in ctx_list(): intrp1 = relay.create_executor(\"graph\",", "x = relay.var(\"x\", relay.TensorType((n,) + ishape, dtype)) y = relay.nn.upsampling(x,", "3x3 run_test_conv2d_cuda(\"float32\", \"float32\", 1, dshape, kshape, padding=(0, 0), channels=192, kernel_size=(3,", "2, 222, 222, 222), \"int32\") # infer shape in case", "3, 4 t = relay.var(\"t\", relay.TensorType((n, c, h, w), \"float32\"))", "+ dilation) if fref is None: ref_res = topi.testing.conv3d_ncdhw_python( data.astype(out_dtype),", "bias=bias, alpha=alpha, beta=beta) yy = run_infer_type(z) assert yy.checked_type == relay.TensorType(shape,", "conv2d dshape = (1, 32, 18, 18) kshape = (32,", "\"float32\"], \\ {\"i\": 743640, \"t\": \"contrib_spatial_pack\", \"c\": null, \\ \"e\":", "intrp1.evaluate(func)(data) tvm.testing.assert_allclose(op_res1.asnumpy(), ref_res, rtol=1e-5, atol=1e-5) def test_flatten_infer_type(): d1, d2, d3,", "== relay.TensorType( (n, 2, 222), \"int32\") # infer shape in", "relay.nn.upsampling3d(x, scale_d=scale_d, scale_h=scale_h, scale_w=scale_w,\\ layout=layout, method=method,\\ coordinate_transformation_mode=coordinate_transformation_mode) yy = run_infer_type(y)", "= (3, 3, 3, 6, 10) run_test_conv3d(\"float32\", \"float32\", 1, dshape,", "pool_type = 'max' if 'max' in str(opfunc) else 'avg' y", "b_np[:,:,i,j] = np.sum(pad_np[:, :, i*sh:i*sh+kh, j*sw:j*sw+kw], axis=(2,3)) / np.maximum(pad_count, 1)", "relay.nn.upsampling(x, scale_h=2, scale_w=2, layout=\"NCHW\", method=\"bilinear\") yy = run_infer_type(y) assert yy.checked_type", "https://github.com/apache/incubator-tvm/pull/3070#issuecomment-486597553 # group conv2d dshape = (1, 32, 18, 18)", "relay.var(\"x\", relay.TensorType((n, c, h, w), \"float32\")) w = relay.var(\"w\", relay.IncompleteType())", "# symbolic in batch dimension n, c, w = tvm.var(\"n\"),", "224, 224 x = relay.var(\"x\", relay.TensorType((n, d, h, w, c),", "222, 222), \"int16\") def test_bitpack_infer_type(): # Test axis packing shape", "= tvm.size_var(\"n\"), 10, 224, 224 x = relay.var(\"x\", relay.ty.TensorType((n, c,", "with NDHWC n, c, d, h, w = 4, 32,", "test execution dtype = \"float32\" dshape = (1, 3, 32)", "relay.var(\"w\", dtype=dtype) y = relay.nn.conv3d(x, w, padding=padding, dilation=dilation, groups=groups, **attrs)", "scale_d=2, scale_h=2, scale_w=2, layout=\"NCDHW\", method=\"trilinear\") yy = run_infer_type(y) assert yy.checked_type", "tvm.var(\"n\"), 10, 224 x = relay.var(\"x\", relay.TensorType((n, c, w), \"uint8\"))", "if method == \"nearest_neighbor\": ref = topi.testing.upsampling3d_python(data, (scale_d, scale_h, scale_w),", "relay.build(func, target, params=parameters) assembly = lib.get_source(\"asm\") return assembly def _has_fast_int8_instructions(asm,", "groups=512, kernel_size=(3 ,3)) # CUDA is disabled for 'direct' schedule:", "assert yy.checked_type == relay.TensorType( (n, w, 16), \"int32\") def test_conv1d_run():", "\"int16\")) y = relay.nn.bitpack(x, bit_axis=4, pack_axis=1, pack_type='uint16', bits=1) yy =", "10, 5, 224, 224 x = relay.var(\"x\", relay.TensorType((n, c, d,", "h, w, ic) x = relay.var(\"x\", relay.TensorType(data_shape, input_dtype)) else: raise", "6, 4, 5]], \\ [\"ann_reduce\", \"an\", [\"unroll\", \"none\"]], \\ [\"ann_spatial\",", "channels=512, groups=512, kernel_size=(3 ,3)) # CUDA is disabled for 'direct'", "= \"llvm -mcpu=core-avx2\" fast_int8_dtypes = ('uint8', 'int8', 'int32') asm =", "2, 1) 
d_np = np.zeros(shape=oshape) d_np[:,:,0:c_np.shape[2],0:c_np.shape[3]] = c_np ref_res =", "n, c, h, w = 4, 32, 224, 224 x", "2), (3, 3), (4, 4)), 'constant') for target, ctx in", "if target != 'cuda': continue params = {'w': tvm.nd.array(kernel)} graph,", "(64, 1, 3, 3) weight = relay.var(\"weight\", relay.TensorType(kernel_shape, weight_dtype)) y", "test_conv2d_int8_intrinsics(): def _compile(ic, oc, target, data_layout, kernel_layout, dtypes): input_dtype, weight_dtype,", "test_upsampling3d_infer_type() test_flatten_infer_type() test_pad_infer_type() test_pad_run() test_conv2d_transpose_infer_type() test_conv2d_transpose_nchw_run() test_conv2d_transpose_nhwc_run() test_conv1d_transpose_ncw_run() test_conv1d_run() test_conv2d_run()", "with relay.build_config(opt_level=3): print('Compiling...') graph_json, mod, params = tvm.relay.build(mod, target=\"llvm -device=arm_cpu\")", "18) kshape = (10, 3, 3) run_test_conv1d(\"float32\", \"float32\", 1, dshape,", "\"re\", [0, 1, 2, 3, 4, 5, 8, 6, 7]],", "(1, 1024, 7, 7) x = relay.var(\"x\", shape=dshape) y =", "# normal conv3d dshape = (1, 5, 224, 224, 6)", "w = relay.var(\"w\") # kshape and kernel_layout should have swapped", "3)) def test_conv2d_winograd(): class WinogradFallback(autotvm.FallbackContext): def _query_inside(self, target, workload): key", "kernel_size=(3, 3), padding=(1, 1), channels=16, data_layout=\"NHWC\", out_dtype=\"int32\") yy = run_infer_type(y)", "get_shape(): if layout == \"NCDHW\": return (c, d, h, w),", "kshape layout here - HWOI c_np = topi.testing.conv2d_transpose_nhwc_python(data, kernel, 'HWOI',", "Input channels should be a multiple of 4 internally. for", "= np.sum(pad_np[:, :, i*sh:i*sh+kh, j*sw:j*sw+kw] > 0, axis=(2,3)) b_np[:,:,i,j] =", "y = relay.nn.conv2d(x, weight, kernel_size=(3, 3), groups=64, padding=(1, 1), dilation=(1,", "dkernel = topi.testing.dilate_python(kernel, (1, 1) + dilation) if fref is", "def test_bitserial_conv2d_infer_type(): # Basic shape test with ambiguous batch. n,", "yy.args[1].checked_type == relay.TensorType( (2, 10, 3), \"float32\") # infer by", "bit_axis=4, pack_axis=1, pack_type='uint16', bits=1) yy = run_infer_type(y) assert yy.checked_type ==", "tvm.const(2.0, \"float64\") x = relay.var(\"x\", relay.TensorType((n, c, d, h, w),", "16, 16, 16)): n, c, d, h, w = tvm.size_var(\"n\"),", "llvm_version >= 8: dtypes = (('int8', 'int8', 'int32')) # Check", "import transform from tvm.relay.testing import ctx_list, run_infer_type from tvm.contrib import", "cw = 1, 64, 64, 3, 3 if data_layout ==", "# Test axis packing shape inference. 
o, i, h, w", "h, w), \"float32\")) w = relay.var(\"w\", relay.IncompleteType()) y = relay.nn.conv2d_transpose(x,", "c, h, w), \"float32\")) y = opfunc(x, pool_size=(1, 1)) assert", "10, 224, 224 x = relay.var(\"x\", relay.TensorType((n, c, h, w),", "= relay.Module() mod['main'] = func mod = relay.transform.InferType()(mod) data =", "relay.nn.upsampling(x, scale_h=2, scale_w=2, layout=\"NCHW\", method=\"bilinear\") \"method=\\\"BINLINEAR\\\"\" in y.astext() yy =", "200, 400), \"float32\") def _test_pool2d(opfunc, reffunc): n, c, h, w", "1) ref_res = np.maximum(b_np, 0.0) data = a_np for target,", "scale_w = 2.0 dtype = \"float32\" def get_shape(): if layout", "== relay.TensorType( (10, 15, 3, 3), \"float32\") # infer by", "params=params) module = tvm.contrib.graph_runtime.create(graph, lib, ctx) module.set_input('x', tvm.nd.array(data)) module.set_input(**params) module.run()", "# Infer with NDHWC n, c, d, h, w =", "\"float32\")) w = relay.var(\"w\") y = relay.nn.conv2d(x, w, kernel_size=(3, 3),", "3) run_test_conv2d(\"float32\", \"float32\", 1, dshape, kshape, padding=(1, 1), channels=64, groups=32,", "'avg' y = opfunc(x, pool_size=(2,), strides=(2,), padding=(0, 0)) func =", "= relay.nn.upsampling3d(x, scale_d=scale_d, scale_h=scale_h, scale_w=scale_w,\\ layout=layout, method=method,\\ coordinate_transformation_mode=coordinate_transformation_mode) yy =", "_test_upsampling3d(\"NDHWC\", \"nearest_neighbor\") _test_upsampling3d(\"NDHWC\", \"trilinear\", \"align_corners\") def test_conv2d_int8_intrinsics(): def _compile(ic, oc,", "assert \"vpmulld\" in asm and \"vpadd\" in asm def test_depthwise_conv2d_int8():", "tvm.testing.assert_allclose(out.asnumpy(), ref, rtol=1e-5, atol=1e-5) def test_upsampling3d(): _test_upsampling3d(\"NCDHW\", \"nearest_neighbor\") _test_upsampling3d(\"NCDHW\", \"trilinear\",", "tvm.size_var(\"n\"), 10, 224, 224, 224 x = relay.var(\"x\", relay.ty.TensorType((n, c,", "0, 3, 0), out_shape=(1, 3, 16, 19, 16)) _test_pool3d(relay.nn.avg_pool3d, padding=(0,", "assert yy.checked_type == relay.TensorType((n, c, 200, 400), \"float32\") def test_upsampling3d_infer_type():", "test_pad_run() test_conv2d_transpose_infer_type() test_conv2d_transpose_nchw_run() test_conv2d_transpose_nhwc_run() test_conv1d_transpose_ncw_run() test_conv1d_run() test_conv2d_run() test_conv2d_winograd() test_conv3d_run() test_conv3d_ndhwc_run()", "\"vpdpbusd\" in asm else: assert False, \"Target should be Skylake", "w = 32, 32, 128, 128 x = relay.var(\"x\", relay.ty.TensorType((o,", "np.random.uniform(size=dshape).astype(dtype) ref_res = topi.testing.pool1d_ncw_python(data, (2,), (2,), (0, 0), (1, 3,", "assert yy.checked_type == relay.TensorType( (n, 2, 222, 222, 222), \"int32\")", "infer shape in case of different dtypes for input and", "different dtypes for input and weight. 
n, c, w =", "from tvm import relay from tvm.relay import transform from tvm.relay.testing", "kernel_size=(3 ,3), except_targets=['cuda']) # normal conv2d dshape = (1, 3,", "3, 16, 16, 20)) _test_pool3d(relay.nn.avg_pool3d) _test_pool3d(relay.nn.avg_pool3d, padding=(2, 0, 0, 2,", "np.sum(pad_np[:, :, i*sh:i*sh+kh, j*sw:j*sw+kw] > 0, axis=(2,3)) b_np[:,:,i,j] = np.sum(pad_np[:,", "fref(data.astype(out_dtype), dkernel.astype(out_dtype)) for target, ctx in ctx_list(): if target in", "(scale_h, scale_w), layout) else: ref = topi.testing.bilinear_resize_python(data, (int(round(h*scale_h)), int(round(w*scale_w))), layout)", "size=dshape) ref_res = reffunc(data.reshape(1,3,14,2,14,2), axis=(3,5)).astype(dtype) for target, ctx in ctx_list():", "data_layout=\"NHWC\", kernel_layout='HWIO', dtypes=dtypes) assert _has_fast_int8_instructions(asm, target) # Sweep the output", "tvm.size_var(\"n\"), 10, 10, 12 x = relay.var(\"x\", relay.TensorType((n, c, h,", "w: topi.testing.depthwise_conv2d_python_nchw( x, w, (1, 1), \"SAME\")) # depthwise conv2d", "of 16 internally. for oc in [4, 16, 20]: asm", "\"int8\")) wt = relay.var(\"w\") y = relay.nn.conv2d(x, wt, kernel_size=(3, 3),", "dtype=dtype) y = opfunc(x, pool_size=(2, 2), strides=(2, 2), padding=(0, 0))", "relay.nn.l2_normalize(x, eps=0.001, axis=[1]) \"axis=\" in y.astext() yy = run_infer_type(y) assert", "2.0 scale_w = 2.0 dtype = \"float32\" def get_shape(): if", "relay.nn.conv2d(x, weight, kernel_size=(ch, cw), channels=oc, padding=(1, 1), dilation=(1, 1), data_layout=data_layout,", "oc, oh, ow)).astype(dtype) for i in range(oh): for j in", "tvm.var(\"n\"), 10, 224 x = relay.var(\"x\", relay.TensorType((n, c, w), \"float32\"))", "4, 32, 224, 224 x = relay.var(\"x\", relay.TensorType((n//4, c//4, h,", "= relay.Function([x], y) data = np.random.uniform(size=dshape).astype(dtype) ref_res = reffunc(data, axis=(2,3),", "\"float32\", 1, dshape, kshape, padding=(1, 1), channels=64, groups=32, kernel_size=(3 ,3),", "n, c, w = tvm.var(\"n\"), 10, 224 x = relay.var(\"x\",", "express or implied. 
See the License for the # specific", "target_dim)) def test_batch_flatten(): t1 = relay.TensorType((5, 10, 5)) x =", "w, padding=padding, dilation=dilation, **attrs) func = relay.Function([x, w], y) data", "\"uint8\")) w = relay.var(\"w\", relay.TensorType((2, 10, 3, 3, 3), \"int8\"))", "out_shape=(1, 3, 16, 16, 20)) _test_pool3d(relay.nn.avg_pool3d) _test_pool3d(relay.nn.avg_pool3d, padding=(2, 0, 0,", "h, w), \"uint8\")) w = relay.var(\"w\", relay.TensorType((2, 10, 3, 3,", "tvm.size_var(\"c\"), 224, 224 x = relay.var(\"x\", relay.TensorType((n, h, w, c),", "relay.nn.upsampling3d(x, scale_d=scale_d, scale_h=scale_h, scale_w=scale_w,\\ layout=layout, method=method,\\ coordinate_transformation_mode=coordinate_transformation_mode) func = relay.Function([x],", "padding=(1, 1), dilation=(1, 1), out_dtype=output_dtype) func = relay.Function([x, weight], y)", "kernel_size=(3 ,3), dilation=(3, 3)) def test_conv2d_winograd(): class WinogradFallback(autotvm.FallbackContext): def _query_inside(self,", "topi.testing.conv2d_nchw_python( data.astype(out_dtype), dkernel.astype(out_dtype), 1, padding, groups=groups) else: ref_res = fref(data.astype(out_dtype),", "np.sum(pad_np[:, :, i*sh:i*sh+kh, j*sw:j*sw+kw], axis=(2,3)) / np.maximum(pad_count, 1) ref_res =", "precision n, h, w, c = tvm.size_var(\"n\"), 10, 10, 12", "shape=dshape) pool_type = 'max' if 'max' in str(opfunc) else 'avg'", "y = relay.nn.conv2d(x, weight, kernel_size=(ch, cw), channels=oc, padding=(1, 1), dilation=(1,", "x_data.flatten().reshape(o_shape) for target, ctx in ctx_list(): intrp1 = relay.create_executor(\"graph\", ctx=ctx,", "ph, pw = (2, 2) n = 1 (ic, ih,", "y = relay.nn.conv3d(x, w, padding=padding, dilation=dilation, groups=groups, data_layout=\"NDHWC\", kernel_layout=\"DHWIO\", **attrs)", "10, 10, 12 x = relay.var(\"x\", relay.TensorType((n, c, h, w),", "IO. # kshape is HWOI and kernel_layout is HWIO y", "1) d_np = np.zeros(shape=oshape) d_np[:,:,0:c_np.shape[2]] = c_np ref_res = d_np", "= topi.testing.upsampling3d_python(data, (scale_d, scale_h, scale_w), layout) else: ref = topi.testing.trilinear_resize3d_python(data,", "n, c, h, w = tvm.size_var(\"n\"), 32, 224, 224 x", "\"float32\")) y = opfunc(x) yy = run_infer_type(y) assert yy.checked_type ==", "target) # Sweep the output channels to check int8 robustness", "\"nearest_neighbor\": ref = topi.testing.upsampling3d_python(data, (scale_d, scale_h, scale_w), layout) else: ref", "may obtain a copy of the License at # #", "20)) def test_avg_pool2d_no_count_pad(): kh, kw = (4, 4) sh, sw", "1), channels=32, groups=32, kernel_size=(3 ,3), fref=lambda x, w: topi.testing.depthwise_conv2d_python_nchw( x,", "return (d, h, w, c), (int(round(d*scale_d)), int(round(h*scale_h)),\\ int(round(w*scale_w)), c) ishape,", "precision run_test_conv2d(\"int8\", \"int32\", 1, dshape, kshape, padding=(1, 1), channels=10, kernel_size=(3", "target, params=parameters) def test_bitserial_conv2d_infer_type(): # Basic shape test with ambiguous", "Intel # generations, because we default to NCHWc layout. 
target", "= topi.testing.lrn_python(x_data, size, axis, bias, alpha, beta) for target, ctx", "10, 7, 7) x = relay.var(\"x\", shape=dshape) y = relay.nn.pad(x,", "kernel_size=(3 ,3)) # mixed precision run_test_conv2d(\"int8\", \"int32\", 1, dshape, kshape,", "c, h, w = tvm.size_var(\"n\"), tvm.size_var(\"c\"), tvm.size_var(\"h\"), tvm.size_var(\"w\") x =", "\"float32\" dshape = (1, 3, 32, 32, 32) x =", "= run_infer_type(y) assert yy.checked_type == relay.TensorType( (32, 2, 128, 128,", "def batch_flatten(data): shape = data.shape target_dim = 1 for i", "normal conv2d dshape = (1, 3, 224, 224) kshape =", "yy.checked_type == relay.TensorType( (n, w, 16), \"int32\") def test_conv1d_run(): def", "_test_upsampling(layout, method, align_corners=False): n, c, h, w = tvm.size_var(\"n\"), 16,", "kernel_size=(3,), strides=(2,), padding=(1,), output_padding=(2,)) func = relay.Function([x, w], y) dtype", "\\ [\"TENSOR\", [512, 1, 3, 3], \"float32\"], \\ [1, 1],", "= relay.var(\"x\", relay.TensorType((n, c, h, w), \"float32\")) y = relay.nn.upsampling(x,", "d, h, w = 4, 32, 224, 224, 224 x", "(2,), (2,), (0, 0), (1, 3, 16), pool_type, False) for", "language governing permissions and limitations # under the License. \"\"\"", "# Infer with NWC n, c, w = 4, 32,", "yy.args[1].checked_type == relay.TensorType( (2, 10, 3, 3, 3), \"float32\") #", "op_res1 = intrp1.evaluate(func)(data) tvm.testing.assert_allclose(op_res1.asnumpy(), ref_res, rtol=1e-5, atol=1e-5) _test_pool1d(relay.nn.max_pool1d) _test_pool1d(relay.nn.avg_pool1d) def", "cfg['unroll_explicit'] = autotvm.task.space.OtherOptionEntity(1) self.memory[key] = cfg return cfg def run_test_conv2d_cuda(dtype,", "scale, dshape, kshape, padding=(1, 1), groups=1, dilation=(1, 1), **attrs): x", "c , h, w)) y = relay.nn.lrn(x, size=10, axis=2, bias=0.5,", "kernel_layout='HWIO', dtypes=dtypes) assert _has_fast_int8_instructions(asm, target) # Sweep the output channels", "by shape of w, mixed precision n, c, d, h,", "channels=10, kernel_size=(3, 3 ,3), except_targets=[\"cuda\"]) def test_conv2d_transpose_infer_type(): # symbolic in", "yy = run_infer_type(y) assert yy.checked_type == relay.TensorType( (n, 2, 224),", "ctx_list(): intrp1 = relay.create_executor(\"graph\", ctx=ctx, target=target) intrp2 = relay.create_executor(\"debug\", ctx=ctx,", "relay.var(\"x\", shape=dshape_nhwc) w = relay.var(\"w\") # kshape and kernel_layout should", "relay.TensorType((3, 24), \"float32\") x = relay.var(\"x\", relay.TensorType((d1, 2, d3, 3),", "sw), padding=(ph, pw), count_include_pad=False) func = relay.Function([x], y) dtype =", "500) dtype = \"float32\" x = relay.var(\"x\", relay.TensorType(shape, dtype)) z", "= (1, 10, 37) x = relay.var(\"x\", shape=dshape) w =", "'int8', 'int32') # Sweep the input channels to check int8", "relay.var(\"x\", relay.TensorType((n, h, w, c), \"float32\")) y = opfunc(x, layout=\"NHWC\")", "Foundation (ASF) under one # or more contributor license agreements.", "method=\"trilinear\") yy = run_infer_type(y) assert yy.checked_type == relay.TensorType((n, c, tvm.expr.Cast(\"int32\",", "= relay.create_executor(\"debug\", ctx=ctx, target=target) op_res1 = intrp1.evaluate(func)(x_data) tvm.testing.assert_allclose(op_res1.asnumpy(), ref_res, rtol=1e-5)", "x = relay.var(\"x\", relay.TensorType(data_shape, input_dtype)) else: raise ValueError('Not supported') if", "ref_res, rtol=1e-5, atol=1e-5) # normal conv3d dshape = (1, 3,", "= intrp1.evaluate(func)(data) tvm.testing.assert_allclose(op_res1.asnumpy(), ref_res, rtol=1e-5, atol=1e-5) 
_test_run('float32') _test_run('int32') def test_lrn():", "= relay.var(\"w\", dtype=dtype) y = relay.nn.conv3d(x, w, padding=padding, dilation=dilation, groups=groups,", "Infer with NDHWC n, c, d, h, w = 4,", "relay.var(\"x\", relay.TensorType((n, c, h, w), \"float32\")) y = opfunc(x, pool_size=(1,", "cfg['tile_rc'] = autotvm.task.space.SplitEntity([-1, 1]) cfg['auto_unroll_max_setp'] = autotvm.task.space.OtherOptionEntity(1500) cfg['unroll_explicit'] = autotvm.task.space.OtherOptionEntity(1)", "3, 3 if data_layout == 'NCHW': data_shape = (n, ic,", "vectorized instruction is generated for older Intel # generations, because", "for i in range(oh): for j in range(ow): pad_count =", "relay.var(\"x\", relay.TensorType((n, d, h, w, c), \"int8\")) wt = relay.var(\"w\")", "dshape, kshape, padding=(2, 2), channels=192, kernel_size=(7, 7)) def test_conv3d_infer_type(): #", "[\"TENSOR\", [512, 1, 3, 3], \"float32\"], \\ [1, 1], [1,", "padding=padding, dilation=dilation, groups=groups, **attrs) func = relay.Function([x, w], y) data", "6, 9, 12), \"float32\") # some symbolic values n, c,", "size=kshape).astype(dtype) ref_res = topi.testing.conv1d_ncw_python( data.astype(out_dtype), kernel.astype(out_dtype), 1, padding, dilation) for", "relay.nn.conv2d_transpose(x, w, channels=10, kernel_size=(3, 3), strides=(2, 2), padding=(1, 1), output_padding=(2,", "relay.nn.conv1d(x, wt, kernel_size=3, padding=(1, 1), channels=16, data_layout=\"NWC\", out_dtype=\"int32\") yy =", "\"int8\")) wt = relay.var(\"w\") y = relay.nn.conv1d(x, wt, kernel_size=3, padding=(1,", "1, dshape, kshape, padding=(1, 1), channels=10, kernel_size=3) # mixed precision", "0, 0, 4), out_shape=(1, 3, 16, 16, 20)) def test_avg_pool2d_no_count_pad():", "in compliance # with the License. You may obtain a", "# to you under the Apache License, Version 2.0 (the", "License for the # specific language governing permissions and limitations", "y.astext() yy = run_infer_type(y) assert yy.checked_type == relay.TensorType((n, c, tvm.expr.Cast(\"int32\",", "(32, 1, 3, 3) run_test_conv2d(\"float32\", \"float32\", 1, dshape, kshape, padding=(1,", "(2, 2) ph, pw = (2, 2) n = 1", "\"int32\", 1, dshape, kshape, padding=(1, 1), channels=10, kernel_size=3) # dilated", "relay.nn.avg_pool2d(x, pool_size=(kh, kw), strides=(sw, sw), padding=(ph, pw), count_include_pad=False) func =", "= _compile(ic=16, oc=32, target=target, data_layout=\"NCHW\", kernel_layout='OIHW', dtypes=fast_int8_dtypes) # Check that", "d, h, w = tvm.size_var(\"n\"), 8, 16, 16, 16 scale_d", "compile_test_conv2d_arm_cpu(\"float32\", \"float32\", 1, dshape, kshape, padding=(1, 1), channels=512, groups=512, kernel_size=(3", "import autotvm from tvm import relay from tvm.relay import transform", "mult and add instructions are generated. assert \"vpmulld\" in asm", "relay.nn.conv3d(x, w, out_dtype=\"int32\") assert \"out_dtype=\\\"int32\\\"\" in y.astext() yy = run_infer_type(y)", "0, 0, 2, 0, 0), out_shape=(1, 3, 18, 16, 16))", "instructions are generated. 
assert \"vpmulld\" in asm and \"vpadd\" in", "batch dimension n, c, d, h, w = tvm.size_var(\"n\"), 10,", "tvm.round(w*scale))), \"float32\") n, c = tvm.size_var(\"n\"), tvm.size_var(\"c\") x = relay.var(\"x\",", "= (4, 4) sh, sw = (2, 2) ph, pw", "func = relay.Function([x], y) data = np.random.random_integers(low=-128, high=128, size=dshape) ref_res", "kshape, padding=(1, 1), channels=192, kernel_size=(3, 3)) # extended winograd: stride", "shape f_out_shape = tuple(map(lambda x: int(x), run_infer_type(func).ret_type.shape)) assert out_shape ==", "1), out_dtype=output_dtype) func = relay.Function([x, weight], y) wdata = np.random.rand(*kernel_shape)", "supported') weight = relay.var(\"weight\", relay.TensorType(kernel_shape, weight_dtype)) y = relay.nn.conv2d(x, weight,", "autotvm.task.space.OtherOptionEntity(1) self.memory[key] = cfg return cfg def run_test_conv2d_cuda(dtype, out_dtype, scale,", "= np.random.uniform(size=dshape).astype(dtype) kernel = np.random.uniform(size=kshape).astype(dtype) c_np = topi.testing.conv1d_transpose_ncw_python( data, kernel,", "target) # Check that a vectorized instruction is generated for", "relay.create_executor(\"graph\", ctx=ctx, target=target) intrp2 = relay.create_executor(\"debug\", ctx=ctx, target=target) op_res1 =", "37, 37) x = relay.var(\"x\", shape=dshape) w = relay.var(\"w\") y", "run_infer_type(y) assert yy.checked_type == relay.TensorType((n, c, 1, 1), \"float32\") #", "= relay.nn.pad(x, ((1, 1), (2, 2), (3, 3), (4, 4)))", "_has_fast_int8_instructions(asm, target): if 'skylake-avx512' in target: return \"pmaddubs\" in asm", "robustness # Output channels should be a multiple of 16", "1, 3, 3) run_test_conv2d(\"float32\", \"float32\", 1, dshape, kshape, padding=(1, 1),", "ic in [1, 4, 6]: asm = _compile(ic=ic, oc=16, target=target,", "1), channels=16, data_layout=\"NCHW4n4c\", kernel_layout=\"OIHW4o4i\", out_dtype=\"int32\") yy = run_infer_type(y) assert yy.checked_type", "kernel_size=3) # dilated conv2d dshape = (1, 3, 18) kshape", "rtol=1e-5) op_res2 = intrp2.evaluate(func)(x_data) tvm.testing.assert_allclose(op_res2.asnumpy(), ref_res, rtol=1e-5) def test_l2_normalize(): n,", "3, 18, 18) kshape = (3, 10, 3, 3) oshape", "= relay.var(\"x\", relay.TensorType((n, c, h, w), \"uint8\")) w = relay.var(\"w\",", "\\ [\"ann_reduce\", \"an\", [\"unroll\", \"none\"]], \\ [\"ann_spatial\", \"an\", [\"unroll\", \"unroll\",", "multiple of 4 internally. for ic in [1, 4, 6]:", "c), \"int8\")) wt = relay.var(\"w\") y = relay.nn.conv2d(x, wt, kernel_size=(3,", "WinogradFallback(autotvm.FallbackContext): def _query_inside(self, target, workload): key = (target, workload) if", "w = 4, 32, 224 x = relay.var(\"x\", relay.TensorType((n, w,", "def test_conv1d_infer_type(): # symbolic in batch dimension n, c, w", "tvm.size_var(\"n\"), tvm.size_var(\"c\"), tvm.size_var(\"h\"), tvm.size_var(\"w\") x = relay.var(\"x\", relay.TensorType((n, c, h,", "is generated when datatypes are not HW supported. 
dtypes =", "\"float32\") n, c, h, w = tvm.size_var(\"n\"), tvm.size_var(\"c\"), tvm.size_var(\"h\"), tvm.size_var(\"w\")", "= relay.create_executor(\"graph\", ctx=ctx, target=target) op_res = intrp.evaluate(func)(data) np.testing.assert_allclose(op_res.asnumpy(), ref_res, rtol=0.01)", "== relay.TensorType( (n, 2, 222, 222), \"int32\") # infer shape", "= (64, 1, 3, 3) run_test_conv2d(\"float32\", \"float32\", 1, dshape, kshape,", "3, 3) run_test_conv3d(\"float32\", \"float32\", 1, dshape, kshape, padding=(1, 1, 1),", "check output shape f_out_shape = tuple(map(lambda x: int(x), run_infer_type(func).ret_type.shape)) assert", "target, ctx in ctx_list(): intrp1 = relay.create_executor(\"graph\", ctx=ctx, target=target) op_res1", "# Infer with a different layout n, c, h, w", "License is distributed on an # \"AS IS\" BASIS, WITHOUT", "yy.checked_type == relay.TensorType( (n, 2, 222), \"int32\") # infer shape", "(n, 2, 222), \"int32\") # Infer with NWC n, c,", "dtypes=dtypes) assert _has_fast_int8_instructions(asm, target) # Ensure that code is generated", "assert yy.checked_type == relay.TensorType( (n, 32, 222, 222), \"int16\") def", "1)) assert \"pool_size=\" in y.astext() yy = run_infer_type(y) assert yy.checked_type", "relay.TensorType((n, c, d, h, w), \"uint8\")) w = relay.var(\"w\", relay.TensorType((2,", "scale, size=kshape).astype(dtype) ref_res = topi.testing.conv1d_ncw_python( data.astype(out_dtype), kernel.astype(out_dtype), 1, padding, dilation)", "3, 3) oshape = (1, 10, 37, 37) x =", "if kernel_layout == 'OIHW': kernel_shape = (oc, ic, ch, cw)", "c_np = topi.testing.conv1d_transpose_ncw_python( data, kernel, 2, 1) d_np = np.zeros(shape=oshape)", "kshape, padding=(1, 1, 1), channels=10, kernel_size=(3, 3 ,3)) def test_conv3d_ndhwc_run():", "data = np.random.uniform(size=dshape).astype(dtype) ref_res = topi.testing.pool3d_ncdhw_python(data, (2, 2, 2), (2,", "1, 1), channels=10, kernel_size=(3, 3 ,3), except_targets=[\"cuda\"]) def test_conv2d_transpose_infer_type(): #", "(1, 18, 18, 3) kshape_hwoi = (3, 3, 10, 3)", "2, 0, 0), out_shape=(1, 3, 18, 16, 16)) _test_pool3d(relay.nn.max_pool3d, padding=(0,", "7) x = relay.var(\"x\", shape=dshape) y = opfunc(x) func =", "0, 4, 0, 0, 4), out_shape=(1, 3, 16, 16, 20))", "= (oc, ic, ch, cw) elif kernel_layout == 'HWIO': kernel_shape", "def test_pool2d(): _test_pool2d(relay.nn.max_pool2d, np.max) _test_pool2d(relay.nn.avg_pool2d, np.mean) _test_pool2d_int(relay.nn.avg_pool2d, np.mean, 'int32') _test_pool2d_int(relay.nn.avg_pool2d,", "1), (2, 2), (3, 3), (4, 4)), 'constant') for target,", "test_upsampling(): _test_upsampling(\"NCHW\", \"nearest_neighbor\") _test_upsampling(\"NCHW\", \"bilinear\", True) _test_upsampling(\"NHWC\", \"nearest_neighbor\") _test_upsampling(\"NHWC\", \"bilinear\",", "np.random.uniform(size=kshape).astype(dtype) c_np = topi.testing.conv2d_transpose_nchw_python( data, kernel, 2, 1) d_np =", "16), \"int32\") def test_conv2d_run(): def run_test_conv2d(dtype, out_dtype, scale, dshape, kshape,", "yy.checked_type == relay.TensorType((3, 6, 9, 12), \"float32\") # some symbolic", "ic) x = relay.var(\"x\", relay.TensorType(data_shape, input_dtype)) else: raise ValueError('Not supported')", "is generated for older Intel # generations, because we default", "= c_np ref_res = d_np for target, ctx in ctx_list():", "np.maximum(b_np, 0.0) data = a_np for target, ctx in ctx_list():", "37, 10) x = relay.var(\"x\", shape=dshape_nhwc) w = relay.var(\"w\") #", "2) n = 1 (ic, ih, iw) = (3, 28,", "dilation) for target, ctx in ctx_list(): if 
target in except_targets:", "goes through legalization so that fast instructions can be picked", "graph_json, mod, params = tvm.relay.build(mod, target=\"llvm -device=arm_cpu\") # depthwise conv2d", "1]) cfg['tile_rc'] = autotvm.task.space.SplitEntity([-1, 1]) cfg['auto_unroll_max_setp'] = autotvm.task.space.OtherOptionEntity(1500) cfg['unroll_explicit'] =", "limitations # under the License. \"\"\" Support level2 operator test", "have swapped IO. # kshape is HWOI and kernel_layout is", "relay.TensorType( (n, h, w, 16), \"int32\") def test_conv2d_run(): def run_test_conv2d(dtype,", "assert yy.checked_type == relay.TensorType((3, 6, 9, 12), \"float32\") # some", "x = relay.var(\"x\", relay.TensorType((d1, 2, d3, 3), \"float32\")) y =", "= np.random.rand(*kernel_shape) * 10 parameters = {\"weight\": tvm.nd.array(wdata.astype(weight_dtype))} targets =", "= (1, 3, 224) kshape = (10, 3, 3) run_test_conv1d(\"float32\",", "x = relay.var(\"x\", relay.TensorType((n, h, w, c), \"float32\")) y =", "= tvm.const(2.0, \"float64\") x = relay.var(\"x\", relay.TensorType((n, c, h, w),", "ic, h, w) x = relay.var(\"x\", relay.TensorType(data_shape, input_dtype)) elif data_layout", "1], [1, 1], [1, 1], \"float32\"], \\ {\"i\": 743640, \"t\":", "d3, 3), \"float32\")) y = relay.nn.batch_flatten(x) yy = run_infer_type(y) assert", "relay.var(\"x\", relay.TensorType((n//4, c//4, h, w, 4, 4), \"int8\")) wt =", "d_np[:,:,0:c_np.shape[2]] = c_np ref_res = d_np for target, ctx in", "relay.var(\"x\", relay.ty.TensorType((o, i, h, w), \"int16\")) y = relay.nn.bitpack(x, bit_axis=4,", "28) x = relay.var(\"x\", shape=dshape) y = opfunc(x, pool_size=(2, 2),", "= (2, 2) ph, pw = (2, 2) n =", "tvm.testing.assert_allclose(op_res1.asnumpy(), ref_res, rtol=1e-5) op_res2 = intrp2.evaluate(func)(x_data) tvm.testing.assert_allclose(op_res2.asnumpy(), ref_res, rtol=1e-5) def", "eps=0.001, axis=[axis]) yy = run_infer_type(z) assert yy.checked_type == relay.TensorType(shape, dtype)", "def test_pool1d(): def _test_pool1d(opfunc): n, c, w = tvm.var(\"n\"), 10,", "target != 'cuda': continue params = {'w': tvm.nd.array(kernel)} graph, lib,", "= relay.nn.conv3d(x, w, out_dtype=\"int32\") assert \"out_dtype=\\\"int32\\\"\" in y.astext() yy =", "1), channels=16, data_layout=\"NWC\", out_dtype=\"int32\") yy = run_infer_type(y) assert yy.checked_type ==", "intrp1.evaluate(func)(data, kernel) tvm.testing.assert_allclose(op_res1.asnumpy(), ref_res, rtol=1e-5, atol=1e-5) def test_upsampling_infer_type(): n, c", "relay.TensorType( (n, 2, 222), \"int32\") # infer shape in case", "= topi.testing.conv2d_nchw_python( data.astype(out_dtype), kernel.astype(out_dtype), 1, padding, groups=groups) with WinogradFallback(), relay.build_config(opt_level=3):", "y = opfunc(x, pool_size=(1, 1, 1)) assert \"pool_size=\" in y.astext()", "= relay.var(\"x\", relay.TensorType((n, c, h, w), \"float32\")) w = relay.var(\"w\",", "= relay.nn.avg_pool2d(x, pool_size=(kh, kw), strides=(sw, sw), padding=(ph, pw), count_include_pad=False) func", "autotvm.task.space.OtherOptionEntity(1500) cfg['unroll_explicit'] = autotvm.task.space.OtherOptionEntity(1) self.memory[key] = cfg return cfg def", "= relay.nn.bitpack(x, bit_axis=4, pack_axis=1, pack_type='uint16', bits=1) yy = run_infer_type(y) assert", "return \"pmaddubs\" in asm elif 'cascadelake' in target: return \"vpdpbusd\"", "_test_pool3d(opfunc, padding=(0, 0, 0, 0, 0, 0), out_shape=(1, 3, 16,", "= ('uint8', 'uint8', 'int32') asm = _compile(ic=16, oc=32, target=target, data_layout=\"NHWC\",", "(4, 4))) \"pad_width=\" in 
y.astext() yy = run_infer_type(y) assert yy.checked_type", "((1, 1), (2, 2), (3, 3), (4, 4))) func =", "x = relay.var(\"x\", relay.TensorType((n, h, w, c), \"float32\")) w =", "NHWC n, c, h, w = 4, 32, 224, 224", "= (n, h, w, ic) x = relay.var(\"x\", relay.TensorType(data_shape, input_dtype))", "\"float32\", 1, dshape, kshape, padding=(1, 1), channels=10, kernel_size=3) # mixed", "tvm.size_var(\"d\"), tvm.size_var(\"h\"), tvm.size_var(\"w\") scale = tvm.const(2.0, \"float64\") x = relay.var(\"x\",", "layout == \"NCDHW\": return (c, d, h, w), (c, int(round(d*scale_d)),", "the Apache Software Foundation (ASF) under one # or more", "222, 222), \"int32\") # infer shape in case of different", "0)) func = relay.Function([x], y) data = np.random.uniform(size=dshape).astype(dtype) ref_res =", "224, 224), \"float32\") # test execution dtype = \"float32\" dshape", "except_targets=['cuda']) # also group conv2d dshape = (1, 32, 18,", "Cascadelake\" # compile conv2d for x86 (skylake, cascadelake) and test", "in asm def test_depthwise_conv2d_int8(): input_dtype = 'uint8' weight_dtype = 'int8'", "test_conv2d_transpose_nchw_run() test_conv2d_transpose_nhwc_run() test_conv1d_transpose_ncw_run() test_conv1d_run() test_conv2d_run() test_conv2d_winograd() test_conv3d_run() test_conv3d_ndhwc_run() test_bitserial_conv2d_infer_type() test_batch_flatten()", "224 x = relay.var(\"x\", relay.TensorType((n, c, d, h, w), \"float32\"))", "out_shape=(1, 3, 18, 16, 16)) _test_pool3d(relay.nn.avg_pool3d, padding=(0, 3, 0, 0,", "tvm.expr.Cast(\"int32\", tvm.round(h*scale)), tvm.expr.Cast(\"int32\", tvm.round(w*scale))), \"float32\") n, c = tvm.size_var(\"n\"), tvm.size_var(\"c\")", "topi.testing.depthwise_conv2d_python_nchw( x, w, (1, 1), \"SAME\")) # depthwise conv2d for", "if 'max' in str(opfunc) else 'avg' y = opfunc(x, pool_size=(2,),", "targets = [\"llvm -mcpu=skylake-avx512\", \"llvm -mcpu=cascadelake\"] llvm_version = tvm.codegen.llvm_version_major() for", "-mcpu=core-avx2\" fast_int8_dtypes = ('uint8', 'int8', 'int32') asm = _compile(ic=16, oc=32,", "32) x = relay.var(\"x\", shape=dshape) pool_type = 'max' if 'max'", "+ dilation) if fref is None: ref_res = topi.testing.conv2d_nchw_python( data.astype(out_dtype),", "mod[\"main\"] = func test_schedule='{\"i\": [\"llvm -device=arm_cpu\", \"topi_nn_depthwise_conv2d_nchw\", \\ [[\"TENSOR\", [1,", "padding=(0, 0)) func = relay.Function([x], y) data = np.random.uniform(size=dshape).astype(dtype) ref_res", "infer by shape of w, mixed precision n, c, d,", "shape=dshape, dtype=dtype) w = relay.var(\"w\", dtype=dtype) y = relay.nn.conv2d(x, w,", "rtol=1e-5) def test_l2_normalize(): n, c , h, w = tvm.size_var(\"n\"),", "1, 1, 1]) cfg['tile_rc'] = autotvm.task.space.SplitEntity([-1, 1]) cfg['auto_unroll_max_setp'] = autotvm.task.space.OtherOptionEntity(1500)", "w = relay.var(\"w\") y = relay.nn.conv3d(x, w, kernel_size=(3, 3, 3),", "dilation=(1, 1), out_dtype=output_dtype) func = relay.Function([x, weight], y) wdata =", "op_res2 = intrp2.evaluate(func)(x_data) tvm.testing.assert_allclose(op_res2.asnumpy(), ref_res, rtol=1e-5) def test_pad_infer_type(): # entirely", "1], [1, 1], \"float32\"], {}, \\ [\"depthwise_conv2d_nchw\", [1, 512, 32,", "1, dshape, kshape, padding=(1, 1, 1), channels=10, kernel_size=(3, 3 ,3))", "_test_upsampling(\"NHWC\", \"nearest_neighbor\") _test_upsampling(\"NHWC\", \"bilinear\", True) def _test_upsampling3d(layout, method, coordinate_transformation_mode=\"half_pixel\"): n,", "sw = (2, 2) ph, pw = (2, 2) n", "16 scale_d = 2.0 scale_h = 2.0 scale_w = 
2.0", "except in compliance # with the License. You may obtain", "[\"unroll\", \"unroll\", \"vec\"]], \\ [\"data_pad_inline\", \"ot\", 4], [\"data_vec_inline\", \"ot\", 1],", "4), out_shape=(1, 3, 16, 16, 20)) _test_pool3d(relay.nn.avg_pool3d) _test_pool3d(relay.nn.avg_pool3d, padding=(2, 0,", "\"float32\")) y = relay.nn.pad(t, ((1, 1), (2, 2), (3, 3),", "relay.TensorType((n, c, w), \"float32\")) y = opfunc(x, pool_size=(1,)) assert \"pool_size=\"", "dilation=(1, 1), data_layout=data_layout, kernel_layout=kernel_layout, out_dtype=output_dtype) func = relay.Function([x, weight], y)", "present in the assembly. assert not _has_fast_int8_instructions(asm, target) # Check", "= executor.evaluate(func)(data) tvm.testing.assert_allclose(out.asnumpy(), ref, rtol=1e-5, atol=1e-5) def test_upsampling3d(): _test_upsampling3d(\"NCDHW\", \"nearest_neighbor\")", "scale_w=2, layout=\"NCHW\", method=\"bilinear\") \"method=\\\"BINLINEAR\\\"\" in y.astext() yy = run_infer_type(y) assert", "channels=32) yy = run_infer_type(y) assert yy.checked_type == relay.TensorType( (n, 32,", "(n, 2, 222, 222), \"int32\") # infer shape in case", "axis=[1]) \"axis=\" in y.astext() yy = run_infer_type(y) assert yy.checked_type ==", "3, 3), padding=(1, 1, 1), channels=16, data_layout=\"NDHWC\", out_dtype=\"int32\") yy =", "yy = run_infer_type(y) assert yy.checked_type == relay.TensorType( (n, h, w,", "dilation=1, except_targets=None, **attrs): if except_targets is None: except_targets = []", "yy.checked_type == relay.TensorType( (32, 2, 128, 128, 1), \"uint16\") if", "out_shape, pool_type, False) for target, ctx in ctx_list(): intrp1 =", "= 'int8' output_dtype = 'int32' data_shape = (1, 64, 56,", "= tvm.var(\"n\"), 10, 224 x = relay.var(\"x\", relay.ty.TensorType((n, c, w),", "license agreements. 
See the NOTICE file # distributed with this", "test_pad_run(): def _test_run(dtype): dshape = (4, 10, 7, 7) x", "= relay.var(\"x\", relay.TensorType(shape, dtype)) size=5 axis=1 bias=0.5 alpha=.00001 beta=0.75 z", "relay.TensorType((n,) + ishape, dtype)) y = relay.nn.upsampling(x, scale_h=scale_h, scale_w=scale_w, layout=layout,", "n, c, h, w = tvm.size_var(\"n\"), 10, 224, 224 x", "1], \"float32\"], {}, \\ [\"depthwise_conv2d_nchw\", [1, 512, 32, 32, \"float32\"],", "np.random.uniform(size=dshape).astype(dtype) if method == \"nearest_neighbor\": ref = topi.testing.upsampling_python(data, (scale_h, scale_w),", "required by applicable law or agreed to in writing, #", "c, 100, 100, 200), \"float32\")) y = relay.nn.upsampling3d(x, scale_d=2, scale_h=2,", "work asm = _compile(ic=17, oc=29, target=target, data_layout=\"NCHW\", kernel_layout='OIHW', dtypes=dtypes) assert", "kshape = (192, 80, 3, 3) run_test_conv2d_cuda(\"float32\", \"float32\", 1, dshape,", "that code is generated when datatypes are not HW supported.", "32, 18, 18) kshape = (32, 4, 3, 3) run_test_conv2d(\"float32\",", ",3)) # dilated conv2d dshape = (1, 3, 18, 18)", "np.random.uniform(size=dshape).astype(dtype) kernel = np.random.uniform(size=kshape).astype(dtype) c_np = topi.testing.conv2d_transpose_nchw_python( data, kernel, 2,", "10, 224), \"float32\") # test execution dtype = \"float32\" dshape", "in asm else: assert False, \"Target should be Skylake or", "tvm.testing.assert_allclose(op_res1.asnumpy(), ref_res, rtol=1e-5, atol=1e-5) def _test_pool2d_int(opfunc, reffunc, dtype): n, c,", "222, 222, 222), \"int32\") # Infer with NDHWC n, c,", "scale_d=scale_d, scale_h=scale_h, scale_w=scale_w,\\ layout=layout, method=method,\\ coordinate_transformation_mode=coordinate_transformation_mode) yy = run_infer_type(y) assert", "= relay.var(\"t\", relay.TensorType((n, c, h, w), \"float32\")) y = relay.nn.pad(t,", "== relay.TensorType((d1, ((2*d3)*3)), \"float32\") shape = (1, 5, 10, 10)", "('uint8', 'int8', 'int32') asm = _compile(ic=16, oc=32, target=target, data_layout=\"NCHW\", kernel_layout='OIHW',", "for target, ctx in ctx_list(): if target in except_targets: continue", "i in range(oh): for j in range(ow): pad_count = np.sum(pad_np[:,", "kernel, 'HWOI', 2, 1) d_np = np.zeros(shape=oshape_nhwc) d_np[:,0:c_np.shape[1],0:c_np.shape[2],:] = c_np", "1, 1]) cfg['tile_y'] = autotvm.task.space.SplitEntity([-1, 1, 1, 1]) cfg['tile_x'] =", "4), out_shape=(1, 3, 16, 16, 20)) def test_avg_pool2d_no_count_pad(): kh, kw", "4, 3), \"float32\")) y = relay.nn.batch_flatten(x) yy = run_infer_type(y) assert", "data_layout=\"NHWC\", kernel_layout='HWIO', dtypes=dtypes) assert _has_fast_int8_instructions(asm, target) # Ensure that code", "fref=lambda x, w: topi.testing.depthwise_conv2d_python_nchw( x, w, (1, 1), \"SAME\")) #", "also group conv2d dshape = (1, 32, 18, 18) kshape", "mixed precision. 
run_test_conv2d(\"int8\", \"int32\", 1, dshape, kshape, padding=(0, 1), channels=10,", "3), (4, 4))) \"pad_width=\" in y.astext() yy = run_infer_type(y) assert", "15, 3, 3), \"float32\") # infer by shape of w,", "def run_test_conv1d(dtype, out_dtype, scale, dshape, kshape, padding=(1, 1), fref=None, dilation=1,", "1, padding N, kernel NxN kshape = (192, 80, 7,", "3, 10, 3) oshape_nhwc = (1, 37, 37, 10) x", "topi.testing.lrn_python(x_data, size, axis, bias, alpha, beta) for target, ctx in", "224 x = relay.var(\"x\", relay.TensorType((n, c, w), \"int8\")) w =", "relay.Function([x], y) # check output shape f_out_shape = tuple(map(lambda x:", "yy.checked_type == relay.TensorType( (n, 2, 222, 222), \"int32\") # Infer", "224 x = relay.var(\"x\", relay.TensorType((n, d, h, w, c), \"int8\"))", "400), \"float32\") def test_upsampling3d_infer_type(): n, c, d, h, w =", "\"llvm -mcpu=core-avx2\" fast_int8_dtypes = ('uint8', 'int8', 'int32') asm = _compile(ic=16,", "10, 3), \"int8\")) y = relay.nn.conv1d(x, w, out_dtype=\"int32\") assert \"out_dtype=\\\"int32\\\"\"", "if target in except_targets: continue intrp1 = relay.create_executor(\"graph\", ctx=ctx, target=target)", "Check that both non-divisible oc and ic work asm =", "'constant') for target, ctx in ctx_list(): intrp1 = relay.create_executor(\"graph\", ctx=ctx,", "module.set_input('x', tvm.nd.array(data)) module.set_input(**params) module.run() op_res1 = module.get_output(0) tvm.testing.assert_allclose(op_res1.asnumpy(), ref_res, rtol=1e-3,", "kernel_layout=kernel_layout, out_dtype=output_dtype) func = relay.Function([x, weight], y) wdata = np.random.rand(*kernel_shape)", "func = relay.Function([x, weight], y) wdata = np.random.rand(*kernel_shape) * 10", "bits=1) yy = run_infer_type(y) assert yy.checked_type == relay.TensorType( (32, 2,", "kshape, padding=(1, 1), channels=10, kernel_size=(3 ,3), dilation=(3, 3)) def test_conv2d_winograd():", "\"float32\") def test_pad_run(): def _test_run(dtype): dshape = (4, 10, 7,", "1), channels=11, data_layout=\"NHWC\") yy = run_infer_type(y) assert yy.checked_type == relay.TensorType(", "relay.TensorType( (n, 2, 224, 224), \"float32\") assert yy.args[1].checked_type == relay.TensorType(", "16, 16, 20)) def test_avg_pool2d_no_count_pad(): kh, kw = (4, 4)", "= run_infer_type(y) assert yy.checked_type == relay.TensorType((n, 10, 5, 224, 224),", "224), \"float32\") assert yy.args[1].checked_type == relay.TensorType( (2, 10, 3, 3),", "of different dtypes for input and weight. 
n, c, h,", "kernel = np.random.uniform(-scale, scale, size=kshape).astype(dtype) dkernel = topi.testing.dilate_python(kernel, (1, 1)", "relay.TensorType((n, 10, 224, 224), \"float32\") # test execution dtype =", "t1 = relay.TensorType((5, 10, 5)) x = relay.Var(\"x\", t1) func", "pool_size=(1, 1, 1)) assert \"pool_size=\" in y.astext() yy = run_infer_type(y)", "cfg['tile_b'] = autotvm.task.space.SplitEntity([-1, 1, 1, 1]) cfg['tile_y'] = autotvm.task.space.SplitEntity([-1, 1,", "kernel.astype(out_dtype), 1, padding, groups=groups) with WinogradFallback(), relay.build_config(opt_level=3): for target, ctx", "ref_res, rtol=1e-5, atol=1e-5) def test_flatten_infer_type(): d1, d2, d3, d4 =", "yy.checked_type == relay.TensorType((3, 24), \"float32\") x = relay.var(\"x\", relay.TensorType((d1, 2,", "with autotvm.apply_history_best(temp.relpath(\"temp.log\")): with relay.build_config(opt_level=3): print('Compiling...') graph_json, mod, params = tvm.relay.build(mod,", "normal conv3d dshape = (1, 3, 5, 224, 224) kshape", "_test_pool2d_int(relay.nn.avg_pool2d, np.mean, 'int32') _test_pool2d_int(relay.nn.avg_pool2d, np.mean, 'uint16') _test_global_pool2d(relay.nn.global_max_pool2d, np.max) _test_global_pool2d(relay.nn.global_avg_pool2d, np.mean)", "(ASF) under one # or more contributor license agreements. See", "channels=10, kernel_size=(3 ,3)) # mixed precision run_test_conv2d(\"int8\", \"int32\", 1, dshape,", "np.mean, 'int32') _test_pool2d_int(relay.nn.avg_pool2d, np.mean, 'uint16') _test_global_pool2d(relay.nn.global_max_pool2d, np.max) _test_global_pool2d(relay.nn.global_avg_pool2d, np.mean) def", "(range(pw, iw+pw))) pad_np[np.ix_(*no_zero)] = a_np b_np = np.zeros(shape=(n, oc, oh,", "# or more contributor license agreements. See the NOTICE file", "28, 28) x = relay.var(\"x\", shape=dshape, dtype=dtype) y = opfunc(x,", "def test_avg_pool2d_no_count_pad(): kh, kw = (4, 4) sh, sw =", "yy.checked_type == relay.TensorType((n + 2, 6, 9, w + 8),", "dtypes=dtypes) assert _has_fast_int8_instructions(asm, target) # Check that int8 x int8", "shape in case of different dtypes for input and weight.", "ctx in ctx_list(): if target in except_targets: continue intrp1 =", "with a different layout n, c, h, w = 4,", "conv2d dshape = (1, 32, 18, 18) kshape = (64,", "4], [\"data_vec_inline\", \"ot\", 1], \\ [\"conv_inline\", \"ot\", 0]]}], \"r\": [[0.0002933163],", "16, 16)) _test_pool3d(relay.nn.avg_pool3d, padding=(0, 3, 0, 0, 3, 0), out_shape=(1,", "NCHWc layout. 
target = \"llvm -mcpu=core-avx2\" fast_int8_dtypes = ('uint8', 'int8',", "cascadelake) and test assembly contains *pmadd* instructions targets = [\"llvm", "kshape = (3, 10, 3) oshape = (1, 10, 37)", "= topi.testing.l2_normalize_python(x_data, eps, axis) for target, ctx in ctx_list(): intrp1", "execution dtype = \"float32\" dshape = (1, 3, 32) x", "= np.random.uniform(low=0.001, size=(n, ic, ih, iw)).astype(dtype) pad_np = np.zeros(shape=(n, ic,", "(int(round(h*scale_h)), int(round(w*scale_w))), layout) for target, ctx in ctx_list(): executor =", "z) x_data = np.random.uniform(low=-1, high=1, size=shape).astype(dtype) ref_res = x_data.flatten().reshape(o_shape) for", "relay.var(\"x\", shape=(n, c , h, w)) y = relay.nn.lrn(x, size=10,", "compile_test_conv2d_arm_cpu(dtype, out_dtype, scale, dshape, kshape, padding=(1, 1), groups=1, dilation=(1, 1),", "224 x = relay.var(\"x\", relay.ty.TensorType((n, c, d, h, w), \"float32\"))", "relay.var(\"w\") y = relay.nn.conv1d(x, wt, kernel_size=3, padding=(1, 1), channels=16, data_layout=\"NWC\",", "run_infer_type(y) assert yy.checked_type == relay.TensorType( (n, h, w, 16), \"int32\")", "= relay.var(\"x\", relay.ty.TensorType((n, c, d, h, w), \"float32\")) w =", "scale_h=2, scale_w=2, layout=\"NCDHW\", method=\"trilinear\") yy = run_infer_type(y) assert yy.checked_type ==", "= np.random.uniform(size=dshape).astype(dtype) ref_res = np.pad(data, ((1, 1), (2, 2), (3,", "opfunc(x, pool_size=(1,)) assert \"pool_size=\" in y.astext() yy = run_infer_type(y) assert", "ih+ph)), (range(pw, iw+pw))) pad_np[np.ix_(*no_zero)] = a_np b_np = np.zeros(shape=(n, oc,", "rtol=1e-5) op_res2 = intrp2.evaluate(func)(x_data) tvm.testing.assert_allclose(op_res2.asnumpy(), ref_res, rtol=1e-5) def test_pad_infer_type(): #", "relay.var(\"weight\", relay.TensorType(kernel_shape, weight_dtype)) y = relay.nn.conv2d(x, weight, kernel_size=(ch, cw), channels=oc,", "3, 3, 3) run_test_conv3d(\"float32\", \"float32\", 1, dshape, kshape, padding=(1, 1,", "in ctx_list(): intrp1 = relay.create_executor(\"graph\", ctx=ctx, target=target) intrp2 = relay.create_executor(\"debug\",", "1, 1]) cfg['tile_x'] = autotvm.task.space.SplitEntity([-1, 1, 1, 1]) cfg['tile_rc'] =", "= relay.nn.upsampling(x, scale_h=2, scale_w=2, layout=\"NCHW\", method=\"bilinear\") \"method=\\\"BINLINEAR\\\"\" in y.astext() yy", "llvm_version = tvm.codegen.llvm_version_major() for target in targets: if llvm_version >=", "== relay.TensorType((n, c, 200, 400), \"float32\") def test_upsampling3d_infer_type(): n, c,", "dshape, kshape, padding=(0, 0), channels=192, kernel_size=(3, 3)) run_test_conv2d_cuda(\"float32\", \"float32\", 1,", "test_flatten_infer_type(): d1, d2, d3, d4 = tvm.size_var(\"d1\"), tvm.size_var(\"d2\"), tvm.size_var(\"d3\"), tvm.size_var(\"d4\")", "= relay.var(\"weight\", relay.TensorType(kernel_shape, weight_dtype)) y = relay.nn.conv2d(x, weight, kernel_size=(ch, cw),", "asm = _compile(ic=17, oc=29, target=target, data_layout=\"NHWC\", kernel_layout='HWIO', dtypes=dtypes) assert _has_fast_int8_instructions(asm,", "np.random.uniform(-scale, scale, size=kshape).astype(dtype) ref_res = topi.testing.conv2d_nchw_python( data.astype(out_dtype), kernel.astype(out_dtype), 1, padding,", "rtol=1e-5, atol=1e-5) def test_conv2d_transpose_nhwc_run(): dshape_nhwc = (1, 18, 18, 3)", "h, w), \"uint8\")) w = relay.var(\"w\", relay.TensorType((2, 10, 3, 3),", "y = relay.nn.conv2d(x, w, out_dtype=\"int32\") assert \"out_dtype=\\\"int32\\\"\" in y.astext() yy", "relay.var(\"x\", relay.TensorType((n, c, w), \"float32\")) y = 
opfunc(x, pool_size=(1,)) assert", "use true kshape layout here - HWOI c_np = topi.testing.conv2d_transpose_nhwc_python(data,", "+ 8), \"float32\") def test_pad_run(): def _test_run(dtype): dshape = (4,", "(('int8', 'int8', 'int32')) # Check that both non-divisible oc and", "scale_h=2, scale_w=2, layout=\"NCHW\", method=\"bilinear\") \"method=\\\"BINLINEAR\\\"\" in y.astext() yy = run_infer_type(y)", "224, 224 x = relay.var(\"x\", relay.TensorType((n, c, h, w), \"float32\"))", "d_np[:,0:c_np.shape[1],0:c_np.shape[2],:] = c_np def test_conv1d_transpose_ncw_run(): dshape = (1, 3, 18)", "1, 3, 3) weight = relay.var(\"weight\", relay.TensorType(kernel_shape, weight_dtype)) y =", "\"float32\", 1, dshape, kshape, padding=(1, 1), channels=10, kernel_size=3, dilation=3) def", "# Check that a vectorized instruction is generated for older", "tvm.testing.assert_allclose(op_res1.asnumpy(), ref_res, rtol=1e-5, atol=1e-5) def compile_test_conv2d_arm_cpu(dtype, out_dtype, scale, dshape, kshape,", "yy = run_infer_type(y) assert yy.checked_type == relay.TensorType((n, c , h,", "18, 18, 3) kshape_hwoi = (3, 3, 10, 3) oshape_nhwc", "kernel_size=(3, 3, 3), padding=(1, 1, 1), channels=16, data_layout=\"NDHWC\", out_dtype=\"int32\") yy", "((1, 1), (2, 2), (3, 3), (4, 4)), 'constant') for", "test_pool1d(): def _test_pool1d(opfunc): n, c, w = tvm.var(\"n\"), 10, 224", "(n, ic, ih, iw) x = relay.var(\"x\", shape=dshape) y =", "axis=(2,3)) / np.maximum(pad_count, 1) ref_res = np.maximum(b_np, 0.0) data =", "= intrp2.evaluate(func)(x_data) tvm.testing.assert_allclose(op_res2.asnumpy(), ref_res, rtol=1e-5) def test_pad_infer_type(): # entirely concrete", "target=target) out = executor.evaluate(func)(data) tvm.testing.assert_allclose(out.asnumpy(), ref, rtol=1e-5, atol=1e-5) def test_upsampling3d():", "assert _has_fast_int8_instructions(asm, target) # Ensure that code is generated when", "\"float32\", 1, dshape, kshape, padding=(1, 1), channels=10, kernel_size=(3 ,3)) #", "c, h, w = tvm.size_var(\"n\"), 10, 224, 224 x =", "ic, ch, cw) elif kernel_layout == 'HWIO': kernel_shape = (ch,", "groups=groups, **attrs) func = relay.Function([x, w], y) data = np.random.uniform(-scale,", "padding=(1, 1), channels=16, data_layout=\"NHWC\", out_dtype=\"int32\") yy = run_infer_type(y) assert yy.checked_type", "conv3d dshape = (1, 3, 5, 224, 224) kshape =", "dtype = \"float32\" data = np.random.uniform(size=dshape).astype(dtype) kernel = np.random.uniform(size=kshape).astype(dtype) c_np", "ref_res = d_np for target, ctx in ctx_list(): intrp1 =", "yy = run_infer_type(y) assert yy.checked_type == relay.TensorType((d1, ((2*d3)*3)), \"float32\") shape", "= relay.var(\"x\", relay.TensorType(data_shape, input_dtype)) else: raise ValueError('Not supported') if kernel_layout", "yy.checked_type == relay.TensorType( (n, 15, 15, 11), \"float32\") def test_conv2d_transpose_nchw_run():", "high=1, size=shape).astype(dtype) ref_res = topi.testing.lrn_python(x_data, size, axis, bias, alpha, beta)", "if llvm_version >= 8: with relay.build_config(opt_level=3): graph, lib, params =", "3) kshape_hwoi = (3, 3, 10, 3) oshape_nhwc = (1,", "assert yy.args[1].checked_type == relay.TensorType( (4, 8, 3, 3, 4, 4),", "relay.nn.batch_flatten(x) yy = run_infer_type(z) assert yy.checked_type == relay.TensorType(o_shape, dtype) func", "= relay.var(\"w\") y = relay.nn.conv2d(x, w, kernel_size=(3, 3), padding=(1, 1),", "relay.TensorType( (4, 8, 3, 3, 4, 4), \"int8\") # Infer", "method=\"bilinear\") \"method=\\\"BINLINEAR\\\"\" in y.astext() yy = run_infer_type(y) 
assert yy.checked_type ==", "a_np = np.random.uniform(low=0.001, size=(n, ic, ih, iw)).astype(dtype) pad_np = np.zeros(shape=(n,", "c, w = 4, 32, 224 x = relay.var(\"x\", relay.TensorType((n,", "\"llvm -mcpu=cascadelake\"] llvm_version = tvm.codegen.llvm_version_major() for target in targets: if", "except_targets=[\"cuda\"]) def test_conv2d_transpose_infer_type(): # symbolic in batch dimension n, c,", "kshape, padding=(1, 1), groups=1, dilation=(1, 1), **attrs): x = relay.var(\"x\",", "rtol=1e-5, atol=1e-5) def test_upsampling3d(): _test_upsampling3d(\"NCDHW\", \"nearest_neighbor\") _test_upsampling3d(\"NCDHW\", \"trilinear\", \"align_corners\") _test_upsampling3d(\"NDHWC\",", "np.random.uniform(-scale, scale, size=dshape).astype(dtype) kernel = np.random.uniform(-scale, scale, size=kshape).astype(dtype) dkernel =", "16)) _test_pool3d(relay.nn.avg_pool3d, padding=(0, 0, 4, 0, 0, 4), out_shape=(1, 3,", "relay.var(\"w\") y = relay.nn.conv2d(x, w, kernel_size=(3, 3), padding=(1, 1), channels=2)", "h, w), \"float32\")) y = relay.nn.upsampling3d(x, scale_d=2, scale_h=2, scale_w=2, layout=\"NCDHW\",", "kernel_size=(3, 3), groups=64, padding=(1, 1), dilation=(1, 1), out_dtype=output_dtype) func =", "= run_infer_type(y) assert yy.checked_type == relay.TensorType( (n, h, w, 16),", "[0, 1, 2, 3, 4, 5, 8, 6, 7]], \\", "method=method,\\ coordinate_transformation_mode=coordinate_transformation_mode) yy = run_infer_type(y) assert yy.checked_type == relay.TensorType((n,) +", "== relay.TensorType((n, 10, 224, 224), dtype) # test execution dtype", "x = relay.var(\"x\", relay.TensorType((n, c, h, w), \"float32\")) y =", "3, 32) x = relay.var(\"x\", shape=dshape) pool_type = 'max' if", "run_test_conv1d(\"float32\", \"float32\", 1, dshape, kshape, padding=(1, 1), channels=10, kernel_size=3, dilation=3)", "# http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or", "the input channels to check int8 robustness # Input channels", "run_infer_type(y) assert yy.checked_type == relay.TensorType( (n, 2, 224, 224, 224),", "channels=10, kernel_size=(3 ,3), dilation=(3, 3)) def test_conv2d_winograd(): class WinogradFallback(autotvm.FallbackContext): def", "test_upsampling_infer_type(): n, c , h, w = tvm.size_var(\"n\"), tvm.size_var(\"c\"), tvm.size_var(\"h\"),", "5, 10, 10) dtype = \"float32\" x = relay.var(\"x\", relay.TensorType(shape,", "null, \\ \"e\": [[\"tile_co\", \"sp\", [32, 16]], [\"tile_oh\", \"sp\", [8,", "Basic shape test with ambiguous batch. 
n, c, h, w", "ref_res, rtol=1e-5, atol=1e-5) _test_pool3d(relay.nn.max_pool3d) _test_pool3d(relay.nn.max_pool3d, padding=(2, 0, 0, 2, 0,", "'HWOI', 2, 1) d_np = np.zeros(shape=oshape_nhwc) d_np[:,0:c_np.shape[1],0:c_np.shape[2],:] = c_np def", "pool_size=(1, 1)) assert \"pool_size=\" in y.astext() yy = run_infer_type(y) assert", "yy = run_infer_type(y) assert yy.checked_type == relay.TensorType( (32, 2, 128,", "intrp2 = relay.create_executor(\"debug\", ctx=ctx, target=target) op_res1 = intrp1.evaluate(func)(x_data) tvm.testing.assert_allclose(op_res1.asnumpy(), ref_res,", "relay.nn.conv2d(x, w, kernel_size=(3, 3), padding=(1, 1), channels=2) yy = run_infer_type(y)", "alpha, beta) for target, ctx in ctx_list(): intrp1 = relay.create_executor(\"graph\",", "relay.nn.upsampling(x, scale_h=scale_h, scale_w=scale_w, layout=layout, method=method, align_corners=align_corners) func = relay.Function([x], y)", "224, 224, 224), \"float32\") assert yy.args[1].checked_type == relay.TensorType( (2, 10,", "kernel_layout=\"HWIO\") func = relay.Function([x, w], y) dtype = \"float32\" data", "should be a multiple of 4 internally. for ic in", "= relay.var(\"x\", relay.TensorType((n, c, w), \"uint8\")) w = relay.var(\"w\", relay.TensorType((2,", "3, 28, 28) x = relay.var(\"x\", shape=dshape, dtype=dtype) y =", "\"float32\") # some symbolic values n, c, h, w =", "1), channels=512, groups=512, kernel_size=(3 ,3)) # CUDA is disabled for", "run_test_conv2d(\"float32\", \"float32\", 1, dshape, kshape, padding=(1, 1), channels=32, groups=32, kernel_size=(3", "out_dtype, scale, dshape, kshape, padding=(1, 1, 1), fref=None, groups=1, dilation=(1,", "= relay.nn.conv3d(x, w, padding=padding, dilation=dilation, groups=groups, **attrs) func = relay.Function([x,", "iw)).astype(dtype) pad_np = np.zeros(shape=(n, ic, ih+2*ph, iw+2*pw)).astype(dtype) no_zero = (range(n),", "\"trilinear\", \"align_corners\") def test_conv2d_int8_intrinsics(): def _compile(ic, oc, target, data_layout, kernel_layout,", "(shape[0], target_dim)) def test_batch_flatten(): t1 = relay.TensorType((5, 10, 5)) x", "= relay.TensorType((5, 10, 5)) x = relay.Var(\"x\", t1) func =", "+ ishape x = relay.var(\"x\", shape=dshape) y = relay.nn.upsampling(x, scale_h=scale_h,", "(n, h, w, ic) x = relay.var(\"x\", relay.TensorType(data_shape, input_dtype)) else:", "a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 #", "padding=(0, 0)) func = relay.Function([x], y) data = np.random.random_integers(low=-128, high=128,", "out_shape=(1, 3, 16, 16, 16)): n, c, d, h, w", "output_padding=(1, 1), channels=11, data_layout=\"NHWC\") yy = run_infer_type(y) assert yy.checked_type ==", "key = (target, workload) if key in self.memory: return self.memory[key]", "the Apache License, Version 2.0 (the # \"License\"); you may", "test_conv2d_run(): def run_test_conv2d(dtype, out_dtype, scale, dshape, kshape, padding=(1, 1), fref=None,", "0, 4), out_shape=(1, 3, 16, 16, 20)) def test_avg_pool2d_no_count_pad(): kh,", "executor.evaluate(func)(data) tvm.testing.assert_allclose(out.asnumpy(), ref, rtol=1e-5, atol=1e-5) def test_upsampling3d(): _test_upsampling3d(\"NCDHW\", \"nearest_neighbor\") _test_upsampling3d(\"NCDHW\",", "test_conv1d_infer_type(): # symbolic in batch dimension n, c, w =", "cfg['tile_x'] = autotvm.task.space.SplitEntity([-1, 1, 1, 1]) cfg['tile_rc'] = autotvm.task.space.SplitEntity([-1, 1])", "relay.var(\"x\", relay.TensorType((n, h, w, c), \"int8\")) wt = relay.var(\"w\") y", "224, 4, 4), \"int32\") assert yy.args[1].checked_type == 
relay.TensorType( (4, 8,", "512, 32, 32], \"float32\"], \\ [\"TENSOR\", [512, 1, 3, 3],", "relay.nn.conv2d(x, wt, kernel_size=(3, 3), padding=(1, 1), channels=16, data_layout=\"NCHW4n4c\", kernel_layout=\"OIHW4o4i\", out_dtype=\"int32\")", "t = relay.var(\"t\", relay.TensorType((n, c, h, w), \"float32\")) y =", "= opfunc(x, pool_size=(1, 1, 1)) assert \"pool_size=\" in y.astext() yy", "with WinogradFallback(), relay.build_config(opt_level=3): for target, ctx in ctx_list(): if target", "you under the Apache License, Version 2.0 (the # \"License\");", "Support level2 operator test cases. \"\"\" import numpy as np", "w = 4, 32, 224, 224, 224 x = relay.var(\"x\",", "d_np[:,:,0:c_np.shape[2],0:c_np.shape[3]] = c_np ref_res = d_np for target, ctx in", "2, 2), padding, out_shape, pool_type, False) for target, ctx in", "int(round(w*scale_w))) else: return (h, w, c), (int(round(h*scale_h)), int(round(w*scale_w)), c) ishape,", "\\ [\"tile_ow\", \"sp\", [1, 8]], \\ [\"reorder_0\", \"re\", [0, 1,", "= relay.var(\"x\", relay.TensorType((n,) + ishape, dtype)) y = relay.nn.upsampling3d(x, scale_d=scale_d,", "params=parameters) assembly = lib.get_source(\"asm\") return assembly def _has_fast_int8_instructions(asm, target): if", "-device=arm_cpu\", \"topi_nn_depthwise_conv2d_nchw\", \\ [[\"TENSOR\", [1, 512, 32, 32], \"float32\"], \\", "relay.nn.conv2d(x, w, out_dtype=\"int32\") assert \"out_dtype=\\\"int32\\\"\" in y.astext() yy = run_infer_type(y)", "(32, 2, 128, 128, 1), \"uint16\") if __name__ == \"__main__\":", "_has_fast_int8_instructions(asm, target) # Check that int8 x int8 goes through", "with relay.build_config(opt_level=3): graph, lib, params = relay.build(func, target, params=parameters) def", "ih, iw) = (3, 28, 28) (oc, oh, ow) =", "= np.random.uniform(size=kshape).astype(dtype) c_np = topi.testing.conv2d_transpose_nchw_python( data, kernel, 2, 1) d_np", "relay.TensorType((2, 10, 3, 3, 3), \"int8\")) y = relay.nn.conv3d(x, w,", "except_targets: continue intrp1 = relay.create_executor(\"graph\", ctx=ctx, target=target) op_res1 = intrp1.evaluate(func)(data,", "of different dtypes for input and weight. 
n, c, d,", "= c_np def test_conv1d_transpose_ncw_run(): dshape = (1, 3, 18) kshape", "tvm.expr.Cast(\"int32\", tvm.round(w*scale))), \"float32\") n, c = tvm.size_var(\"n\"), tvm.size_var(\"c\") x =", "run_test_conv2d(\"float32\", \"float32\", 1, dshape, kshape, padding=(1, 1), channels=32, groups=8, kernel_size=(3", "= (1, 3, 28, 28) x = relay.var(\"x\", shape=dshape) y", "(1, 3, 224) kshape = (10, 3, 3) run_test_conv1d(\"float32\", \"float32\",", "= relay.nn.upsampling3d(x, scale_d=2, scale_h=2, scale_w=2, layout=\"NCDHW\", method=\"trilinear\") yy = run_infer_type(y)", "dilation=dilation, groups=groups, **attrs) func = relay.Function([x, w], y) mod =", "w, ic) x = relay.var(\"x\", relay.TensorType(data_shape, input_dtype)) else: raise ValueError('Not", "1), channels=16, data_layout=\"NDHWC\", out_dtype=\"int32\") yy = run_infer_type(y) assert yy.checked_type ==", "# Unless required by applicable law or agreed to in", "relay.Function([x], y) data = np.random.uniform(size=dshape).astype(dtype) ref_res = topi.testing.pool1d_ncw_python(data, (2,), (2,),", "relay.TensorType(shape, dtype)) z = relay.nn.batch_flatten(x) yy = run_infer_type(z) assert yy.checked_type", "5, 8, 6, 7]], \\ [\"reorder_1\", \"re\", [0, 1, 2,", "(2, 10, 3, 3, 3), \"float32\") # infer by shape", "tvm.size_var(\"c\"), tvm.size_var(\"h\"), tvm.size_var(\"w\") x = relay.var(\"x\", shape=(n, c , h,", "out_shape=(1, 3, 16, 19, 16)) _test_pool3d(relay.nn.avg_pool3d, padding=(0, 0, 4, 0,", "= relay.nn.conv3d(x, w, padding=padding, dilation=dilation, groups=groups, data_layout=\"NDHWC\", kernel_layout=\"DHWIO\", **attrs) func", ":, i*sh:i*sh+kh, j*sw:j*sw+kw], axis=(2,3)) / np.maximum(pad_count, 1) ref_res = np.maximum(b_np,", "IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND,", "relay.var(\"x\", relay.TensorType((n, c, h, w), \"int8\")) w = relay.var(\"w\", relay.TensorType((2,", "assert yy.checked_type == relay.TensorType((n, 10, 224, 224), dtype) # test", "relay from tvm.relay import transform from tvm.relay.testing import ctx_list, run_infer_type", "3, 18) kshape = (10, 3, 3) run_test_conv1d(\"float32\", \"float32\", 1,", "[\"llvm -mcpu=skylake-avx512\", \"llvm -mcpu=cascadelake\"] llvm_version = tvm.codegen.llvm_version_major() for target in", "in the assembly. 
assert not _has_fast_int8_instructions(asm, target) # Check that", "y) data = np.random.uniform(size=dshape).astype(dtype) ref_res = reffunc(data.reshape(1, 3, 14, 2,", "yy = run_infer_type(y) assert yy.checked_type == relay.TensorType((n, c, 1, 1),", "weight = relay.var(\"weight\", relay.TensorType(kernel_shape, weight_dtype)) y = relay.nn.conv2d(x, weight, kernel_size=(3,", "# group conv2d dshape = (1, 32, 18, 18) kshape", "(3, 3), (4, 4)), 'constant') for target, ctx in ctx_list():", "(n, w, 16), \"int32\") def test_conv1d_run(): def run_test_conv1d(dtype, out_dtype, scale,", "dtype)) y = opfunc(x, pool_size=(1, 1)) assert \"pool_size=\" in y.astext()", "self.memory: return self.memory[key] cfg = autotvm.task.space.FallbackConfigEntity() cfg.template_key = 'winograd' cfg.is_fallback", "if fref is None: ref_res = topi.testing.conv3d_ndhwc_python( data.astype(out_dtype), dkernel.astype(out_dtype), 1,", "tvm.size_var(\"c\") x = relay.var(\"x\", relay.TensorType((n, c, 100, 100, 200), \"float32\"))", "3, 28, 28) x = relay.var(\"x\", shape=dshape) y = opfunc(x,", "robustness # Input channels should be a multiple of 4", "of w, mixed precision n, c, w = tvm.var(\"n\"), 10,", "layout=layout, method=method,\\ coordinate_transformation_mode=coordinate_transformation_mode) func = relay.Function([x], y) data = np.random.uniform(size=dshape).astype(dtype)", "(n, 32, 222, 222), \"int16\") def test_bitpack_infer_type(): # Test axis", "op_res1 = intrp1.evaluate(func)(data, kernel) tvm.testing.assert_allclose(op_res1.asnumpy(), ref_res, rtol=1e-5, atol=1e-5) def compile_test_conv2d_arm_cpu(dtype,", "c, tvm.expr.Cast(\"int32\", tvm.round(h*scale)), tvm.expr.Cast(\"int32\", tvm.round(w*scale))), \"float32\") n, c = tvm.size_var(\"n\"),", "_compile(ic=ic, oc=16, target=target, data_layout=\"NHWC\", kernel_layout='HWIO', dtypes=dtypes) assert _has_fast_int8_instructions(asm, target) #", "def test_conv2d_transpose_infer_type(): # symbolic in batch dimension n, c, h,", "10, 3, 3) oshape = (1, 10, 37, 37) x", "high=1, size=shape).astype(dtype) ref_res = x_data.flatten().reshape(o_shape) for target, ctx in ctx_list():", "= relay.var(\"x\", relay.TensorType((n, c, h, w), dtype)) y = opfunc(x,", "\"float32\") def test_conv2d_transpose_nchw_run(): dshape = (1, 3, 18, 18) kshape", "layout. 
target = \"llvm -mcpu=core-avx2\" fast_int8_dtypes = ('uint8', 'int8', 'int32')", "# Check that vector int mult and add instructions are", "tvm.testing.assert_allclose(op_res1.asnumpy(), ref_res, rtol=1e-5, atol=1e-5) _test_pool1d(relay.nn.max_pool1d) _test_pool1d(relay.nn.avg_pool1d) def test_pool3d(): def _test_pool3d(opfunc,", "dtype)) y = relay.nn.upsampling(x, scale_h=scale_h, scale_w=scale_w, layout=layout, method=method, align_corners=align_corners) yy", "3], \"float32\"], \\ [1, 1], [1, 1], [1, 1], \"float32\"],", "func = relay.Function([x, w], y) mod = tvm.relay.Module() mod[\"main\"] =", "= topi.testing.conv3d_ndhwc_python( data.astype(out_dtype), dkernel.astype(out_dtype), 1, padding) else: ref_res = fref(data.astype(out_dtype),", "strides=(2,), padding=(1,), output_padding=(2,)) func = relay.Function([x, w], y) dtype =", "= tvm.size_var(\"n\"), tvm.size_var(\"c\"), tvm.size_var(\"h\"), tvm.size_var(\"w\") scale = tvm.const(2.0, \"float64\") x", "data_layout, kernel_layout, dtypes): input_dtype, weight_dtype, output_dtype = dtypes n, h,", "func = relay.Function([x], z) x_data = np.random.uniform(low=-1, high=1, size=shape).astype(dtype) ref_res", "kernel_size=(3 ,3)) # CUDA is disabled for 'direct' schedule: #", "conv2d dshape = (1, 3, 18) kshape = (10, 3,", "1), channels=10, kernel_size=3) # dilated conv2d dshape = (1, 3,", "'cuda': continue params = {'w': tvm.nd.array(kernel)} graph, lib, params =", "\"float32\")) y = relay.nn.conv2d_transpose(x, w, output_padding=(1, 1), channels=11, data_layout=\"NHWC\") yy", "512, 32, 32) kshape = (512, 1, 3, 3) compile_test_conv2d_arm_cpu(\"float32\",", "= np.random.uniform(size=kshape).astype(dtype) c_np = topi.testing.conv1d_transpose_ncw_python( data, kernel, 2, 1) d_np", "code is generated when datatypes are not HW supported. dtypes", "relay.ty.TensorType((32, 32, 3, 3), \"int16\")) y = relay.nn.bitserial_conv2d( x, w,", "padding=(1, 1), channels=192, kernel_size=(3, 3)) # extended winograd: stride 1,", "relay.TensorType( (2, 10, 3, 3, 3), \"float32\") # infer by", "func = relay.Function([x], y) dtype = \"float32\" a_np = np.random.uniform(low=0.001,", "intrp1 = relay.create_executor(\"graph\", ctx=ctx, target=target) intrp2 = relay.create_executor(\"debug\", ctx=ctx, target=target)", "size, axis, bias, alpha, beta) for target, ctx in ctx_list():", "32, 32 scale_h = 2.0 scale_w = 2.0 dtype =", "wt = relay.var(\"w\") y = relay.nn.conv2d(x, wt, kernel_size=(3, 3), padding=(1,", "in case of different dtypes for input and weight. n,", "= run_infer_type(y) assert yy.checked_type == relay.TensorType((n, 10, 224, 224), \"float32\")", "import relay from tvm.relay import transform from tvm.relay.testing import ctx_list,", "2), padding=(0, 0)) func = relay.Function([x], y) data = np.random.random_integers(low=-128,", "== relay.TensorType( (n, 2, 222, 222, 222), \"int32\") # Infer", "1] return np.reshape(data, (shape[0], target_dim)) def test_batch_flatten(): t1 = relay.TensorType((5,", "add instructions are generated. assert \"vpmulld\" in asm and \"vpadd\"", "more contributor license agreements. See the NOTICE file # distributed", "2, 0, 0), out_shape=(1, 3, 18, 16, 16)) _test_pool3d(relay.nn.avg_pool3d, padding=(0,", "32, 222, 222), \"int16\") def test_bitpack_infer_type(): # Test axis packing", "kernel_size=(3, 3), padding=(1, 1), channels=2) yy = run_infer_type(y) assert yy.checked_type", "kshape, padding=(2, 2), channels=192, kernel_size=(3, 3)) # extended winograd: stride", "and weight. 
n, c, w = tvm.var(\"n\"), 10, 224 x", "dilation=3) def test_conv2d_infer_type(): # symbolic in batch dimension n, c,", "= (1, 3, 18, 18) kshape = (10, 3, 3,", "10, 37) x = relay.var(\"x\", shape=dshape) w = relay.var(\"w\") y", "method, align_corners=False): n, c, h, w = tvm.size_var(\"n\"), 16, 32,", "out_dtype, scale, dshape, kshape, padding=(1, 1), groups=1, dilation=(1, 1), **attrs):", "np.random.uniform(size=dshape).astype(dtype) ref_res = reffunc(data, axis=(2,3), keepdims=True) for target, ctx in", "224 x = relay.var(\"x\", relay.TensorType((n, h, w, c), \"float32\")) y", "kernel NxN kshape = (192, 80, 7, 7) run_test_conv2d_cuda(\"float32\", \"float32\",", "224, 224 x = relay.var(\"x\", relay.ty.TensorType((n, c, d, h, w),", "in y.astext() yy = run_infer_type(y) assert yy.checked_type == relay.TensorType((n, c", "relay.var(\"x\", relay.ty.TensorType((n, c, w), \"float32\")) w = relay.var(\"w\") y =", "relay.TensorType((2, 10, 3, 3), \"int8\")) y = relay.nn.conv2d(x, w, out_dtype=\"int32\")", "relay.nn.lrn(x, size=10, axis=2, bias=0.5, alpha=.00001, beta=0.75) \"alpha=\" in y.astext() yy", "not _has_fast_int8_instructions(asm, target) # Check that a vectorized instruction is", "in targets: if llvm_version >= 8: dtypes = (('int8', 'int8',", "= tuple(map(lambda x: int(x), run_infer_type(func).ret_type.shape)) assert out_shape == f_out_shape, \\", "= tvm.size_var(\"n\"), 10, 10, 12 x = relay.var(\"x\", relay.TensorType((n, c,", "in ctx_list(): if target in except_targets: continue intrp1 = relay.create_executor(\"graph\",", "4, 32, 224, 224 x = relay.var(\"x\", relay.TensorType((n, h, w,", "shape=dshape) y = relay.nn.upsampling3d(x, scale_d=scale_d, scale_h=scale_h, scale_w=scale_w,\\ layout=layout, method=method,\\ coordinate_transformation_mode=coordinate_transformation_mode)", "mismatch. 
expected {}, actual {}\".format(out_shape, f_out_shape) data = np.random.uniform(size=dshape).astype(dtype) ref_res", "for 'direct' schedule: # https://github.com/apache/incubator-tvm/pull/3070#issuecomment-486597553 # group conv2d dshape =", "= topi.testing.dilate_python(kernel, (1, 1) + dilation) if fref is None:", "fast_int8_dtypes = ('uint8', 'int8', 'int32') asm = _compile(ic=16, oc=32, target=target,", "[\"depthwise_conv2d_nchw\", [1, 512, 32, 32, \"float32\"], \\ [512, 1, 3,", "specific language governing permissions and limitations # under the License.", "= np.random.uniform(-scale, scale, size=kshape).astype(dtype) ref_res = topi.testing.conv1d_ncw_python( data.astype(out_dtype), kernel.astype(out_dtype), 1,", "autotvm.task.space.SplitEntity([-1, 1, 1, 1]) cfg['tile_x'] = autotvm.task.space.SplitEntity([-1, 1, 1, 1])", "w), \"int16\")) w = relay.var(\"w\", relay.ty.TensorType((32, 32, 3, 3), \"int16\"))", "= autotvm.task.space.SplitEntity([-1, 1, 1, 1]) cfg['tile_y'] = autotvm.task.space.SplitEntity([-1, 1, 1,", "= dtypes n, h, w, ch, cw = 1, 64,", "opfunc(x, pool_size=(2,), strides=(2,), padding=(0, 0)) func = relay.Function([x], y) data", "32, 32, 128, 128 x = relay.var(\"x\", relay.ty.TensorType((o, i, h,", "0, 2, 0, 0), out_shape=(1, 3, 18, 16, 16)) _test_pool3d(relay.nn.max_pool3d,", "check int8 robustness # Input channels should be a multiple", "0, 2, 0, 0), out_shape=(1, 3, 18, 16, 16)) _test_pool3d(relay.nn.avg_pool3d,", "relay.var(\"x\", relay.TensorType((n, c, h, w), \"float32\")) y = opfunc(x) yy", "pad_count = np.sum(pad_np[:, :, i*sh:i*sh+kh, j*sw:j*sw+kw] > 0, axis=(2,3)) b_np[:,:,i,j]", "scale = tvm.const(2.0, \"float64\") x = relay.var(\"x\", relay.TensorType((n, c, d,", "\"w\") as log_file: log_file.write(test_schedule) with autotvm.apply_history_best(temp.relpath(\"temp.log\")): with relay.build_config(opt_level=3): print('Compiling...') graph_json,", "= 2.0 scale_w = 2.0 dtype = \"float32\" def get_shape():", "kernel = np.random.uniform(size=kshape).astype(dtype) c_np = topi.testing.conv2d_transpose_nchw_python( data, kernel, 2, 1)", "padding, groups=groups) else: ref_res = fref(data.astype(out_dtype), dkernel.astype(out_dtype)) for target, ctx", "12 x = relay.var(\"x\", relay.TensorType((n, h, w, c), \"float32\")) w", "(n, 2, 222, 222, 222), \"int32\") # infer shape in", "h, w), \"float32\")) y = relay.nn.upsampling(x, scale_h=2, scale_w=2, layout=\"NCHW\", method=\"bilinear\")", "weight_dtype = 'int8' output_dtype = 'int32' data_shape = (1, 64,", "distributed with this work for additional information # regarding copyright", "== relay.TensorType( (n, 2, 222, 222), \"int32\") # Infer with", "shape=dshape, dtype=dtype) w = relay.var(\"w\", shape=kshape, dtype=dtype) y = relay.nn.conv2d(x,", "= relay.var(\"x\", shape=(n, c , h, w)) y = relay.nn.lrn(x,", "= relay.var(\"x\", shape=dshape) y = opfunc(x, pool_size=(2, 2), strides=(2, 2),", "data_layout=\"NCHW\", kernel_layout='OIHW', dtypes=fast_int8_dtypes) # Check that vector int mult and", "assert \"channels=15\" in y.astext() yy = run_infer_type(y) assert yy.checked_type ==", "dtype)) y = relay.nn.upsampling3d(x, scale_d=scale_d, scale_h=scale_h, scale_w=scale_w,\\ layout=layout, method=method,\\ coordinate_transformation_mode=coordinate_transformation_mode)", "mixed precision run_test_conv2d(\"int8\", \"int32\", 1, dshape, kshape, padding=(1, 1), channels=10,", "for the # specific language governing permissions and limitations #", "= relay.var(\"x\", relay.TensorType((n, h, w, c), \"float32\")) w = 
relay.var(\"w\",", "dtypes=dtypes) assert _has_fast_int8_instructions(asm, target) # Check that both non-divisible oc", "ctx) module.set_input('x', tvm.nd.array(data)) module.set_input(**params) module.run() op_res1 = module.get_output(0) tvm.testing.assert_allclose(op_res1.asnumpy(), ref_res,", "of w, mixed precision n, c, d, h, w =", "HW supported. dtypes = ('uint8', 'uint8', 'int32') asm = _compile(ic=16,", "def run_test_conv3d(dtype, out_dtype, scale, dshape, kshape, padding=(1, 1, 1), fref=None,", "(int(round(d*scale_d)),\\ int(round(h*scale_h)),\\ int(round(w*scale_w))), layout) for target, ctx in ctx_list(): executor", "is disabled for 'direct' schedule: # https://github.com/apache/incubator-tvm/pull/3070#issuecomment-486597553 # group conv2d", "scale_h=scale_h, scale_w=scale_w, layout=layout, method=method, align_corners=align_corners) func = relay.Function([x], y) data", "padding=(1, 1), channels=10, kernel_size=3) # mixed precision run_test_conv1d(\"int8\", \"int32\", 1,", "both non-divisible oc and ic work asm = _compile(ic=17, oc=29,", "\"int32\") # Infer with NDHWC n, c, d, h, w", "x = relay.var(\"x\", shape=dshape) y = opfunc(x) func = relay.Function([x],", ", h, w)) y = relay.nn.l2_normalize(x, eps=0.001, axis=[1]) \"axis=\" in", "np.maximum(pad_count, 1) ref_res = np.maximum(b_np, 0.0) data = a_np for", "of 4 internally. for ic in [1, 4, 6]: asm", "2), channels=192, kernel_size=(3, 3)) # extended winograd: stride 1, padding", "(32, 4, 3, 3) run_test_conv2d(\"float32\", \"float32\", 1, dshape, kshape, padding=(1,", "\"float32\") n, c = tvm.size_var(\"n\"), tvm.size_var(\"c\") x = relay.var(\"x\", relay.TensorType((n,", "padding=(1, 1), channels=32, groups=8, kernel_size=(3 ,3), except_targets=['cuda']) # also group", "asm = _compile(ic=16, oc=32, target=target, data_layout=\"NHWC\", kernel_layout='HWIO', dtypes=dtypes) # Check", "w), \"float32\")) y = relay.nn.upsampling3d(x, scale_d=2, scale_h=2, scale_w=2, layout=\"NCDHW\", method=\"trilinear\")", "relay.var(\"x\", relay.TensorType((n, c, d, h, w), \"uint8\")) w = relay.var(\"w\",", "yy = run_infer_type(y) assert yy.checked_type == relay.TensorType( (n, 2, 222,", "w, c), \"int8\")) wt = relay.var(\"w\") y = relay.nn.conv2d(x, wt,", "padding=(1, 1, 1), channels=16, data_layout=\"NDHWC\", out_dtype=\"int32\") yy = run_infer_type(y) assert", "\"float32\", 1, dshape, kshape, padding=(1, 1), channels=10, kernel_size=(3 ,3), dilation=(3,", "d, h, w = tvm.size_var(\"n\"), tvm.size_var(\"c\"),\\ tvm.size_var(\"d\"), tvm.size_var(\"h\"), tvm.size_var(\"w\") scale", "yy = run_infer_type(y) assert yy.checked_type == relay.TensorType((n, 10, 5, 224,", "\"int32\") # infer shape in case of different dtypes for", "y.astext() yy = run_infer_type(y) assert yy.checked_type == relay.TensorType( (n, 2,", "dshape_nhwc = (1, 18, 18, 3) kshape_hwoi = (3, 3,", "True) def _test_upsampling3d(layout, method, coordinate_transformation_mode=\"half_pixel\"): n, c, d, h, w", "with ambiguous batch. 
n, c, h, w = tvm.size_var(\"n\"), 32,", "relay.var(\"x\", shape=dshape, dtype=dtype) y = opfunc(x, pool_size=(2, 2), strides=(2, 2),", "= (1, 80, 73, 73) kshape = (192, 80, 3,", "relay.TensorType((n, c, 200, 400), \"float32\") def test_upsampling3d_infer_type(): n, c, d,", "return (c, d, h, w), (c, int(round(d*scale_d)), int(round(h*scale_h)),\\ int(round(w*scale_w))) else:", "relay.ty.TensorType((n, c, d, h, w), \"float32\")) w = relay.var(\"w\") y", "ctx_list(): intrp = relay.create_executor(\"graph\", ctx=ctx, target=target) op_res = intrp.evaluate(func)(data) np.testing.assert_allclose(op_res.asnumpy(),", "assert yy.checked_type == relay.TensorType( (n, 2, 224, 224), \"float32\") assert", "dshape = (1, 32, 18, 18) kshape = (32, 4,", "relay.var(\"x\", shape=dshape) y = opfunc(x, pool_size=(2, 2), strides=(2, 2), padding=(0,", "3, 0, 0, 3, 0), out_shape=(1, 3, 16, 19, 16))", "channels=10, kernel_size=(3 ,3)) kshape = (10, 3, 1, 3) #", "== 'NCHW': data_shape = (n, ic, h, w) x =", "= relay.var(\"weight\", relay.TensorType(kernel_shape, weight_dtype)) y = relay.nn.conv2d(x, weight, kernel_size=(3, 3),", "shape=dshape) y = opfunc(x, pool_size=(2, 2), strides=(2, 2), padding=(0, 0))", "to you under the Apache License, Version 2.0 (the #", "wt, kernel_size=(3, 3), padding=(1, 1), channels=16, data_layout=\"NHWC\", out_dtype=\"int32\") yy =", "target) for ic in [1, 4, 6]: asm = _compile(ic=ic,", "util import topi.testing def test_conv1d_infer_type(): # symbolic in batch dimension", "1), channels=10, kernel_size=(3 ,3)) # mixed precision run_test_conv2d(\"int8\", \"int32\", 1,", "dshape, kshape, padding=(1, 1), channels=192, kernel_size=(3, 3)) # extended winograd:", "dshape = (1, 3, 28, 28) x = relay.var(\"x\", shape=dshape)", "in y.astext() yy = run_infer_type(y) assert yy.checked_type == relay.TensorType((3, 6,", "size=size, axis=axis, bias=bias, alpha=alpha, beta=beta) yy = run_infer_type(z) assert yy.checked_type", "padding=padding, dilation=dilation, **attrs) func = relay.Function([x, w], y) data =", "func = relay.Function([x], y) data = np.random.uniform(size=dshape).astype(dtype) ref_res = reffunc(data.reshape(1,", "224, 224, 6) kshape = (3, 3, 3, 6, 10)", "ih, iw) x = relay.var(\"x\", shape=dshape) y = relay.nn.avg_pool2d(x, pool_size=(kh,", "may not use this file except in compliance # with", "1), (2, 2), (3, 3), (4, 4))) yy = run_infer_type(y)", "get_shape(): if layout == \"NCHW\": return (c, h, w), (c,", "in target: return \"pmaddubs\" in asm elif 'cascadelake' in target:", "depthwise conv2d for arm_cpu dshape = (1, 512, 32, 32)", "= relay.var(\"x\", shape=dshape, dtype=dtype) y = opfunc(x, pool_size=(2, 2), strides=(2,", "n, h, w, c = tvm.size_var(\"n\"), 10, 10, 12 x", "h, w, ch, cw = 1, 64, 64, 3, 3", "rtol=1e-5, atol=1e-5) def test_upsampling_infer_type(): n, c , h, w =", "cfg.template_key = 'winograd' cfg.is_fallback = False cfg['tile_b'] = autotvm.task.space.SplitEntity([-1, 1,", "kshape, padding=(1, 1), channels=64, groups=32, kernel_size=(3 ,3), except_targets=['cuda']) # normal", "relay.TensorType((d1, ((2*d3)*3)), \"float32\") shape = (1, 5, 10, 10) o_shape", "c, h, w), dtype)) y = opfunc(x, pool_size=(1, 1)) assert", "h, w, c), \"int8\")) wt = relay.var(\"w\") y = relay.nn.conv2d(x,", "= (3, 10, 3, 3) oshape = (1, 10, 37,", "3, 3, 6, 10) run_test_conv3d(\"float32\", \"float32\", 1, dshape, kshape, padding=(1,", "func mod = relay.transform.InferType()(mod) data = np.random.uniform(-scale, scale, size=dshape).astype(dtype) kernel", "relay.var(\"w\") y = 
relay.nn.conv2d_transpose(x, w, channels=10, kernel_size=(3,3), strides=(2,2), padding=(1,1), output_padding=(2,", "0)) func = relay.Function([x], y) data = np.random.random_integers(low=-128, high=128, size=dshape)", "yy.checked_type == relay.TensorType((n, 1, 1, c), \"float32\") n, c, h,", "\"Target should be Skylake or Cascadelake\" # compile conv2d for", "relay.TensorType((n, c, h, w), \"float32\")) w = relay.var(\"w\", relay.IncompleteType()) y", "224, 224 x = relay.var(\"x\", relay.TensorType((n, c, h, w), \"uint8\"))", "10, 10, 12 x = relay.var(\"x\", relay.TensorType((n, h, w, c),", "c = tvm.size_var(\"n\"), tvm.size_var(\"c\") x = relay.var(\"x\", relay.TensorType((n, c, 100,", "(n, h, w, 16), \"int32\") def test_conv2d_run(): def run_test_conv2d(dtype, out_dtype,", "= (1, 32, 18, 18) kshape = (32, 1, 3,", "(1, 80, 73, 73) kshape = (192, 80, 3, 3)", "reffunc): n, c, h, w = tvm.size_var(\"n\"), tvm.size_var(\"c\"), 224, 224", "= relay.Function([x], relay.nn.batch_flatten(x)) data = np.random.rand(5, 10, 5).astype(t1.dtype) ref_res =", "Sweep the input channels to check int8 robustness # Input", "ref_res, rtol=0.01) def _test_upsampling(layout, method, align_corners=False): n, c, h, w", "= (('int8', 'int8', 'int32')) # Check that both non-divisible oc", "pool_size=(kh, kw), strides=(sw, sw), padding=(ph, pw), count_include_pad=False) func = relay.Function([x],", "test_conv2d_winograd(): class WinogradFallback(autotvm.FallbackContext): def _query_inside(self, target, workload): key = (target,", "ref_res, rtol=1e-5) def batch_flatten(data): shape = data.shape target_dim = 1", "y = opfunc(x, layout=\"NHWC\") yy = run_infer_type(y) assert yy.checked_type ==", "additional information # regarding copyright ownership. The ASF licenses this", "= intrp1.evaluate(func)(data, kernel) tvm.testing.assert_allclose(op_res1.asnumpy(), ref_res, rtol=1e-5, atol=1e-5) # normal conv3d", "2), (3, 3), (4, 4))) yy = run_infer_type(y) assert yy.checked_type", "= np.random.uniform(-scale, scale, size=kshape).astype(dtype) dkernel = topi.testing.dilate_python(kernel, (1, 1) +", "dtype = \"float32\" def get_shape(): if layout == \"NCHW\": return", "axis=[axis]) yy = run_infer_type(z) assert yy.checked_type == relay.TensorType(shape, dtype) func", "except_targets=None, **attrs): if except_targets is None: except_targets = [] x", "1), channels=10, kernel_size=(3 ,3)) kshape = (10, 3, 1, 3)", "# https://github.com/apache/incubator-tvm/pull/3070#issuecomment-486597553 # group conv2d dshape = (1, 32, 18,", "(3, 3), (4, 4))) yy = run_infer_type(y) assert yy.checked_type ==", "yy.checked_type == relay.TensorType((n, c , h, w)) shape = (1,", "= tvm.var(\"n\"), 10, 224 x = relay.var(\"x\", relay.TensorType((n, c, w),", "size=shape).astype(dtype) ref_res = topi.testing.lrn_python(x_data, size, axis, bias, alpha, beta) for", "run_infer_type(y) assert yy.checked_type == relay.TensorType( (n, 2, 224, 224), \"float32\")", "[512, 1, 3, 3], \"float32\"], \\ [1, 1], [1, 1],", "input and weight. 
n, c, h, w = tvm.size_var(\"n\"), 10,", "groups=8, kernel_size=(3 ,3), except_targets=['cuda']) # also group conv2d dshape =", "# mixed precision run_test_conv1d(\"int8\", \"int32\", 1, dshape, kshape, padding=(1, 1),", "ctx in ctx_list(): if target != 'cuda': continue params =", "op_res1 = module.get_output(0) tvm.testing.assert_allclose(op_res1.asnumpy(), ref_res, rtol=1e-3, atol=1e-3) # normal winograd:", "_test_pool1d(opfunc): n, c, w = tvm.var(\"n\"), 10, 224 x =", "y) # check output shape f_out_shape = tuple(map(lambda x: int(x),", "y = relay.nn.upsampling(x, scale_h=2, scale_w=2, layout=\"NCHW\", method=\"bilinear\") yy = run_infer_type(y)", "run_infer_type(y) assert yy.checked_type == relay.TensorType( (n, w, 16), \"int32\") def", "= 4, 32, 224, 224 x = relay.var(\"x\", relay.TensorType((n, h,", "18, 18) kshape = (3, 10, 3, 3) oshape =", "(1, 3, 18, 18) kshape = (10, 3, 3, 3)", "target, ctx in ctx_list(): if target != 'cuda': continue params", "(1, 10, 37) x = relay.var(\"x\", shape=dshape) w = relay.var(\"w\")", "out_shape=(1, 3, 18, 16, 16)) _test_pool3d(relay.nn.max_pool3d, padding=(0, 3, 0, 0,", "size=shape).astype(dtype) ref_res = x_data.flatten().reshape(o_shape) for target, ctx in ctx_list(): intrp1", "3, 3) weight = relay.var(\"weight\", relay.TensorType(kernel_shape, weight_dtype)) y = relay.nn.conv2d(x,", "224 x = relay.var(\"x\", relay.TensorType((n, c, d, h, w), \"int8\"))", "conv2d dshape = (1, 3, 224, 224) kshape = (10,", "\"float32\", 1, dshape, kshape, padding=(2, 2), channels=192, kernel_size=(3, 3)) #", "layout == \"NCHW\": return (c, h, w), (c, int(round(h*scale_h)), int(round(w*scale_w)))", "cw, ic, oc) else: raise ValueError('Not supported') weight = relay.var(\"weight\",", "in range(len(shape) - 1): target_dim = target_dim * shape[i +", "wdata = np.random.rand(*kernel_shape) * 10 parameters = {\"weight\": tvm.nd.array(wdata.astype(weight_dtype))} with", "= (1, 3, 32) x = relay.var(\"x\", shape=dshape) pool_type =", "channels=10, kernel_size=3) # mixed precision run_test_conv1d(\"int8\", \"int32\", 1, dshape, kshape,", "test_conv2d_transpose_infer_type(): # symbolic in batch dimension n, c, h, w", "target): if 'skylake-avx512' in target: return \"pmaddubs\" in asm elif", "test_conv3d_run(): def run_test_conv3d(dtype, out_dtype, scale, dshape, kshape, padding=(1, 1, 1),", "# normal conv3d dshape = (1, 3, 5, 224, 224)", "w = tvm.size_var(\"n\"), 10, 10, 12 x = relay.var(\"x\", relay.TensorType((n,", "16, 20)) _test_pool3d(relay.nn.avg_pool3d) _test_pool3d(relay.nn.avg_pool3d, padding=(2, 0, 0, 2, 0, 0),", "x = relay.var(\"x\", relay.TensorType((n, c, w), \"uint8\")) w = relay.var(\"w\",", "c, w), \"float32\")) w = relay.var(\"w\") y = relay.nn.conv1d(x, w,", "# specific language governing permissions and limitations # under the", "intrp1.evaluate(func)(data, kernel) tvm.testing.assert_allclose(op_res1.asnumpy(), ref_res, rtol=1e-5, atol=1e-5) # normal conv1d dshape", "\"float32\")) w = relay.var(\"w\") y = relay.nn.conv3d(x, w, kernel_size=(3, 3,", "intrp1.evaluate(func)(data, kernel) tvm.testing.assert_allclose(op_res1.asnumpy(), ref_res, rtol=1e-5, atol=1e-5) # normal conv3d dshape", "z = relay.nn.lrn(x, size=size, axis=axis, bias=bias, alpha=alpha, beta=beta) yy =", "for target, ctx in ctx_list(): intrp = relay.create_executor(\"graph\", ctx=ctx, target=target)", "if key in self.memory: return self.memory[key] cfg = autotvm.task.space.FallbackConfigEntity() cfg.template_key", "c, h, w = tvm.size_var(\"n\"), 16, 32, 32 scale_h =", "with 
relay.build_config(opt_level=3): graph, lib, params = relay.build(func, target, params=parameters) assembly", "== f_out_shape, \\ \"Output shape mismatch. expected {}, actual {}\".format(out_shape,", "x_data = np.random.uniform(low=-1, high=1, size=shape).astype(dtype) ref_res = topi.testing.l2_normalize_python(x_data, eps, axis)", "== 'OIHW': kernel_shape = (oc, ic, ch, cw) elif kernel_layout", "sh, sw = (2, 2) ph, pw = (2, 2)", "w, kernel_size=(3, 3), padding=(1, 1), channels=15) assert \"channels=15\" in y.astext()", "layout=layout, method=method, align_corners=align_corners) func = relay.Function([x], y) data = np.random.uniform(size=dshape).astype(dtype)", "3, 6, 10) run_test_conv3d(\"float32\", \"float32\", 1, dshape, kshape, padding=(1, 1,", "'int8', 'int32')) # Check that both non-divisible oc and ic", "you may not use this file except in compliance #", "\"int32\") def test_conv1d_run(): def run_test_conv1d(dtype, out_dtype, scale, dshape, kshape, padding=(1,", "oc and ic work asm = _compile(ic=17, oc=29, target=target, data_layout=\"NCHW\",", "yy.checked_type == relay.TensorType((n, c, 200, 400), \"float32\") def test_upsampling3d_infer_type(): n,", "1, dshape, kshape, padding=(1, 1), channels=32, groups=8, kernel_size=(3 ,3), except_targets=['cuda'])", "= np.random.uniform(-scale, scale, size=kshape).astype(dtype) ref_res = topi.testing.conv2d_nchw_python( data.astype(out_dtype), kernel.astype(out_dtype), 1,", "= (ch, cw, ic, oc) else: raise ValueError('Not supported') weight", "6) kshape = (3, 3, 3, 6, 10) run_test_conv3d(\"float32\", \"float32\",", "(1, 512, 32, 32) kshape = (512, 1, 3, 3)", "scale_h=scale_h, scale_w=scale_w, layout=layout, method=method, align_corners=align_corners) yy = run_infer_type(y) assert yy.checked_type", "1, 1, 1]) cfg['tile_x'] = autotvm.task.space.SplitEntity([-1, 1, 1, 1]) cfg['tile_rc']", "1): target_dim = target_dim * shape[i + 1] return np.reshape(data,", "3), \"int8\")) y = relay.nn.conv2d(x, w, out_dtype=\"int32\") assert \"out_dtype=\\\"int32\\\"\" in", "4, 0, 0, 4), out_shape=(1, 3, 16, 16, 20)) _test_pool3d(relay.nn.avg_pool3d)", "= topi.testing.pool3d_ncdhw_python(data, (2, 2, 2), (2, 2, 2), padding, out_shape,", "= relay.nn.conv1d(x, w, out_dtype=\"int32\") assert \"out_dtype=\\\"int32\\\"\" in y.astext() yy =", "in targets: if llvm_version >= 8: dtypes = ('uint8', 'int8',", "1), channels=10, kernel_size=(3 ,3), dilation=(3, 3)) def test_conv2d_winograd(): class WinogradFallback(autotvm.FallbackContext):", "cw), channels=oc, padding=(1, 1), dilation=(1, 1), data_layout=data_layout, kernel_layout=kernel_layout, out_dtype=output_dtype) func", "shape of w, mixed precision n, c, h, w =", "w, c), (int(round(h*scale_h)), int(round(w*scale_w)), c) ishape, oshape = get_shape() x", "relay.create_executor(\"graph\", ctx=ctx, target=target) out = executor.evaluate(func)(data) tvm.testing.assert_allclose(out.asnumpy(), ref, rtol=1e-5, atol=1e-5)", "dtype = \"float32\" dshape = (1, 3, 32) x =", "kernel_layout == 'HWIO': kernel_shape = (ch, cw, ic, oc) else:", "w = relay.var(\"w\", relay.TensorType((2, 10, 3, 3), \"int8\")) y =", "= relay.nn.conv3d(x, w, kernel_size=(3, 3, 3), padding=(1, 1, 1), channels=2)", "relay.var(\"w\", relay.IncompleteType()) y = relay.nn.conv2d_transpose(x, w, kernel_size=(3, 3), padding=(1, 1),", "wt, kernel_size=(3, 3), padding=(1, 1), channels=16, data_layout=\"NCHW4n4c\", kernel_layout=\"OIHW4o4i\", out_dtype=\"int32\") yy", "j*sw:j*sw+kw] > 0, axis=(2,3)) b_np[:,:,i,j] = np.sum(pad_np[:, :, i*sh:i*sh+kh, j*sw:j*sw+kw],", 
"out_shape=(1, 3, 16, 16, 20)) def test_avg_pool2d_no_count_pad(): kh, kw =", "groups=groups) with WinogradFallback(), relay.build_config(opt_level=3): for target, ctx in ctx_list(): if", "x = relay.var(\"x\", relay.TensorType(data_shape, input_dtype)) elif data_layout == 'NHWC': data_shape", "w], y) dtype = \"float32\" data = np.random.uniform(size=dshape_nhwc).astype(dtype) kernel =", "\"float32\") assert yy.args[1].checked_type == relay.TensorType( (2, 10, 3, 3), \"float32\")", "shape=dshape) y = opfunc(x) func = relay.Function([x], y) data =", "3, 3) run_test_conv2d(\"float32\", \"float32\", 1, dshape, kshape, padding=(1, 1), channels=64,", "test_lrn(): n, c , h, w = tvm.size_var(\"n\"), tvm.size_var(\"c\"), tvm.size_var(\"h\"),", "with this work for additional information # regarding copyright ownership.", "= relay.create_executor(\"graph\", ctx=ctx, target=target) out = executor.evaluate(func)(data) tvm.testing.assert_allclose(out.asnumpy(), ref, rtol=1e-5,", "4, 32, 224, 224, 224 x = relay.var(\"x\", relay.TensorType((n, d,", "\"alpha=\" in y.astext() yy = run_infer_type(y) assert yy.checked_type == relay.TensorType((n,", "x = relay.var(\"x\", relay.TensorType((n, c, h, w), \"uint8\")) w =", "out_dtype=output_dtype) func = relay.Function([x, weight], y) wdata = np.random.rand(*kernel_shape) *", "relay.var(\"weight\", relay.TensorType(kernel_shape, weight_dtype)) y = relay.nn.conv2d(x, weight, kernel_size=(3, 3), groups=64,", "= relay.var(\"x\", relay.TensorType((n, c, 100, 200), \"float32\")) y = relay.nn.upsampling(x,", "output_dtype = dtypes n, h, w, ch, cw = 1,", "assert out_shape == f_out_shape, \\ \"Output shape mismatch. expected {},", "test_conv1d_transpose_ncw_run() test_conv1d_run() test_conv2d_run() test_conv2d_winograd() test_conv3d_run() test_conv3d_ndhwc_run() test_bitserial_conv2d_infer_type() test_batch_flatten() test_upsampling() test_upsampling3d()", "is HWIO y = relay.nn.conv2d_transpose(x, w, channels=10, kernel_size=(3, 3), strides=(2,", "= autotvm.task.space.OtherOptionEntity(1) self.memory[key] = cfg return cfg def run_test_conv2d_cuda(dtype, out_dtype,", ",3), fref=lambda x, w: topi.testing.depthwise_conv2d_python_nchw( x, w, (1, 1), \"SAME\"))", "instruction is generated for older Intel # generations, because we", "y = opfunc(x) yy = run_infer_type(y) assert yy.checked_type == relay.TensorType((n,", "= data.shape target_dim = 1 for i in range(len(shape) -", "1 (ic, ih, iw) = (3, 28, 28) (oc, oh,", "def test_flatten_infer_type(): d1, d2, d3, d4 = tvm.size_var(\"d1\"), tvm.size_var(\"d2\"), tvm.size_var(\"d3\"),", "\"float32\" def get_shape(): if layout == \"NCHW\": return (c, h,", "= np.random.uniform(size=dshape).astype(dtype) ref_res = reffunc(data.reshape(1, 3, 14, 2, 14, 2),", "= tvm.relay.Module() mod[\"main\"] = func test_schedule='{\"i\": [\"llvm -device=arm_cpu\", \"topi_nn_depthwise_conv2d_nchw\", \\", "[8, 1]], \\ [\"tile_ow\", \"sp\", [1, 8]], \\ [\"reorder_0\", \"re\",", "== relay.TensorType( (n, h, w, 16), \"int32\") def test_conv2d_run(): def", "= (10, 3, 1, 3) # mixed precision. 
run_test_conv2d(\"int8\", \"int32\",", "assert yy.checked_type == relay.TensorType((n, c, tvm.expr.Cast(\"int32\", tvm.round(d*scale)), tvm.expr.Cast(\"int32\", tvm.round(h*scale)), tvm.expr.Cast(\"int32\",", "3, 3) run_test_conv2d(\"float32\", \"float32\", 1, dshape, kshape, padding=(1, 1), channels=32,", "10, 12 x = relay.var(\"x\", relay.TensorType((n, h, w, c), \"float32\"))", "output channels to check int8 robustness # Output channels should", "\"float32\", 1, dshape, kshape, padding=(0, 0), channels=192, kernel_size=(3, 3)) run_test_conv2d_cuda(\"float32\",", "dilation) if fref is None: ref_res = topi.testing.conv2d_nchw_python( data.astype(out_dtype), dkernel.astype(out_dtype),", "# check output shape f_out_shape = tuple(map(lambda x: int(x), run_infer_type(func).ret_type.shape))", "w, c = tvm.size_var(\"n\"), 10, 10, 12 x = relay.var(\"x\",", "= opfunc(x, pool_size=(2, 2, 2), strides=(2, 2, 2), padding=padding) func", "(c, h, w), (c, int(round(h*scale_h)), int(round(w*scale_w))) else: return (h, w,", "0), out_shape=(1, 3, 16, 19, 16)) _test_pool3d(relay.nn.max_pool3d, padding=(0, 0, 4,", "w = tvm.size_var(\"n\"), 8, 16, 16, 16 scale_d = 2.0", "2, 1) d_np = np.zeros(shape=oshape) d_np[:,:,0:c_np.shape[2]] = c_np ref_res =", "y) dtype = \"float32\" a_np = np.random.uniform(low=0.001, size=(n, ic, ih,", "w), \"int8\")) w = relay.var(\"w\", relay.TensorType((2, 10, 3, 3, 3),", "test_upsampling3d(): _test_upsampling3d(\"NCDHW\", \"nearest_neighbor\") _test_upsampling3d(\"NCDHW\", \"trilinear\", \"align_corners\") _test_upsampling3d(\"NDHWC\", \"nearest_neighbor\") _test_upsampling3d(\"NDHWC\", \"trilinear\",", "return np.reshape(data, (shape[0], target_dim)) def test_batch_flatten(): t1 = relay.TensorType((5, 10,", "# Sweep the input channels to check int8 robustness #", "picked up. 
for target in targets: if llvm_version >= 8:", "relay.TensorType((n, h, w, c), \"float32\")) y = opfunc(x, layout=\"NHWC\") yy", "= (n, ic, ih, iw) x = relay.var(\"x\", shape=dshape) y", "axis=(2,3), keepdims=True) for target, ctx in ctx_list(): intrp1 = relay.create_executor(\"graph\",", "24), \"float32\") x = relay.var(\"x\", relay.TensorType((d1, 2, d3, 3), \"float32\"))", "in batch dimension n, c, h, w = tvm.size_var(\"n\"), 10,", "tvm.testing.assert_allclose(op_res2.asnumpy(), ref_res, rtol=1e-5) def test_pad_infer_type(): # entirely concrete case n,", "= 1, 2, 3, 4 t = relay.var(\"t\", relay.TensorType((n, c,", "weight], y) wdata = np.random.rand(*kernel_shape) * 10 parameters = {\"weight\":", "data = np.random.uniform(size=dshape_nhwc).astype(dtype) kernel = np.random.uniform(size=kshape_hwoi).astype(dtype) # use true kshape", "relay.var(\"t\", relay.TensorType((n, c, h, w), \"float32\")) y = relay.nn.pad(t, ((1,", "topi.testing.conv2d_transpose_nchw_python( data, kernel, 2, 1) d_np = np.zeros(shape=oshape) d_np[:,:,0:c_np.shape[2],0:c_np.shape[3]] =", "y.astext() yy = run_infer_type(y) assert yy.checked_type == relay.TensorType((n, 10, 5,", "assert \"out_dtype=\\\"int32\\\"\" in y.astext() yy = run_infer_type(y) assert yy.checked_type ==", "kernel, 2, 1) d_np = np.zeros(shape=oshape) d_np[:,:,0:c_np.shape[2]] = c_np ref_res", "\"float32\") def _test_pool2d(opfunc, reffunc): n, c, h, w = tvm.size_var(\"n\"),", "with open(temp.relpath(\"temp.log\"), \"w\") as log_file: log_file.write(test_schedule) with autotvm.apply_history_best(temp.relpath(\"temp.log\")): with relay.build_config(opt_level=3):", "op_res1 = intrp1.evaluate(func)(data, kernel) tvm.testing.assert_allclose(op_res1.asnumpy(), ref_res, rtol=1e-5, atol=1e-5) def test_upsampling_infer_type():", "op_res1 = intrp1.evaluate(func)(data) tvm.testing.assert_allclose(op_res1.asnumpy(), ref_res, rtol=1e-5, atol=1e-5) def test_pool2d(): _test_pool2d(relay.nn.max_pool2d,", "by shape of w, mixed precision n, c, w =", "= (512, 1, 3, 3) compile_test_conv2d_arm_cpu(\"float32\", \"float32\", 1, dshape, kshape,", "h, w, c), \"float32\")) y = opfunc(x, layout=\"NHWC\") yy =", "None: except_targets = [] x = relay.var(\"x\", shape=dshape, dtype=dtype) w", "in self.memory: return self.memory[key] cfg = autotvm.task.space.FallbackConfigEntity() cfg.template_key = 'winograd'", "regarding copyright ownership. 
The ASF licenses this file # to", "lib, params = relay.build_module.build(mod, target=target, params=params) module = tvm.contrib.graph_runtime.create(graph, lib,", "= np.random.uniform(size=dshape).astype(dtype) ref_res = topi.testing.pool1d_ncw_python(data, (2,), (2,), (0, 0), (1,", "out = executor.evaluate(func)(data) tvm.testing.assert_allclose(out.asnumpy(), ref, rtol=1e-5, atol=1e-5) def test_upsampling(): _test_upsampling(\"NCHW\",", "or agreed to in writing, # software distributed under the", "scale_h=scale_h, scale_w=scale_w,\\ layout=layout, method=method,\\ coordinate_transformation_mode=coordinate_transformation_mode) yy = run_infer_type(y) assert yy.checked_type", "!= 'cuda': continue params = {'w': tvm.nd.array(kernel)} graph, lib, params", "import numpy as np import tvm from tvm import autotvm", "out_dtype=\"int32\") yy = run_infer_type(y) assert yy.checked_type == relay.TensorType( (n, d,", "atol=1e-5) def test_conv2d_transpose_nhwc_run(): dshape_nhwc = (1, 18, 18, 3) kshape_hwoi", "(1, 3, 18) kshape = (3, 10, 3) oshape =", "y) data = np.random.uniform(-scale, scale, size=dshape).astype(dtype) kernel = np.random.uniform(-scale, scale,", "stride 1, padding 1, kernel 3x3 dshape = (1, 80,", "log_file.write(test_schedule) with autotvm.apply_history_best(temp.relpath(\"temp.log\")): with relay.build_config(opt_level=3): print('Compiling...') graph_json, mod, params =", "d, h, w), \"int8\")) w = relay.var(\"w\", relay.TensorType((2, 10, 3,", "dshape, kshape, padding=(1, 1), channels=10, kernel_size=3) # mixed precision run_test_conv1d(\"int8\",", "== relay.TensorType(o_shape, dtype) func = relay.Function([x], z) x_data = np.random.uniform(low=-1,", "128 x = relay.var(\"x\", relay.ty.TensorType((o, i, h, w), \"int16\")) y", "4))) yy = run_infer_type(y) assert yy.checked_type == relay.TensorType((n + 2,", "kernel_layout='OIHW', dtypes=dtypes) assert _has_fast_int8_instructions(asm, target) asm = _compile(ic=17, oc=29, target=target,", "= relay.Function([x], y) dtype = \"float32\" a_np = np.random.uniform(low=0.001, size=(n,", "(n, 15, 15, 11), \"float32\") def test_conv2d_transpose_nchw_run(): dshape = (1,", "7, 7) x = relay.var(\"x\", shape=dshape) y = opfunc(x) func", "\"Output shape mismatch. 
def test_conv1d_infer_type():
    # symbolic in batch dimension
    n, c, w = tvm.var("n"), 10, 224
    x = relay.var("x", relay.ty.TensorType((n, c, w), "float32"))
    w = relay.var("w")
    y = relay.nn.conv1d(x, w, kernel_size=3, padding=(1, 1), channels=2)
    yy = run_infer_type(y)
    assert yy.checked_type == relay.TensorType((n, 2, 224), "float32")
    assert yy.args[1].checked_type == relay.TensorType((2, 10, 3), "float32")

    # infer by shape of w, mixed precision
    n, c, w = tvm.var("n"), 10, 224
    x = relay.var("x", relay.TensorType((n, c, w), "int8"))
    w = relay.var("w", relay.TensorType((2, 10, 3), "int8"))
    y = relay.nn.conv1d(x, w, out_dtype="int32")
    assert "out_dtype=\"int32\"" in y.astext()
    yy = run_infer_type(y)
    assert yy.checked_type == relay.TensorType((n, 2, 222), "int32")

    # infer shape in case of different dtypes for input and weight.
    n, c, w = tvm.var("n"), 10, 224
    x = relay.var("x", relay.TensorType((n, c, w), "uint8"))
    w = relay.var("w", relay.TensorType((2, 10, 3), "int8"))
    y = relay.nn.conv1d(x, w, out_dtype="int32")
    assert "out_dtype=\"int32\"" in y.astext()
    yy = run_infer_type(y)
    assert yy.checked_type == relay.TensorType((n, 2, 222), "int32")

    # Infer with NWC
    n, c, w = 4, 32, 224
    x = relay.var("x", relay.TensorType((n, w, c), "int8"))
    wt = relay.var("w")
    y = relay.nn.conv1d(x, wt, kernel_size=3, padding=(1, 1), channels=16,
                        data_layout="NWC", out_dtype="int32")
    yy = run_infer_type(y)
    assert yy.checked_type == relay.TensorType((n, w, 16), "int32")
def test_conv2d_infer_type():
    # symbolic in batch dimension
    n, c, h, w = tvm.size_var("n"), 10, 224, 224
    x = relay.var("x", relay.ty.TensorType((n, c, h, w), "float32"))
    w = relay.var("w")
    y = relay.nn.conv2d(x, w, kernel_size=(3, 3), padding=(1, 1), channels=2)
    yy = run_infer_type(y)
    assert yy.checked_type == relay.TensorType((n, 2, 224, 224), "float32")
    assert yy.args[1].checked_type == relay.TensorType((2, 10, 3, 3), "float32")

    # infer by shape of w, mixed precision
    n, c, h, w = tvm.size_var("n"), 10, 224, 224
    x = relay.var("x", relay.TensorType((n, c, h, w), "int8"))
    w = relay.var("w", relay.TensorType((2, 10, 3, 3), "int8"))
    y = relay.nn.conv2d(x, w, out_dtype="int32")
    assert "out_dtype=\"int32\"" in y.astext()
    yy = run_infer_type(y)
    assert yy.checked_type == relay.TensorType((n, 2, 222, 222), "int32")

    # infer shape in case of different dtypes for input and weight.
    n, c, h, w = tvm.size_var("n"), 10, 224, 224
    x = relay.var("x", relay.TensorType((n, c, h, w), "uint8"))
    w = relay.var("w", relay.TensorType((2, 10, 3, 3), "int8"))
    y = relay.nn.conv2d(x, w, out_dtype="int32")
    assert "out_dtype=\"int32\"" in y.astext()
    yy = run_infer_type(y)
    assert yy.checked_type == relay.TensorType((n, 2, 222, 222), "int32")

    # Infer with a different layout
    n, c, h, w = 4, 32, 224, 224
    x = relay.var("x", relay.TensorType((n // 4, c // 4, h, w, 4, 4), "int8"))
    wt = relay.var("w")
    y = relay.nn.conv2d(x, wt, kernel_size=(3, 3), padding=(1, 1), channels=16,
                        data_layout="NCHW4n4c", kernel_layout="OIHW4o4i",
                        out_dtype="int32")
    yy = run_infer_type(y)
    assert yy.checked_type == relay.TensorType((1, 4, 224, 224, 4, 4), "int32")
    assert yy.args[1].checked_type == relay.TensorType((4, 8, 3, 3, 4, 4), "int8")

    # Infer with NHWC
    n, c, h, w = 4, 32, 224, 224
    x = relay.var("x", relay.TensorType((n, h, w, c), "int8"))
    wt = relay.var("w")
    y = relay.nn.conv2d(x, wt, kernel_size=(3, 3), padding=(1, 1), channels=16,
                        data_layout="NHWC", out_dtype="int32")
    yy = run_infer_type(y)
    assert yy.checked_type == relay.TensorType((n, h, w, 16), "int32")
def test_conv1d_run():
    def run_test_conv1d(dtype, out_dtype, scale, dshape, kshape,
                        padding=(1, 1), fref=None, dilation=1,
                        except_targets=None, **attrs):
        if except_targets is None:
            except_targets = []
        x = relay.var("x", shape=dshape, dtype=dtype)
        w = relay.var("w", dtype=dtype)
        y = relay.nn.conv1d(x, w, padding=padding, dilation=dilation, **attrs)
        func = relay.Function([x, w], y)
        data = np.random.uniform(-scale, scale, size=dshape).astype(dtype)
        kernel = np.random.uniform(-scale, scale, size=kshape).astype(dtype)
        ref_res = topi.testing.conv1d_ncw_python(
            data.astype(out_dtype), kernel.astype(out_dtype), 1, padding, dilation)
        for target, ctx in ctx_list():
            if target in except_targets:
                continue
            intrp1 = relay.create_executor("graph", ctx=ctx, target=target)
            op_res1 = intrp1.evaluate(func)(data, kernel)
            tvm.testing.assert_allclose(op_res1.asnumpy(), ref_res,
                                        rtol=1e-5, atol=1e-5)

    # normal conv1d
    dshape = (1, 3, 224)
    kshape = (10, 3, 3)
    run_test_conv1d("float32", "float32", 1, dshape, kshape,
                    padding=(1, 1), channels=10, kernel_size=3)
    # mixed precision
    run_test_conv1d("int8", "int32", 1, dshape, kshape,
                    padding=(1, 1), channels=10, kernel_size=3)
    # dilated conv1d
    dshape = (1, 3, 18)
    kshape = (10, 3, 3)
    run_test_conv1d("float32", "float32", 1, dshape, kshape,
                    padding=(1, 1), channels=10, kernel_size=3, dilation=3)
def test_conv2d_run():
    def run_test_conv2d(dtype, out_dtype, scale, dshape, kshape,
                        padding=(1, 1), fref=None, groups=1,
                        dilation=(1, 1), except_targets=None, **attrs):
        if except_targets is None:
            except_targets = []
        x = relay.var("x", shape=dshape, dtype=dtype)
        w = relay.var("w", dtype=dtype)
        y = relay.nn.conv2d(x, w, padding=padding, dilation=dilation,
                            groups=groups, **attrs)
        func = relay.Function([x, w], y)
        data = np.random.uniform(-scale, scale, size=dshape).astype(dtype)
        kernel = np.random.uniform(-scale, scale, size=kshape).astype(dtype)
        dkernel = topi.testing.dilate_python(kernel, (1, 1) + dilation)
        if fref is None:
            ref_res = topi.testing.conv2d_nchw_python(
                data.astype(out_dtype), dkernel.astype(out_dtype), 1, padding,
                groups=groups)
        else:
            ref_res = fref(data.astype(out_dtype), dkernel.astype(out_dtype))
        for target, ctx in ctx_list():
            if target in except_targets:
                continue
            intrp1 = relay.create_executor("graph", ctx=ctx, target=target)
            op_res1 = intrp1.evaluate(func)(data, kernel)
            tvm.testing.assert_allclose(op_res1.asnumpy(), ref_res,
                                        rtol=1e-5, atol=1e-5)

    # depthwise conv2d
    dshape = (1, 32, 18, 18)
    kshape = (32, 1, 3, 3)
    run_test_conv2d("float32", "float32", 1, dshape, kshape,
                    padding=(1, 1), channels=32, groups=32, kernel_size=(3, 3),
                    fref=lambda x, w: topi.testing.depthwise_conv2d_python_nchw(
                        x, w, (1, 1), "SAME"))

    # group conv2d (CUDA is disabled for the 'direct' schedule)
    dshape = (1, 32, 18, 18)
    kshape = (32, 4, 3, 3)
    run_test_conv2d("float32", "float32", 1, dshape, kshape,
                    padding=(1, 1), channels=32, groups=8, kernel_size=(3, 3),
                    except_targets=['cuda'])
    # also group conv2d
    dshape = (1, 32, 18, 18)
    kshape = (64, 1, 3, 3)
    run_test_conv2d("float32", "float32", 1, dshape, kshape,
                    padding=(1, 1), channels=64, groups=32, kernel_size=(3, 3),
                    except_targets=['cuda'])

    # normal conv2d
    dshape = (1, 3, 224, 224)
    kshape = (10, 3, 3, 3)
    run_test_conv2d("float32", "float32", 1, dshape, kshape,
                    padding=(1, 1), channels=10, kernel_size=(3, 3))
    # mixed precision
    run_test_conv2d("int8", "int32", 1, dshape, kshape,
                    padding=(1, 1), channels=10, kernel_size=(3, 3))
    kshape = (10, 3, 1, 3)
    # mixed precision.
    run_test_conv2d("int8", "int32", 1, dshape, kshape,
                    padding=(0, 1), channels=10, kernel_size=(1, 3))
    # dilated conv2d
    dshape = (1, 3, 18, 18)
    kshape = (10, 3, 3, 3)
    run_test_conv2d("float32", "float32", 1, dshape, kshape,
                    padding=(1, 1), channels=10, kernel_size=(3, 3),
                    dilation=(3, 3))
def _test_pool2d(opfunc, reffunc):
    n, c, h, w = tvm.size_var("n"), 10, 224, 224
    x = relay.var("x", relay.TensorType((n, c, h, w), "float32"))
    y = opfunc(x, pool_size=(1, 1))
    assert "pool_size=" in y.astext()
    yy = run_infer_type(y)
    assert yy.checked_type == relay.TensorType((n, 10, 224, 224), "float32")
    # test execution
    dtype = "float32"
    dshape = (1, 3, 28, 28)
    x = relay.var("x", shape=dshape, dtype=dtype)
    y = opfunc(x, pool_size=(2, 2), strides=(2, 2), padding=(0, 0))
    func = relay.Function([x], y)
    data = np.random.uniform(size=dshape).astype(dtype)
    ref_res = reffunc(data.reshape(1, 3, 14, 2, 14, 2), axis=(3, 5))
    for target, ctx in ctx_list():
        intrp1 = relay.create_executor("graph", ctx=ctx, target=target)
        op_res1 = intrp1.evaluate(func)(data)
        tvm.testing.assert_allclose(op_res1.asnumpy(), ref_res, rtol=1e-5, atol=1e-5)

def _test_pool2d_int(opfunc, reffunc, dtype):
    n, c, h, w = tvm.size_var("n"), 10, 224, 224
    x = relay.var("x", relay.TensorType((n, c, h, w), dtype))
    y = opfunc(x, pool_size=(1, 1))
    assert "pool_size=" in y.astext()
    yy = run_infer_type(y)
    assert yy.checked_type == relay.TensorType((n, 10, 224, 224), dtype)
    # test execution
    dtype = "int32"
    dshape = (1, 3, 28, 28)
    x = relay.var("x", shape=dshape, dtype=dtype)
    y = opfunc(x, pool_size=(2, 2), strides=(2, 2), padding=(0, 0))
    func = relay.Function([x], y)
    data = np.random.random_integers(low=-128, high=128, size=dshape)
    ref_res = reffunc(data.reshape(1, 3, 14, 2, 14, 2), axis=(3, 5)).astype(dtype)
    for target, ctx in ctx_list():
        intrp1 = relay.create_executor("graph", ctx=ctx, target=target)
        op_res1 = intrp1.evaluate(func)(data)
        tvm.testing.assert_allclose(op_res1.asnumpy(), ref_res, rtol=1e-5, atol=1e-5)

def _test_global_pool2d(opfunc, reffunc):
    n, c, h, w = tvm.size_var("n"), tvm.size_var("c"), 224, 224
    x = relay.var("x", relay.TensorType((n, h, w, c), "float32"))
    y = opfunc(x, layout="NHWC")
    yy = run_infer_type(y)
    assert yy.checked_type == relay.TensorType((n, 1, 1, c), "float32")

    n, c, h, w = tvm.size_var("n"), tvm.size_var("c"), tvm.size_var("h"), tvm.size_var("w")
    x = relay.var("x", relay.TensorType((n, c, h, w), "float32"))
    y = opfunc(x)
    yy = run_infer_type(y)
    assert yy.checked_type == relay.TensorType((n, c, 1, 1), "float32")
    # test execution
    dtype = "float32"
    dshape = (1, 1024, 7, 7)
    x = relay.var("x", shape=dshape)
    y = opfunc(x)
    func = relay.Function([x], y)
    data = np.random.uniform(size=dshape).astype(dtype)
    ref_res = reffunc(data, axis=(2, 3), keepdims=True)
    for target, ctx in ctx_list():
        intrp1 = relay.create_executor("graph", ctx=ctx, target=target)
        op_res1 = intrp1.evaluate(func)(data)
        tvm.testing.assert_allclose(op_res1.asnumpy(), ref_res, rtol=1e-5, atol=1e-5)

def test_pool2d():
    _test_pool2d(relay.nn.max_pool2d, np.max)
    _test_pool2d(relay.nn.avg_pool2d, np.mean)
    _test_pool2d_int(relay.nn.avg_pool2d, np.mean, 'int32')
    _test_pool2d_int(relay.nn.avg_pool2d, np.mean, 'uint16')
    _test_global_pool2d(relay.nn.global_max_pool2d, np.max)
    _test_global_pool2d(relay.nn.global_avg_pool2d, np.mean)
def test_flatten_infer_type():
    d1, d2, d3, d4 = tvm.size_var("d1"), tvm.size_var("d2"), tvm.size_var("d3"), tvm.size_var("d4")
    x = relay.var("x", relay.TensorType((d1, d2, d3, d4), "float32"))
    y = relay.nn.batch_flatten(x)
    yy = run_infer_type(y)
    assert yy.checked_type == relay.TensorType((d1, ((d2*d3)*d4)), "float32")

    x = relay.var("x", relay.TensorType((3, 2, 4, 3), "float32"))
    y = relay.nn.batch_flatten(x)
    yy = run_infer_type(y)
    assert yy.checked_type == relay.TensorType((3, 24), "float32")

    x = relay.var("x", relay.TensorType((d1, 2, d3, 3), "float32"))
    y = relay.nn.batch_flatten(x)
    yy = run_infer_type(y)
    assert yy.checked_type == relay.TensorType((d1, ((2*d3)*3)), "float32")

def batch_flatten(data):
    shape = data.shape
    target_dim = 1
    for i in range(len(shape) - 1):
        target_dim = target_dim * shape[i + 1]
    return np.reshape(data, (shape[0], target_dim))

def test_batch_flatten():
    t1 = relay.TensorType((5, 10, 5))
    x = relay.Var("x", t1)
    func = relay.Function([x], relay.nn.batch_flatten(x))

    data = np.random.rand(5, 10, 5).astype(t1.dtype)
    ref_res = batch_flatten(data)
    for target, ctx in ctx_list():
        intrp = relay.create_executor("graph", ctx=ctx, target=target)
        op_res = intrp.evaluate(func)(data)
        np.testing.assert_allclose(op_res.asnumpy(), ref_res, rtol=0.01)
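# The batch_flatten reference above multiplies out every trailing dimension by
# hand; NumPy's reshape with -1 computes the same flattened layout. A small
# illustrative check (not part of the original test file):
_flat_in = np.random.rand(5, 10, 5).astype("float32")
np.testing.assert_array_equal(batch_flatten(_flat_in),
                              _flat_in.reshape(_flat_in.shape[0], -1))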
def test_pad_infer_type():
    # entirely concrete case
    n, c, h, w = 1, 2, 3, 4
    t = relay.var("t", relay.TensorType((n, c, h, w), "float32"))
    y = relay.nn.pad(t, ((1, 1), (2, 2), (3, 3), (4, 4)))
    "pad_width=" in y.astext()
    yy = run_infer_type(y)
    assert yy.checked_type == relay.TensorType((3, 6, 9, 12), "float32")

    # some symbolic values
    n, c, h, w = tvm.size_var("n"), 2, 3, tvm.size_var("w")
    t = relay.var("t", relay.TensorType((n, c, h, w), "float32"))
    y = relay.nn.pad(t, ((1, 1), (2, 2), (3, 3), (4, 4)))
    yy = run_infer_type(y)
    assert yy.checked_type == relay.TensorType((n + 2, 6, 9, w + 8), "float32")

def test_pad_run():
    def _test_run(dtype):
        dshape = (4, 10, 7, 7)
        x = relay.var("x", shape=dshape)
        y = relay.nn.pad(x, ((1, 1), (2, 2), (3, 3), (4, 4)))
        func = relay.Function([x], y)
        data = np.random.uniform(size=dshape).astype(dtype)
        ref_res = np.pad(data, ((1, 1), (2, 2), (3, 3), (4, 4)), 'constant')
        for target, ctx in ctx_list():
            intrp1 = relay.create_executor("graph", ctx=ctx, target=target)
            op_res1 = intrp1.evaluate(func)(data)
            tvm.testing.assert_allclose(op_res1.asnumpy(), ref_res, rtol=1e-5, atol=1e-5)
    _test_run('float32')
    _test_run('int32')
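# np.pad with mode 'constant' is the reference used above; each axis grows by
# the sum of its (before, after) padding, so (4, 10, 7, 7) padded by
# ((1, 1), (2, 2), (3, 3), (4, 4)) becomes (6, 14, 13, 15). Illustrative check
# (not part of the original test file):
_pad_in = np.zeros((4, 10, 7, 7), dtype="float32")
assert np.pad(_pad_in, ((1, 1), (2, 2), (3, 3), (4, 4)), 'constant').shape == (6, 14, 13, 15)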
run_test_conv2d(\"int8\", \"int32\", 1, dshape, kshape,", "relay.var(\"x\", shape=dshape) y = relay.nn.upsampling(x, scale_h=scale_h, scale_w=scale_w, layout=layout, method=method, align_corners=align_corners)", "relay.var(\"w\", dtype=dtype) y = relay.nn.conv1d(x, w, padding=padding, dilation=dilation, **attrs) func", "np.random.uniform(low=-1, high=1, size=shape).astype(dtype) ref_res = x_data.flatten().reshape(o_shape) for target, ctx in", "_test_pool2d_int(relay.nn.avg_pool2d, np.mean, 'uint16') _test_global_pool2d(relay.nn.global_max_pool2d, np.max) _test_global_pool2d(relay.nn.global_avg_pool2d, np.mean) def test_pool1d(): def", "int(round(w*scale_w))), layout) for target, ctx in ctx_list(): executor = relay.create_executor(\"graph\",", "else: raise ValueError('Not supported') if kernel_layout == 'OIHW': kernel_shape =", "x = relay.var(\"x\", relay.ty.TensorType((n, c, w), \"float32\")) w = relay.var(\"w\")", "\"an\", [\"unroll\", \"none\"]], \\ [\"ann_spatial\", \"an\", [\"unroll\", \"unroll\", \"vec\"]], \\", "ref_res, rtol=1e-5, atol=1e-5) # normal conv3d dshape = (1, 5,", "data_shape = (n, h, w, ic) x = relay.var(\"x\", relay.TensorType(data_shape,", "rtol=1e-5, atol=1e-5) _test_pool1d(relay.nn.max_pool1d) _test_pool1d(relay.nn.avg_pool1d) def test_pool3d(): def _test_pool3d(opfunc, padding=(0, 0,", "padding=(1, 1), channels=32, groups=32, kernel_size=(3 ,3), fref=lambda x, w: topi.testing.depthwise_conv2d_python_nchw(", "params = {'w': tvm.nd.array(kernel)} graph, lib, params = relay.build_module.build(mod, target=target,", "def test_conv2d_transpose_nchw_run(): dshape = (1, 3, 18, 18) kshape =", "4)), 'constant') for target, ctx in ctx_list(): intrp1 = relay.create_executor(\"graph\",", "\"float32\" data = np.random.uniform(size=dshape).astype(dtype) kernel = np.random.uniform(size=kshape).astype(dtype) c_np = topi.testing.conv2d_transpose_nchw_python(", "3.1976189613342285, 1570811630.6058347], \"v\": 0.1}' temp = util.tempdir() with open(temp.relpath(\"temp.log\"), \"w\")", "> 0, axis=(2,3)) b_np[:,:,i,j] = np.sum(pad_np[:, :, i*sh:i*sh+kh, j*sw:j*sw+kw], axis=(2,3))", "\\ [\"ann_spatial\", \"an\", [\"unroll\", \"unroll\", \"vec\"]], \\ [\"data_pad_inline\", \"ot\", 4],", "tvm.size_var(\"n\"), tvm.size_var(\"c\") x = relay.var(\"x\", relay.TensorType((n, c, 100, 200), \"float32\"))", "ref_res, rtol=1e-5) def test_pad_infer_type(): # entirely concrete case n, c,", "relay.build(func, target, params=parameters) def test_bitserial_conv2d_infer_type(): # Basic shape test with", "ref_res, rtol=1e-5, atol=1e-5) # normal conv1d dshape = (1, 3,", "run_infer_type(y) assert yy.checked_type == relay.TensorType( (n, 15, 10, 12), \"float32\")", "\"float32\"], \\ [\"TENSOR\", [512, 1, 3, 3], \"float32\"], \\ [1,", "except_targets=['cuda']) # normal conv2d dshape = (1, 3, 224, 224)", "out_dtype, scale, dshape, kshape, padding=(1, 1), fref=None, groups=1, dilation=(1, 1),", "= relay.var(\"x\", relay.TensorType((n, h, w, c), \"float32\")) y = opfunc(x,", "get_shape() x = relay.var(\"x\", relay.TensorType((n,) + ishape, dtype)) y =", "n, h, w, ch, cw = 1, 64, 64, 3,", "topi.testing.conv1d_ncw_python( data.astype(out_dtype), kernel.astype(out_dtype), 1, padding, dilation) for target, ctx in", "i, h, w), \"int16\")) y = relay.nn.bitpack(x, bit_axis=4, pack_axis=1, pack_type='uint16',", "4, 4), \"int8\")) wt = relay.var(\"w\") y = relay.nn.conv2d(x, wt,", "= relay.var(\"x\", relay.TensorType((n, h, w, c), \"int8\")) wt = relay.var(\"w\")", "np.random.uniform(-scale, scale, 
size=kshape).astype(dtype) ref_res = topi.testing.conv1d_ncw_python( data.astype(out_dtype), kernel.astype(out_dtype), 1, padding,", "shape=dshape) y = relay.nn.pad(x, ((1, 1), (2, 2), (3, 3),", "w = tvm.size_var(\"n\"), 10, 224, 224, 224 x = relay.var(\"x\",", "def test_upsampling3d(): _test_upsampling3d(\"NCDHW\", \"nearest_neighbor\") _test_upsampling3d(\"NCDHW\", \"trilinear\", \"align_corners\") _test_upsampling3d(\"NDHWC\", \"nearest_neighbor\") _test_upsampling3d(\"NDHWC\",", "kernel_size=(1 ,3)) # dilated conv2d dshape = (1, 3, 18,", "dtype): n, c, h, w = tvm.size_var(\"n\"), 10, 224, 224", "data = np.random.uniform(-scale, scale, size=dshape).astype(dtype) kernel = np.random.uniform(-scale, scale, size=kshape).astype(dtype)", "1]], \\ [\"tile_ow\", \"sp\", [1, 8]], \\ [\"reorder_0\", \"re\", [0,", "# kshape is HWOI and kernel_layout is HWIO y =", "ih, iw)).astype(dtype) pad_np = np.zeros(shape=(n, ic, ih+2*ph, iw+2*pw)).astype(dtype) no_zero =", "= (1, 500) dtype = \"float32\" x = relay.var(\"x\", relay.TensorType(shape,", "1), channels=15) assert \"channels=15\" in y.astext() yy = run_infer_type(y) assert", "agreed to in writing, # software distributed under the License", "np.random.random_integers(low=-128, high=128, size=dshape) ref_res = reffunc(data.reshape(1,3,14,2,14,2), axis=(3,5)).astype(dtype) for target, ctx", "test_batch_flatten(): t1 = relay.TensorType((5, 10, 5)) x = relay.Var(\"x\", t1)", "np.zeros(shape=(n, oc, oh, ow)).astype(dtype) for i in range(oh): for j", "test execution dtype = \"float32\" dshape = (1, 1024, 7,", "run_infer_type(y) assert yy.checked_type == relay.TensorType((n, c , h, w)) shape", "= a_np for target, ctx in ctx_list(): intrp1 = relay.create_executor(\"graph\",", "weight_dtype)) y = relay.nn.conv2d(x, weight, kernel_size=(3, 3), groups=64, padding=(1, 1),", "= tvm.size_var(\"n\"), 10, 224, 224, 224 x = relay.var(\"x\", relay.ty.TensorType((n,", "28) (oc, oh, ow) = (3, 15, 15) dshape =", ":, i*sh:i*sh+kh, j*sw:j*sw+kw] > 0, axis=(2,3)) b_np[:,:,i,j] = np.sum(pad_np[:, :,", "16)) _test_pool3d(relay.nn.avg_pool3d, padding=(0, 3, 0, 0, 3, 0), out_shape=(1, 3,", "kshape, padding=(1, 1), channels=10, kernel_size=3) # mixed precision run_test_conv1d(\"int8\", \"int32\",", "padding=(1, 1), channels=10, kernel_size=(3 ,3), dilation=(3, 3)) def test_conv2d_winograd(): class", "d3, d4), \"float32\")) y = relay.nn.batch_flatten(x) yy = run_infer_type(y) assert", "= relay.nn.conv2d(x, wt, kernel_size=(3, 3), padding=(1, 1), channels=16, data_layout=\"NHWC\", out_dtype=\"int32\")", "symbolic values n, c, h, w = tvm.size_var(\"n\"), 2, 3,", "func = relay.Function([x], y) data = np.random.uniform(size=dshape).astype(dtype) ref_res = np.pad(data,", "2, 1) d_np = np.zeros(shape=oshape_nhwc) d_np[:,0:c_np.shape[1],0:c_np.shape[2],:] = c_np def test_conv1d_transpose_ncw_run():", "dkernel.astype(out_dtype)) for target, ctx in ctx_list(): if target in except_targets:", "beta) for target, ctx in ctx_list(): intrp1 = relay.create_executor(\"graph\", ctx=ctx,", "= np.random.rand(*kernel_shape) * 10 parameters = {\"weight\": tvm.nd.array(wdata.astype(weight_dtype))} with relay.build_config(opt_level=3):", "channels to check int8 robustness # Input channels should be", "kshape, padding=(1, 1), channels=10, kernel_size=(3 ,3)) kshape = (10, 3,", "\"int16\")) y = relay.nn.bitserial_conv2d( x, w, kernel_size=(3, 3), padding=(0, 0),", "test_lrn() test_l2_normalize() test_conv1d_infer_type() test_conv2d_infer_type() test_conv3d_infer_type() test_bitpack_infer_type() 
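# A rough NumPy sketch of what l2_normalize computes along axis=1, assuming
# the usual definition x / sqrt(max(sum(x**2, axis), eps)); the test above
# defers to topi.testing.l2_normalize_python for the authoritative reference.
def _l2_normalize_np(x, eps, axis=1):
    denom = np.sqrt(np.maximum(np.sum(x * x, axis=axis, keepdims=True), eps))
    return x / denom

_l2_in = np.random.uniform(low=-1, high=1, size=(1, 5, 10, 10)).astype("float32")
assert _l2_normalize_np(_l2_in, eps=0.001).shape == _l2_in.shape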
def test_upsampling_infer_type():
    n, c, h, w = tvm.size_var("n"), tvm.size_var("c"), tvm.size_var("h"), tvm.size_var("w")
    scale = tvm.const(2.0, "float64")
    x = relay.var("x", relay.TensorType((n, c, h, w), "float32"))
    y = relay.nn.upsampling(x, scale_h=2, scale_w=2, layout="NCHW", method="bilinear")
    yy = run_infer_type(y)
    assert yy.checked_type == relay.TensorType(
        (n, c, tvm.expr.Cast("int32", tvm.round(h*scale)),
         tvm.expr.Cast("int32", tvm.round(w*scale))), "float32")

    n, c = tvm.size_var("n"), tvm.size_var("c")
    x = relay.var("x", relay.TensorType((n, c, 100, 200), "float32"))
    y = relay.nn.upsampling(x, scale_h=2, scale_w=2, layout="NCHW", method="bilinear")
    yy = run_infer_type(y)
    assert yy.checked_type == relay.TensorType((n, c, 200, 400), "float32")

def _test_upsampling(layout, method, align_corners=False):
    n, c, h, w = tvm.size_var("n"), 16, 32, 32
    scale_h = 2.0
    scale_w = 2.0
    dtype = "float32"
    def get_shape():
        if layout == "NCHW":
            return (c, h, w), (c, int(round(h*scale_h)), int(round(w*scale_w)))
        else:
            return (h, w, c), (int(round(h*scale_h)), int(round(w*scale_w)), c)
    ishape, oshape = get_shape()
    x = relay.var("x", relay.TensorType((n,) + ishape, dtype))
    y = relay.nn.upsampling(x, scale_h=scale_h, scale_w=scale_w, layout=layout,
                            method=method, align_corners=align_corners)
    yy = run_infer_type(y)
    assert yy.checked_type == relay.TensorType((n,) + oshape, dtype)
    dshape = (1,) + ishape
    x = relay.var("x", shape=dshape)
    y = relay.nn.upsampling(x, scale_h=scale_h, scale_w=scale_w, layout=layout,
                            method=method, align_corners=align_corners)
    func = relay.Function([x], y)
    data = np.random.uniform(size=dshape).astype(dtype)
    if method == "nearest_neighbor":
        ref = topi.testing.upsampling_python(data, (scale_h, scale_w), layout)
    else:
        ref = topi.testing.bilinear_resize_python(
            data, (int(round(h*scale_h)), int(round(w*scale_w))), layout)
    for target, ctx in ctx_list():
        executor = relay.create_executor("graph", ctx=ctx, target=target)
        out = executor.evaluate(func)(data)
        tvm.testing.assert_allclose(out.asnumpy(), ref, rtol=1e-5, atol=1e-5)

def test_upsampling():
    _test_upsampling("NCHW", "nearest_neighbor")
    _test_upsampling("NCHW", "bilinear", True)
    _test_upsampling("NHWC", "nearest_neighbor")
    _test_upsampling("NHWC", "bilinear", True)
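# For NCHW nearest-neighbor upsampling with an integer scale, repeating each
# row and column reproduces the expected output shape; a NumPy-only sketch
# (illustrative, not from the original test file):
_up_in = np.random.uniform(size=(1, 16, 32, 32)).astype("float32")
_up_out = np.repeat(np.repeat(_up_in, 2, axis=2), 2, axis=3)   # scale_h = scale_w = 2
assert _up_out.shape == (1, 16, 64, 64)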
def test_conv2d_int8_intrinsics():
    def _compile(ic, oc, target, data_layout, kernel_layout, dtypes):
        input_dtype, weight_dtype, output_dtype = dtypes

        n, h, w, ch, cw = 1, 64, 64, 3, 3
        if data_layout == 'NCHW':
            data_shape = (n, ic, h, w)
            x = relay.var("x", relay.TensorType(data_shape, input_dtype))
        elif data_layout == 'NHWC':
            data_shape = (n, h, w, ic)
            x = relay.var("x", relay.TensorType(data_shape, input_dtype))
        else:
            raise ValueError('Not supported')

        if kernel_layout == 'OIHW':
            kernel_shape = (oc, ic, ch, cw)
        elif kernel_layout == 'HWIO':
            kernel_shape = (ch, cw, ic, oc)
        else:
            raise ValueError('Not supported')

        weight = relay.var("weight", relay.TensorType(kernel_shape, weight_dtype))
        y = relay.nn.conv2d(x, weight, kernel_size=(ch, cw), channels=oc,
                            padding=(1, 1), dilation=(1, 1),
                            data_layout=data_layout, kernel_layout=kernel_layout,
                            out_dtype=output_dtype)
        func = relay.Function([x, weight], y)
        wdata = np.random.rand(*kernel_shape) * 10
        parameters = {"weight": tvm.nd.array(wdata.astype(weight_dtype))}
        with relay.build_config(opt_level=3):
            graph, lib, params = relay.build(func, target, params=parameters)
        assembly = lib.get_source("asm")
        return assembly

    def _has_fast_int8_instructions(asm, target):
        if 'skylake-avx512' in target:
            return "pmaddubs" in asm
        elif 'cascadelake' in target:
            return "vpdpbusd" in asm
        else:
            assert False, "Target should be Skylake or Cascadelake"

    # compile conv2d for x86 (skylake, cascadelake) and test assembly contains *pmadd* instructions
    targets = ["llvm -mcpu=skylake-avx512", "llvm -mcpu=cascadelake"]
    llvm_version = tvm.codegen.llvm_version_major()
    for target in targets:
        if llvm_version >= 8:
            dtypes = ('uint8', 'int8', 'int32')
            # Sweep the input channels to check int8 robustness
            for ic in [1, 4, 6]:
                asm = _compile(ic=ic, oc=16, target=target, data_layout="NCHW",
                               kernel_layout='OIHW', dtypes=dtypes)
                assert _has_fast_int8_instructions(asm, target)
                asm = _compile(ic=ic, oc=16, target=target, data_layout="NHWC",
                               kernel_layout='HWIO', dtypes=dtypes)
                assert _has_fast_int8_instructions(asm, target)
            # Sweep the output channels to check int8 robustness
            # Output channels should be a multiple of 16 internally.
            for oc in [4, 16, 20]:
                asm = _compile(ic=8, oc=oc, target=target, data_layout="NCHW",
                               kernel_layout='OIHW', dtypes=dtypes)
                assert _has_fast_int8_instructions(asm, target)
                asm = _compile(ic=8, oc=oc, target=target, data_layout="NHWC",
                               kernel_layout='HWIO', dtypes=dtypes)
                assert _has_fast_int8_instructions(asm, target)
            # Check that both non-divisible oc and ic work
            asm = _compile(ic=17, oc=29, target=target, data_layout="NCHW",
                           kernel_layout='OIHW', dtypes=dtypes)
            assert _has_fast_int8_instructions(asm, target)
            asm = _compile(ic=17, oc=29, target=target, data_layout="NHWC",
                           kernel_layout='HWIO', dtypes=dtypes)
            assert _has_fast_int8_instructions(asm, target)

    # Check that int8 x int8 goes through legalization
    # so that fast instructions can be picked up.
    for target in targets:
        if llvm_version >= 8:
            dtypes = ('int8', 'int8', 'int32')
            # Check that both non-divisible oc and ic work
            asm = _compile(ic=17, oc=29, target=target, data_layout="NCHW",
                           kernel_layout='OIHW', dtypes=dtypes)
            assert _has_fast_int8_instructions(asm, target)
            asm = _compile(ic=17, oc=29, target=target, data_layout="NHWC",
                           kernel_layout='HWIO', dtypes=dtypes)
            assert _has_fast_int8_instructions(asm, target)

    # Ensure that code is generated when datatypes are not HW supported.
    dtypes = ('uint8', 'uint8', 'int32')
    asm = _compile(ic=16, oc=32, target=target, data_layout="NHWC",
                   kernel_layout='HWIO', dtypes=dtypes)
    # Check that intrinisic is not present in the assembly.
    assert not _has_fast_int8_instructions(asm, target)

    # Check that a vectorized instruction is generated for older Intel
    # generations, because we default to NCHWc layout.
    target = "llvm -mcpu=core-avx2"
    fast_int8_dtypes = ('uint8', 'int8', 'int32')
    asm = _compile(ic=16, oc=32, target=target, data_layout="NCHW",
                   kernel_layout='OIHW', dtypes=fast_int8_dtypes)
    # Check that vector int mult and add instructions are generated.
    assert "vpmulld" in asm and "vpadd" in asm

def test_depthwise_conv2d_int8():
    input_dtype = 'uint8'
    weight_dtype = 'int8'
    output_dtype = 'int32'

    data_shape = (1, 64, 56, 56)
    x = relay.var("x", relay.TensorType(data_shape, input_dtype))

    kernel_shape = (64, 1, 3, 3)
    weight = relay.var("weight", relay.TensorType(kernel_shape, weight_dtype))

    y = relay.nn.conv2d(x, weight, kernel_size=(3, 3), groups=64,
                        padding=(1, 1), dilation=(1, 1), out_dtype=output_dtype)
    func = relay.Function([x, weight], y)
    wdata = np.random.rand(*kernel_shape) * 10
    parameters = {"weight": tvm.nd.array(wdata.astype(weight_dtype))}

    targets = ["llvm -mcpu=skylake-avx512", "llvm -mcpu=cascadelake"]
    llvm_version = tvm.codegen.llvm_version_major()
    for target in targets:
        if llvm_version >= 8:
            with relay.build_config(opt_level=3):
                graph, lib, params = relay.build(func, target, params=parameters)
target,", "be a multiple of 16 internally. for oc in [4,", "run_infer_type(y) assert yy.checked_type == relay.TensorType( (n, 2, 222, 222), \"int32\")", "(4, 10, 7, 7) x = relay.var(\"x\", shape=dshape) y =", "x = relay.var(\"x\", shape=dshape, dtype=dtype) y = opfunc(x, pool_size=(2, 2),", "'int8' output_dtype = 'int32' data_shape = (1, 64, 56, 56)", "tvm.round(d*scale)), tvm.expr.Cast(\"int32\", tvm.round(h*scale)), tvm.expr.Cast(\"int32\", tvm.round(w*scale))), \"float32\") n, c = tvm.size_var(\"n\"),", "def test_conv1d_run(): def run_test_conv1d(dtype, out_dtype, scale, dshape, kshape, padding=(1, 1),", "= (1, 3, 28, 28) x = relay.var(\"x\", shape=dshape, dtype=dtype)", "op_res1 = intrp1.evaluate(func)(data) tvm.testing.assert_allclose(op_res1.asnumpy(), ref_res, rtol=1e-5, atol=1e-5) _test_run('float32') _test_run('int32') def", "w), (c, int(round(d*scale_d)), int(round(h*scale_h)),\\ int(round(w*scale_w))) else: return (d, h, w,", "(oc, ic, ch, cw) elif kernel_layout == 'HWIO': kernel_shape =", "WARRANTIES OR CONDITIONS OF ANY # KIND, either express or", "\"float32\" data = np.random.uniform(size=dshape_nhwc).astype(dtype) kernel = np.random.uniform(size=kshape_hwoi).astype(dtype) # use true", "= relay.var(\"x\", relay.TensorType((n, c, w), \"float32\")) y = opfunc(x, pool_size=(1,))", "2, 224, 224, 224), \"float32\") assert yy.args[1].checked_type == relay.TensorType( (2,", "params = relay.build(func, target, params=parameters) assembly = lib.get_source(\"asm\") return assembly", "tvm.size_var(\"n\"), 10, 224, 224 x = relay.var(\"x\", relay.TensorType((n, c, h,", "relay.TensorType((12, 11, 5, 5), \"float32\")) y = relay.nn.conv2d_transpose(x, w, output_padding=(1,", "target: return \"vpdpbusd\" in asm else: assert False, \"Target should", "w, kernel_size=3, padding=(1, 1), channels=2) yy = run_infer_type(y) assert yy.checked_type", "different layout n, c, h, w = 4, 32, 224,", "= intrp1.evaluate(func)(data) tvm.testing.assert_allclose(op_res1.asnumpy(), ref_res, rtol=1e-5, atol=1e-5) _test_pool3d(relay.nn.max_pool3d) _test_pool3d(relay.nn.max_pool3d, padding=(2, 0,", "relay.var(\"w\") y = relay.nn.conv3d(x, w, kernel_size=(3, 3, 3), padding=(1, 1,", "+ ishape, dtype)) y = relay.nn.upsampling3d(x, scale_d=scale_d, scale_h=scale_h, scale_w=scale_w,\\ layout=layout,", "run_test_conv2d(\"float32\", \"float32\", 1, dshape, kshape, padding=(1, 1), channels=10, kernel_size=(3 ,3))", "dtype)) size=5 axis=1 bias=0.5 alpha=.00001 beta=0.75 z = relay.nn.lrn(x, size=size,", "-mcpu=cascadelake\"] llvm_version = tvm.codegen.llvm_version_major() for target in targets: if llvm_version", "target = \"llvm -mcpu=core-avx2\" fast_int8_dtypes = ('uint8', 'int8', 'int32') asm", "w)) y = relay.nn.l2_normalize(x, eps=0.001, axis=[1]) \"axis=\" in y.astext() yy", "# Check that intrinisic is not present in the assembly.", "in [4, 16, 20]: asm = _compile(ic=8, oc=oc, target=target, data_layout=\"NHWC\",", "_compile(ic=17, oc=29, target=target, data_layout=\"NHWC\", kernel_layout='HWIO', dtypes=dtypes) assert _has_fast_int8_instructions(asm, target) #", "h, w), \"int16\")) y = relay.nn.bitpack(x, bit_axis=4, pack_axis=1, pack_type='uint16', bits=1)", "layout=\"NCHW\", method=\"bilinear\") yy = run_infer_type(y) assert yy.checked_type == relay.TensorType((n, c,", "assert yy.args[1].checked_type == relay.TensorType( (2, 10, 3, 3, 3), \"float32\")", "graph, lib, params = relay.build_module.build(mod, target=target, params=params) module = tvm.contrib.graph_runtime.create(graph,", "dilation=dilation, **attrs) func 
= relay.Function([x, w], y) data = np.random.uniform(-scale,", "_query_inside(self, target, workload): key = (target, workload) if key in", "(1, 32, 18, 18) kshape = (32, 4, 3, 3)", "w, channels=10, kernel_size=(3, 3), strides=(2, 2), padding=(1, 1), output_padding=(2, 2),", "222, 222), \"int32\") # Infer with NDHWC n, c, d,", "28) x = relay.var(\"x\", shape=dshape, dtype=dtype) y = opfunc(x, pool_size=(2,", "**attrs): x = relay.var(\"x\", shape=dshape, dtype=dtype) w = relay.var(\"w\", shape=kshape,", "= intrp2.evaluate(func)(x_data) tvm.testing.assert_allclose(op_res2.asnumpy(), ref_res, rtol=1e-5) def test_l2_normalize(): n, c ,", "params = relay.build_module.build(mod, target=target, params=params) module = tvm.contrib.graph_runtime.create(graph, lib, ctx)", "axis=(3,5)).astype(dtype) for target, ctx in ctx_list(): intrp1 = relay.create_executor(\"graph\", ctx=ctx,", "yy.checked_type == relay.TensorType((n, c, tvm.expr.Cast(\"int32\", tvm.round(d*scale)), tvm.expr.Cast(\"int32\", tvm.round(h*scale)), tvm.expr.Cast(\"int32\", tvm.round(w*scale))),", "= relay.var(\"x\", shape=dshape) y = relay.nn.upsampling3d(x, scale_d=scale_d, scale_h=scale_h, scale_w=scale_w,\\ layout=layout,", "('uint8', 'uint8', 'int32') asm = _compile(ic=16, oc=32, target=target, data_layout=\"NHWC\", kernel_layout='HWIO',", "dimension n, c, h, w = tvm.size_var(\"n\"), 10, 224, 224", "1, 1, 1]) cfg['tile_y'] = autotvm.task.space.SplitEntity([-1, 1, 1, 1]) cfg['tile_x']", "relay.nn.batch_flatten(x) yy = run_infer_type(y) assert yy.checked_type == relay.TensorType((d1, ((2*d3)*3)), \"float32\")", "x = relay.var(\"x\", shape=dshape) y = opfunc(x, pool_size=(2, 2), strides=(2,", "shape=dshape, dtype=dtype) y = opfunc(x, pool_size=(2, 2), strides=(2, 2), padding=(0,", "3, 3), \"int8\")) y = relay.nn.conv3d(x, w, out_dtype=\"int32\") assert \"out_dtype=\\\"int32\\\"\"", "# Sweep the output channels to check int8 robustness #", "= run_infer_type(y) assert yy.checked_type == relay.TensorType((3, 6, 9, 12), \"float32\")", "4, 6]: asm = _compile(ic=ic, oc=16, target=target, data_layout=\"NHWC\", kernel_layout='HWIO', dtypes=dtypes)", "224), \"float32\") # test execution dtype = \"float32\" dshape =", "== relay.TensorType((3, 6, 9, 12), \"float32\") # some symbolic values", "scale, dshape, kshape, padding=(1, 1), fref=None, groups=1, dilation=(1, 1), except_targets=None,", "pack_axis=1, pack_type='uint16', bits=1) yy = run_infer_type(y) assert yy.checked_type == relay.TensorType(", "c, h, w = tvm.size_var(\"n\"), 2, 3, tvm.size_var(\"w\") t =", "1, dshape, kshape, padding=(1, 1), channels=64, groups=32, kernel_size=(3 ,3), except_targets=['cuda'])", "w, kernel_size=(3, 3), padding=(0, 0), channels=32) yy = run_infer_type(y) assert", "kernel_size=(3 ,3), fref=lambda x, w: topi.testing.depthwise_conv2d_python_nchw( x, w, (1, 1),", "to in writing, # software distributed under the License is", "run_infer_type(z) assert yy.checked_type == relay.TensorType(shape, dtype) func = relay.Function([x], z)", "op_res2 = intrp2.evaluate(func)(x_data) tvm.testing.assert_allclose(op_res2.asnumpy(), ref_res, rtol=1e-5) def batch_flatten(data): shape =", "shape=dshape) y = relay.nn.upsampling(x, scale_h=scale_h, scale_w=scale_w, layout=layout, method=method, align_corners=align_corners) func", "scale_w=scale_w,\\ layout=layout, method=method,\\ coordinate_transformation_mode=coordinate_transformation_mode) func = relay.Function([x], y) data =", "kshape, padding=(0, 1), channels=10, kernel_size=(1 ,3)) # dilated conv2d dshape", "layout) else: 
ref = topi.testing.trilinear_resize3d_python(data, (int(round(d*scale_d)),\\ int(round(h*scale_h)),\\ int(round(w*scale_w))), layout) for", "dtype=dtype) w = relay.var(\"w\", dtype=dtype) y = relay.nn.conv2d(x, w, padding=padding,", "w), \"float32\")) y = opfunc(x, pool_size=(1, 1)) assert \"pool_size=\" in", "relay.var(\"w\") y = relay.nn.conv1d(x, w, kernel_size=3, padding=(1, 1), channels=2) yy", "CUDA is disabled for 'direct' schedule: # https://github.com/apache/incubator-tvm/pull/3070#issuecomment-486597553 # group", "ishape x = relay.var(\"x\", shape=dshape) y = relay.nn.upsampling(x, scale_h=scale_h, scale_w=scale_w,", "dtype=dtype) y = relay.nn.conv3d(x, w, padding=padding, dilation=dilation, groups=groups, **attrs) func", "1, c), \"float32\") n, c, h, w = tvm.size_var(\"n\"), tvm.size_var(\"c\"),", "w = tvm.var(\"n\"), 10, 224 x = relay.var(\"x\", relay.ty.TensorType((n, c,", "(2, 2), (3, 3), (4, 4)), 'constant') for target, ctx", "eps=0.001, axis=[1]) \"axis=\" in y.astext() yy = run_infer_type(y) assert yy.checked_type", "kshape = (10, 3, 3, 3, 3) run_test_conv3d(\"float32\", \"float32\", 1,", "\"float32\")) y = opfunc(x, layout=\"NHWC\") yy = run_infer_type(y) assert yy.checked_type", "shape inference. o, i, h, w = 32, 32, 128,", "= (3, 10, 3) oshape = (1, 10, 37) x", "yy.checked_type == relay.TensorType((n, 10, 5, 224, 224), \"float32\") # test", "dtypes = (('int8', 'int8', 'int32')) # Check that both non-divisible", "relay.TensorType(data_shape, input_dtype)) else: raise ValueError('Not supported') if kernel_layout == 'OIHW':", "= np.random.uniform(size=dshape_nhwc).astype(dtype) kernel = np.random.uniform(size=kshape_hwoi).astype(dtype) # use true kshape layout", "1), fref=None, dilation=1, except_targets=None, **attrs): if except_targets is None: except_targets", "channels=10, kernel_size=3, dilation=3) def test_conv2d_infer_type(): # symbolic in batch dimension", "run_test_conv3d(dtype, out_dtype, scale, dshape, kshape, padding=(1, 1, 1), fref=None, groups=1,", "shape=dshape) w = relay.var(\"w\") y = relay.nn.conv2d_transpose(x, w, channels=10, kernel_size=(3,3),", "high=128, size=dshape) ref_res = reffunc(data.reshape(1,3,14,2,14,2), axis=(3,5)).astype(dtype) for target, ctx in", "tvm import relay from tvm.relay import transform from tvm.relay.testing import", "100, 100, 200), \"float32\")) y = relay.nn.upsampling3d(x, scale_d=2, scale_h=2, scale_w=2,", "3), padding=(1, 1), channels=15) assert \"channels=15\" in y.astext() yy =", "= tvm.size_var(\"n\"), tvm.size_var(\"c\"), 224, 224 x = relay.var(\"x\", relay.TensorType((n, h,", "10, 12 x = relay.var(\"x\", relay.TensorType((n, c, h, w), \"float32\"))", "kw = (4, 4) sh, sw = (2, 2) ph,", "assert yy.checked_type == relay.TensorType(shape, dtype) func = relay.Function([x], z) x_data", "= relay.Function([x], y) data = np.random.uniform(size=dshape).astype(dtype) ref_res = topi.testing.pool1d_ncw_python(data, (2,),", "tvm.size_var(\"c\") x = relay.var(\"x\", relay.TensorType((n, c, 100, 200), \"float32\")) y", "test_conv2d_infer_type() test_conv3d_infer_type() test_bitpack_infer_type() test_upsampling_infer_type() test_upsampling3d_infer_type() test_flatten_infer_type() test_pad_infer_type() test_pad_run() test_conv2d_transpose_infer_type() test_conv2d_transpose_nchw_run()", "relay.build_config(opt_level=3): print('Compiling...') graph_json, mod, params = tvm.relay.build(mod, target=\"llvm -device=arm_cpu\") #", "_compile(ic=16, oc=32, target=target, data_layout=\"NHWC\", kernel_layout='HWIO', dtypes=dtypes) # Check that 
intrinisic", "kh, kw = (4, 4) sh, sw = (2, 2)", "conv2d for x86 (skylake, cascadelake) and test assembly contains *pmadd*", "'max' if 'max' in str(opfunc) else 'avg' y = opfunc(x,", "222), \"int32\") # Infer with a different layout n, c,", "run_infer_type(z) assert yy.checked_type == relay.TensorType(o_shape, dtype) func = relay.Function([x], z)", "0), out_shape=(1, 3, 18, 16, 16)) _test_pool3d(relay.nn.max_pool3d, padding=(0, 3, 0,", "= relay.var(\"x\", shape=dshape) y = relay.nn.upsampling(x, scale_h=scale_h, scale_w=scale_w, layout=layout, method=method,", "are not HW supported. dtypes = ('uint8', 'uint8', 'int32') asm", "def test_depthwise_conv2d_int8(): input_dtype = 'uint8' weight_dtype = 'int8' output_dtype =", "the output channels to check int8 robustness # Output channels", "run_infer_type(y) assert yy.checked_type == relay.TensorType((n, c, tvm.expr.Cast(\"int32\", tvm.round(d*scale)), tvm.expr.Cast(\"int32\", tvm.round(h*scale)),", "implied. See the License for the # specific language governing", "padding=(1, 1), channels=16, data_layout=\"NCHW4n4c\", kernel_layout=\"OIHW4o4i\", out_dtype=\"int32\") yy = run_infer_type(y) assert", "target=target, data_layout=\"NHWC\", kernel_layout='HWIO', dtypes=dtypes) assert _has_fast_int8_instructions(asm, target) # Ensure that", "56) x = relay.var(\"x\", relay.TensorType(data_shape, input_dtype)) kernel_shape = (64, 1,", "run_infer_type(func).ret_type.shape)) assert out_shape == f_out_shape, \\ \"Output shape mismatch. expected", "layout=layout, method=method,\\ coordinate_transformation_mode=coordinate_transformation_mode) yy = run_infer_type(y) assert yy.checked_type == relay.TensorType((n,)", "def _test_upsampling(layout, method, align_corners=False): n, c, h, w = tvm.size_var(\"n\"),", "[\"ann_spatial\", \"an\", [\"unroll\", \"unroll\", \"vec\"]], \\ [\"data_pad_inline\", \"ot\", 4], [\"data_vec_inline\",", "224, 224 x = relay.var(\"x\", relay.ty.TensorType((n, c, h, w), \"int16\"))", "= np.zeros(shape=(n, oc, oh, ow)).astype(dtype) for i in range(oh): for", "3, 1, 3) # mixed precision. 
run_test_conv2d(\"int8\", \"int32\", 1, dshape,", "tvm.const(2.0, \"float64\") x = relay.var(\"x\", relay.TensorType((n, c, h, w), \"float32\"))", "relay.nn.conv1d(x, w, kernel_size=3, padding=(1, 1), channels=2) yy = run_infer_type(y) assert", "func = relay.Function([x], y) # check output shape f_out_shape =", "h, w), \"float32\")) y = opfunc(x, pool_size=(1, 1, 1)) assert", "10) dtype = \"float32\" x = relay.var(\"x\", relay.TensorType(shape, dtype)) eps=0.001", "(n, 2, 224), \"float32\") assert yy.args[1].checked_type == relay.TensorType( (2, 10,", "\"uint8\")) w = relay.var(\"w\", relay.TensorType((2, 10, 3, 3), \"int8\")) y", "ctx=ctx, target=target) intrp2 = relay.create_executor(\"debug\", ctx=ctx, target=target) op_res1 = intrp1.evaluate(func)(x_data)", "yy.checked_type == relay.TensorType( (n, 32, 222, 222), \"int16\") def test_bitpack_infer_type():", "\"v\": 0.1}' temp = util.tempdir() with open(temp.relpath(\"temp.log\"), \"w\") as log_file:", "32, 32) x = relay.var(\"x\", shape=dshape) pool_type = 'max' if", "= run_infer_type(z) assert yy.checked_type == relay.TensorType(shape, dtype) func = relay.Function([x],", "in ctx_list(): executor = relay.create_executor(\"graph\", ctx=ctx, target=target) out = executor.evaluate(func)(data)", "6]: asm = _compile(ic=ic, oc=16, target=target, data_layout=\"NHWC\", kernel_layout='HWIO', dtypes=dtypes) assert", "padding=(1, 1), channels=16, data_layout=\"NWC\", out_dtype=\"int32\") yy = run_infer_type(y) assert yy.checked_type", "op_res1 = intrp1.evaluate(func)(data) tvm.testing.assert_allclose(op_res1.asnumpy(), ref_res, rtol=1e-5, atol=1e-5) def test_flatten_infer_type(): d1,", "rtol=1e-5, atol=1e-5) def test_flatten_infer_type(): d1, d2, d3, d4 = tvm.size_var(\"d1\"),", "either express or implied. See the License for the #", "1) + dilation) if fref is None: ref_res = topi.testing.conv2d_nchw_python(", "dshape, kshape, padding=(1, 1), groups=1, dilation=(1, 1), **attrs): x =", "beta=0.75) \"alpha=\" in y.astext() yy = run_infer_type(y) assert yy.checked_type ==", "relay.TensorType( (2, 10, 3), \"float32\") # infer by shape of", "kshape, padding=(1, 1), channels=32, groups=8, kernel_size=(3 ,3), except_targets=['cuda']) # also", "precision run_test_conv1d(\"int8\", \"int32\", 1, dshape, kshape, padding=(1, 1), channels=10, kernel_size=3)", "padding) else: ref_res = fref(data.astype(out_dtype), dkernel.astype(out_dtype)) for target, ctx in", "2, 14, 2), axis=(3, 5)) for target, ctx in ctx_list():", "16, 20]: asm = _compile(ic=8, oc=oc, target=target, data_layout=\"NCHW\", kernel_layout='OIHW', dtypes=dtypes)", "channels=16, data_layout=\"NWC\", out_dtype=\"int32\") yy = run_infer_type(y) assert yy.checked_type == relay.TensorType(", "c, 100, 200), \"float32\")) y = relay.nn.upsampling(x, scale_h=2, scale_w=2, layout=\"NCHW\",", "y = relay.nn.conv2d(x, w, kernel_size=(3, 3), padding=(1, 1), channels=2) yy", "should have swapped IO. 
# kshape is HWOI and kernel_layout", "((1, 1), (2, 2), (3, 3), (4, 4))) yy =", "dtype=dtype) y = relay.nn.conv3d(x, w, padding=padding, dilation=dilation, groups=groups, data_layout=\"NDHWC\", kernel_layout=\"DHWIO\",", "np.zeros(shape=oshape) d_np[:,:,0:c_np.shape[2],0:c_np.shape[3]] = c_np ref_res = d_np for target, ctx", "= (range(n), range(ic), (range(ph, ih+ph)), (range(pw, iw+pw))) pad_np[np.ix_(*no_zero)] = a_np", "w), \"float32\")) y = opfunc(x, pool_size=(1, 1, 1)) assert \"pool_size=\"", "opfunc(x, layout=\"NHWC\") yy = run_infer_type(y) assert yy.checked_type == relay.TensorType((n, 1,", "from tvm.relay.testing import ctx_list, run_infer_type from tvm.contrib import util import", "weight_dtype)) y = relay.nn.conv2d(x, weight, kernel_size=(ch, cw), channels=oc, padding=(1, 1),", "\"bilinear\", True) def _test_upsampling3d(layout, method, coordinate_transformation_mode=\"half_pixel\"): n, c, d, h,", "= relay.var(\"x\", relay.TensorType(data_shape, input_dtype)) elif data_layout == 'NHWC': data_shape =", "224 x = relay.var(\"x\", relay.ty.TensorType((n, c, w), \"float32\")) w =", "data_layout=\"NDHWC\", out_dtype=\"int32\") yy = run_infer_type(y) assert yy.checked_type == relay.TensorType( (n,", "c, 200, 400), \"float32\") def test_upsampling3d_infer_type(): n, c, d, h,", "tvm.testing.assert_allclose(out.asnumpy(), ref, rtol=1e-5, atol=1e-5) def test_upsampling(): _test_upsampling(\"NCHW\", \"nearest_neighbor\") _test_upsampling(\"NCHW\", \"bilinear\",", "= relay.Function([x, w], y) dtype = \"float32\" data = np.random.uniform(size=dshape).astype(dtype)", "ih+2*ph, iw+2*pw)).astype(dtype) no_zero = (range(n), range(ic), (range(ph, ih+ph)), (range(pw, iw+pw)))", "assert not _has_fast_int8_instructions(asm, target) # Check that a vectorized instruction", "ref = topi.testing.trilinear_resize3d_python(data, (int(round(d*scale_d)),\\ int(round(h*scale_h)),\\ int(round(w*scale_w))), layout) for target, ctx", "and kernel_layout should have swapped IO. # kshape is HWOI", "workload): key = (target, workload) if key in self.memory: return", "3, 18, 18) kshape = (10, 3, 3, 3) run_test_conv2d(\"float32\",", "relay.TensorType((2, 10, 3), \"int8\")) y = relay.nn.conv1d(x, w, out_dtype=\"int32\") assert", "kshape, padding=(1, 1), channels=32, groups=32, kernel_size=(3 ,3), fref=lambda x, w:", "channels should be a multiple of 4 internally. 
for ic", "h, w = tvm.size_var(\"n\"), 10, 10, 12 x = relay.var(\"x\",", "kernel_size=(3, 3), padding=(0, 0), channels=32) yy = run_infer_type(y) assert yy.checked_type", "1), channels=10, kernel_size=3) # mixed precision run_test_conv1d(\"int8\", \"int32\", 1, dshape,", "= relay.create_executor(\"graph\", ctx=ctx, target=target) intrp2 = relay.create_executor(\"debug\", ctx=ctx, target=target) op_res1", "target=target, params=params) module = tvm.contrib.graph_runtime.create(graph, lib, ctx) module.set_input('x', tvm.nd.array(data)) module.set_input(**params)", ", h, w = tvm.size_var(\"n\"), tvm.size_var(\"c\"), tvm.size_var(\"h\"), tvm.size_var(\"w\") x =", "= 1 for i in range(len(shape) - 1): target_dim =", "channels=11, data_layout=\"NHWC\") yy = run_infer_type(y) assert yy.checked_type == relay.TensorType( (n,", "y = relay.nn.avg_pool2d(x, pool_size=(kh, kw), strides=(sw, sw), padding=(ph, pw), count_include_pad=False)", "relay.TensorType( (1, 4, 224, 224, 4, 4), \"int32\") assert yy.args[1].checked_type", "kernel_size=(7, 7)) def test_conv3d_infer_type(): # symbolic in batch dimension n,", "\"axis=\" in y.astext() yy = run_infer_type(y) assert yy.checked_type == relay.TensorType((n,", "relay.ty.TensorType((n, c, h, w), \"int16\")) w = relay.var(\"w\", relay.ty.TensorType((32, 32,", "0.0) data = a_np for target, ctx in ctx_list(): intrp1", "coordinate_transformation_mode=\"half_pixel\"): n, c, d, h, w = tvm.size_var(\"n\"), 8, 16,", "2, 3, tvm.size_var(\"w\") t = relay.var(\"t\", relay.TensorType((n, c, h, w),", "relay.Function([x, w], y) dtype = \"float32\" data = np.random.uniform(size=dshape_nhwc).astype(dtype) kernel", "15, 11), \"float32\") def test_conv2d_transpose_nchw_run(): dshape = (1, 3, 18,", "func = relay.Function([x], y) data = np.random.uniform(size=dshape).astype(dtype) ref_res = reffunc(data,", "_test_global_pool2d(relay.nn.global_avg_pool2d, np.mean) def test_pool1d(): def _test_pool1d(opfunc): n, c, w =", "_test_pool3d(relay.nn.max_pool3d, padding=(0, 0, 4, 0, 0, 4), out_shape=(1, 3, 16,", "groups=groups, data_layout=\"NDHWC\", kernel_layout=\"DHWIO\", **attrs) func = relay.Function([x, w], y) data", "w, channels=10, kernel_size=(3,), strides=(2,), padding=(1,), output_padding=(2,)) func = relay.Function([x, w],", "output_padding=(2,)) func = relay.Function([x, w], y) dtype = \"float32\" data", "stride 1, padding N, kernel 3x3 run_test_conv2d_cuda(\"float32\", \"float32\", 1, dshape,", "file except in compliance # with the License. You may", "method=method, align_corners=align_corners) yy = run_infer_type(y) assert yy.checked_type == relay.TensorType((n,) +", "x = relay.var(\"x\", relay.TensorType((3, 2, 4, 3), \"float32\")) y =", "internally. 
for ic in [1, 4, 6]: asm = _compile(ic=ic,", "y.astext() yy = run_infer_type(y) assert yy.checked_type == relay.TensorType((n, c ,", "from tvm.relay import transform from tvm.relay.testing import ctx_list, run_infer_type from", "ctx in ctx_list(): intrp1 = relay.create_executor(\"graph\", ctx=ctx, target=target) intrp2 =", "target=target, data_layout=\"NHWC\", kernel_layout='HWIO', dtypes=dtypes) # Check that intrinisic is not", "= topi.testing.conv2d_transpose_nhwc_python(data, kernel, 'HWOI', 2, 1) d_np = np.zeros(shape=oshape_nhwc) d_np[:,0:c_np.shape[1],0:c_np.shape[2],:]", "# symbolic in batch dimension n, c, h, w =", "size=shape).astype(dtype) ref_res = topi.testing.l2_normalize_python(x_data, eps, axis) for target, ctx in", "\"int32\") # Infer with NWC n, c, w = 4,", "intrp1.evaluate(func)(data, kernel) tvm.testing.assert_allclose(op_res1.asnumpy(), ref_res, rtol=1e-5, atol=1e-5) def test_conv2d_transpose_nhwc_run(): dshape_nhwc =", "int(round(h*scale_h)),\\ int(round(w*scale_w))), layout) for target, ctx in ctx_list(): executor =", "y) dtype = \"float32\" data = np.random.uniform(size=dshape_nhwc).astype(dtype) kernel = np.random.uniform(size=kshape_hwoi).astype(dtype)", "def _test_pool3d(opfunc, padding=(0, 0, 0, 0, 0, 0), out_shape=(1, 3,", "3) run_test_conv1d(\"float32\", \"float32\", 1, dshape, kshape, padding=(1, 1), channels=10, kernel_size=3,", "precision. run_test_conv2d(\"int8\", \"int32\", 1, dshape, kshape, padding=(0, 1), channels=10, kernel_size=(1", "c , h, w = tvm.size_var(\"n\"), tvm.size_var(\"c\"), tvm.size_var(\"h\"), tvm.size_var(\"w\") x", "x = relay.var(\"x\", relay.TensorType((n, c, 100, 200), \"float32\")) y =", "z = relay.nn.l2_normalize(x, eps=0.001, axis=[axis]) yy = run_infer_type(z) assert yy.checked_type", "(10, 3, 3, 3, 3) run_test_conv3d(\"float32\", \"float32\", 1, dshape, kshape,", "\"float64\") x = relay.var(\"x\", relay.TensorType((n, c, h, w), \"float32\")) y", "= topi.testing.bilinear_resize_python(data, (int(round(h*scale_h)), int(round(w*scale_w))), layout) for target, ctx in ctx_list():", "yy.checked_type == relay.TensorType(shape, dtype) func = relay.Function([x], z) x_data =", ",3), dilation=(3, 3)) def test_conv2d_winograd(): class WinogradFallback(autotvm.FallbackContext): def _query_inside(self, target,", "int mult and add instructions are generated. assert \"vpmulld\" in", "in except_targets: continue intrp1 = relay.create_executor(\"graph\", ctx=ctx, target=target) op_res1 =", "relay.TensorType( (n, 15, 10, 12), \"float32\") assert yy.args[1].checked_type == relay.TensorType(", "10, 3, 3), \"int8\")) y = relay.nn.conv2d(x, w, out_dtype=\"int32\") assert", "32, 128, 128 x = relay.var(\"x\", relay.ty.TensorType((o, i, h, w),", "def test_conv3d_run(): def run_test_conv3d(dtype, out_dtype, scale, dshape, kshape, padding=(1, 1,", "cfg def run_test_conv2d_cuda(dtype, out_dtype, scale, dshape, kshape, padding=(1, 1), groups=1,", "relay.TensorType((5, 10, 5)) x = relay.Var(\"x\", t1) func = relay.Function([x],", "contributor license agreements. 
See the NOTICE file # distributed with", "1), output_padding=(2, 2), data_layout=\"NHWC\", kernel_layout=\"HWIO\") func = relay.Function([x, w], y)", "w = tvm.size_var(\"n\"), tvm.size_var(\"c\"), tvm.size_var(\"h\"), tvm.size_var(\"w\") scale = tvm.const(2.0, \"float64\")", "16, 16, 20)) _test_pool3d(relay.nn.avg_pool3d) _test_pool3d(relay.nn.avg_pool3d, padding=(2, 0, 0, 2, 0,", "(64, 1, 3, 3) run_test_conv2d(\"float32\", \"float32\", 1, dshape, kshape, padding=(1,", "target=target) op_res1 = intrp1.evaluate(func)(data) tvm.testing.assert_allclose(op_res1.asnumpy(), ref_res, rtol=1e-5, atol=1e-5) def test_flatten_infer_type():", "reffunc(data.reshape(1, 3, 14, 2, 14, 2), axis=(3, 5)) for target,", "== relay.TensorType((d1, ((d2*d3)*d4)), \"float32\") x = relay.var(\"x\", relay.TensorType((3, 2, 4,", "dshape, kshape, padding=(1, 1), fref=None, groups=1, dilation=(1, 1), except_targets=None, **attrs):", "= tvm.size_var(\"n\"), 10, 5, 224, 224 x = relay.var(\"x\", relay.TensorType((n,", "h, w = tvm.size_var(\"n\"), tvm.size_var(\"c\"), tvm.size_var(\"h\"), tvm.size_var(\"w\") x = relay.var(\"x\",", "222), \"int16\") def test_bitpack_infer_type(): # Test axis packing shape inference.", "2, 222), \"int32\") # infer shape in case of different", "3) # mixed precision. run_test_conv2d(\"int8\", \"int32\", 1, dshape, kshape, padding=(0,", "h, w = tvm.size_var(\"n\"), 10, 224, 224 x = relay.var(\"x\",", "1) + dilation) if fref is None: ref_res = topi.testing.conv3d_ncdhw_python(", "asm else: assert False, \"Target should be Skylake or Cascadelake\"", "w], y) data = np.random.uniform(-scale, scale, size=dshape).astype(dtype) kernel = np.random.uniform(-scale,", "reffunc, dtype): n, c, h, w = tvm.size_var(\"n\"), 10, 224,", "d, h, w), \"float32\")) w = relay.var(\"w\") y = relay.nn.conv3d(x,", "d, h, w), \"float32\")) y = opfunc(x, pool_size=(1, 1, 1))", "= relay.var(\"x\", shape=dshape) w = relay.var(\"w\") y = relay.nn.conv2d_transpose(x, w,", "2, 3, 4 t = relay.var(\"t\", relay.TensorType((n, c, h, w),", "7, 7) run_test_conv2d_cuda(\"float32\", \"float32\", 1, dshape, kshape, padding=(2, 2), channels=192,", "= run_infer_type(z) assert yy.checked_type == relay.TensorType(o_shape, dtype) func = relay.Function([x],", "asm def test_depthwise_conv2d_int8(): input_dtype = 'uint8' weight_dtype = 'int8' output_dtype", "c = tvm.size_var(\"n\"), 10, 10, 12 x = relay.var(\"x\", relay.TensorType((n,", "1), channels=192, kernel_size=(3, 3)) # extended winograd: stride 1, padding", "an # \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF", "assert yy.checked_type == relay.TensorType((d1, ((d2*d3)*d4)), \"float32\") x = relay.var(\"x\", relay.TensorType((3,", "assert yy.checked_type == relay.TensorType((n + 2, 6, 9, w +", "data.astype(out_dtype), dkernel.astype(out_dtype), 1, padding) else: ref_res = fref(data.astype(out_dtype), dkernel.astype(out_dtype)) for", "_test_upsampling(\"NCHW\", \"nearest_neighbor\") _test_upsampling(\"NCHW\", \"bilinear\", True) _test_upsampling(\"NHWC\", \"nearest_neighbor\") _test_upsampling(\"NHWC\", \"bilinear\", True)", "'NHWC': data_shape = (n, h, w, ic) x = relay.var(\"x\",", "WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express", "relay.var(\"x\", relay.TensorType((n, w, c), \"int8\")) wt = relay.var(\"w\") y =", "relay.TensorType((3, 2, 4, 3), \"float32\")) y = relay.nn.batch_flatten(x) yy =", "0, 0), out_shape=(1, 3, 18, 16, 16)) _test_pool3d(relay.nn.avg_pool3d, padding=(0, 3,", "0, 4), out_shape=(1, 3, 16, 16, 20)) _test_pool3d(relay.nn.avg_pool3d) 
_test_pool3d(relay.nn.avg_pool3d, padding=(2,", "dimension n, c, h, w = tvm.size_var(\"n\"), 10, 10, 12", "\"float32\", 1, dshape, kshape, padding=(2, 2), channels=192, kernel_size=(7, 7)) def", "dshape = (1, 3, 5, 224, 224) kshape = (10,", "2), data_layout=\"NHWC\", kernel_layout=\"HWIO\") func = relay.Function([x, w], y) dtype =", "c, d, h, w = tvm.size_var(\"n\"), 8, 16, 16, 16", "autotvm from tvm import relay from tvm.relay import transform from", "\\ {\"i\": 743640, \"t\": \"contrib_spatial_pack\", \"c\": null, \\ \"e\": [[\"tile_co\",", "3, \"float32\"], [1, 1], [1, 1], [1, 1], \"float32\"], \\", "1), (2, 2), (3, 3), (4, 4))) \"pad_width=\" in y.astext()", "groups=groups, **attrs) func = relay.Function([x, w], y) mod = tvm.relay.Module()", "\"c\": null, \\ \"e\": [[\"tile_co\", \"sp\", [32, 16]], [\"tile_oh\", \"sp\",", "# Input channels should be a multiple of 4 internally.", "(1, 64, 56, 56) x = relay.var(\"x\", relay.TensorType(data_shape, input_dtype)) kernel_shape", "((2*d3)*3)), \"float32\") shape = (1, 5, 10, 10) o_shape =", "scale_h=scale_h, scale_w=scale_w,\\ layout=layout, method=method,\\ coordinate_transformation_mode=coordinate_transformation_mode) func = relay.Function([x], y) data", "\\ [\"conv_inline\", \"ot\", 0]]}], \"r\": [[0.0002933163], \\ 0, 3.1976189613342285, 1570811630.6058347],", "relay.TensorType(o_shape, dtype) func = relay.Function([x], z) x_data = np.random.uniform(low=-1, high=1,", "d3, d4 = tvm.size_var(\"d1\"), tvm.size_var(\"d2\"), tvm.size_var(\"d3\"), tvm.size_var(\"d4\") x = relay.var(\"x\",", "kernel_size=3) # mixed precision run_test_conv1d(\"int8\", \"int32\", 1, dshape, kshape, padding=(1,", "is None: ref_res = topi.testing.conv2d_nchw_python( data.astype(out_dtype), dkernel.astype(out_dtype), 1, padding, groups=groups)", "(skylake, cascadelake) and test assembly contains *pmadd* instructions targets =", "assert yy.args[1].checked_type == relay.TensorType( (10, 15, 3, 3), \"float32\") #", "\"int8\")) y = relay.nn.conv3d(x, w, out_dtype=\"int32\") assert \"out_dtype=\\\"int32\\\"\" in y.astext()", "relay.nn.conv2d_transpose(x, w, channels=10, kernel_size=(3,3), strides=(2,2), padding=(1,1), output_padding=(2, 2)) func =", "c, w), \"int8\")) w = relay.var(\"w\", relay.TensorType((2, 10, 3), \"int8\"))", "relay.var(\"x\", relay.ty.TensorType((n, c, h, w), \"float32\")) w = relay.var(\"w\") y", "distributed under the License is distributed on an # \"AS", "= relay.build_module.build(mod, target=target, params=params) module = tvm.contrib.graph_runtime.create(graph, lib, ctx) module.set_input('x',", "tvm.size_var(\"c\"),\\ tvm.size_var(\"d\"), tvm.size_var(\"h\"), tvm.size_var(\"w\") scale = tvm.const(2.0, \"float64\") x =", "1, padding, groups=groups) else: ref_res = fref(data.astype(out_dtype), dkernel.astype(out_dtype)) for target,", "0, 3.1976189613342285, 1570811630.6058347], \"v\": 0.1}' temp = util.tempdir() with open(temp.relpath(\"temp.log\"),", "= intrp1.evaluate(func)(data, kernel) tvm.testing.assert_allclose(op_res1.asnumpy(), ref_res, rtol=1e-5, atol=1e-5) def compile_test_conv2d_arm_cpu(dtype, out_dtype,", "= np.maximum(b_np, 0.0) data = a_np for target, ctx in", "\"int8\")) wt = relay.var(\"w\") y = relay.nn.conv3d(x, wt, kernel_size=(3, 3,", "output_dtype = 'int32' data_shape = (1, 64, 56, 56) x", "# symbolic in batch dimension n, c, d, h, w", "(2, 2), (3, 3), (4, 4))) func = relay.Function([x], y)", "for input and weight. 
n, c, d, h, w =", "= cfg return cfg def run_test_conv2d_cuda(dtype, out_dtype, scale, dshape, kshape,", "== \"NCDHW\": return (c, d, h, w), (c, int(round(d*scale_d)), int(round(h*scale_h)),\\", "[4, 16, 20]: asm = _compile(ic=8, oc=oc, target=target, data_layout=\"NCHW\", kernel_layout='OIHW',", "_test_pool3d(relay.nn.max_pool3d) _test_pool3d(relay.nn.max_pool3d, padding=(2, 0, 0, 2, 0, 0), out_shape=(1, 3,", "workload) if key in self.memory: return self.memory[key] cfg = autotvm.task.space.FallbackConfigEntity()", "2, 6, 9, w + 8), \"float32\") def test_pad_run(): def", "32, 18, 18) kshape = (64, 1, 3, 3) run_test_conv2d(\"float32\",", "scale, dshape, kshape, padding=(1, 1, 1), fref=None, groups=1, dilation=(1, 1,", "normal conv3d dshape = (1, 5, 224, 224, 6) kshape", "80, 7, 7) run_test_conv2d_cuda(\"float32\", \"float32\", 1, dshape, kshape, padding=(2, 2),", "tvm.nd.array(data)) module.set_input(**params) module.run() op_res1 = module.get_output(0) tvm.testing.assert_allclose(op_res1.asnumpy(), ref_res, rtol=1e-3, atol=1e-3)", "groups=1, dilation=(1, 1, 1), except_targets=None, **attrs): if except_targets is None:", "1), channels=2) yy = run_infer_type(y) assert yy.checked_type == relay.TensorType( (n,", "ref_res = reffunc(data.reshape(1, 3, 14, 2, 14, 2), axis=(3, 5))", "z = relay.nn.batch_flatten(x) yy = run_infer_type(z) assert yy.checked_type == relay.TensorType(o_shape,", "ctx=ctx, target=target) op_res1 = intrp1.evaluate(func)(data, kernel) tvm.testing.assert_allclose(op_res1.asnumpy(), ref_res, rtol=1e-5, atol=1e-5)", "i*sh:i*sh+kh, j*sw:j*sw+kw] > 0, axis=(2,3)) b_np[:,:,i,j] = np.sum(pad_np[:, :, i*sh:i*sh+kh,", "relay.nn.batch_flatten(x) yy = run_infer_type(y) assert yy.checked_type == relay.TensorType((3, 24), \"float32\")", "that int8 x int8 goes through legalization so that fast", "infer by shape of w, mixed precision n, c, h,", "relay.var(\"x\", relay.TensorType((n, c, d, h, w), \"float32\")) y = relay.nn.upsampling3d(x,", "= relay.var(\"x\", relay.ty.TensorType((n, c, w), \"float32\")) w = relay.var(\"w\") y", "util.tempdir() with open(temp.relpath(\"temp.log\"), \"w\") as log_file: log_file.write(test_schedule) with autotvm.apply_history_best(temp.relpath(\"temp.log\")): with", "should be Skylake or Cascadelake\" # compile conv2d for x86", "under the Apache License, Version 2.0 (the # \"License\"); you", "scale, size=dshape).astype(dtype) kernel = np.random.uniform(-scale, scale, size=kshape).astype(dtype) ref_res = topi.testing.conv2d_nchw_python(", "10, 10) o_shape = (1, 500) dtype = \"float32\" x", "= ('uint8', 'int8', 'int32') asm = _compile(ic=16, oc=32, target=target, data_layout=\"NCHW\",", "padding=(1, 1, 1), channels=2) yy = run_infer_type(y) assert yy.checked_type ==", "relay.var(\"x\", relay.TensorType((n, c, d, h, w), \"int8\")) w = relay.var(\"w\",", "return self.memory[key] cfg = autotvm.task.space.FallbackConfigEntity() cfg.template_key = 'winograd' cfg.is_fallback =", "kernel, 2, 1) d_np = np.zeros(shape=oshape) d_np[:,:,0:c_np.shape[2],0:c_np.shape[3]] = c_np ref_res", "100, 200), \"float32\")) y = relay.nn.upsampling(x, scale_h=2, scale_w=2, layout=\"NCHW\", method=\"bilinear\")", "= relay.Function([x], z) x_data = np.random.uniform(low=-1, high=1, size=shape).astype(dtype) ref_res =", "224 x = relay.var(\"x\", relay.TensorType((n, h, w, c), \"int8\")) wt", "tvm.testing.assert_allclose(op_res1.asnumpy(), ref_res, rtol=1e-5, atol=1e-5) _test_pool3d(relay.nn.max_pool3d) _test_pool3d(relay.nn.max_pool3d, padding=(2, 0, 0, 2,", "padding=(1, 1), groups=1, 
dilation=(1, 1), **attrs): x = relay.var(\"x\", shape=dshape,", "= run_infer_type(y) assert yy.checked_type == relay.TensorType((n, c, 1, 1), \"float32\")", "run_test_conv2d_cuda(\"float32\", \"float32\", 1, dshape, kshape, padding=(2, 2), channels=192, kernel_size=(7, 7))", "n, c, h, w = tvm.size_var(\"n\"), 16, 32, 32 scale_h", "data_shape = (1, 64, 56, 56) x = relay.var(\"x\", relay.TensorType(data_shape,", "import util import topi.testing def test_conv1d_infer_type(): # symbolic in batch", "N, kernel NxN kshape = (192, 80, 7, 7) run_test_conv2d_cuda(\"float32\",", "relay.nn.l2_normalize(x, eps=0.001, axis=[axis]) yy = run_infer_type(z) assert yy.checked_type == relay.TensorType(shape,", "dtype = \"float32\" data = np.random.uniform(size=dshape_nhwc).astype(dtype) kernel = np.random.uniform(size=kshape_hwoi).astype(dtype) #", "for target, ctx in ctx_list(): executor = relay.create_executor(\"graph\", ctx=ctx, target=target)", "iw) = (3, 28, 28) (oc, oh, ow) = (3,", "1, padding, dilation) for target, ctx in ctx_list(): if target", "oc=oc, target=target, data_layout=\"NCHW\", kernel_layout='OIHW', dtypes=dtypes) assert _has_fast_int8_instructions(asm, target) for oc", "relay.TensorType((n, c, 100, 100, 200), \"float32\")) y = relay.nn.upsampling3d(x, scale_d=2,", "module = tvm.contrib.graph_runtime.create(graph, lib, ctx) module.set_input('x', tvm.nd.array(data)) module.set_input(**params) module.run() op_res1", "is None: ref_res = topi.testing.conv3d_ncdhw_python( data.astype(out_dtype), dkernel.astype(out_dtype), 1, padding, groups=groups)", "== relay.TensorType( (n, 2, 222, 222, 222), \"int32\") # infer", "18, 18) kshape = (64, 1, 3, 3) run_test_conv2d(\"float32\", \"float32\",", "assert yy.checked_type == relay.TensorType((n, 10, 224), \"float32\") # test execution", "run_infer_type(y) assert yy.checked_type == relay.TensorType( (n, 2, 222), \"int32\") #", "ambiguous batch. n, c, h, w = tvm.size_var(\"n\"), 32, 224,", "= relay.var(\"x\", relay.TensorType((d1, d2, d3, d4), \"float32\")) y = relay.nn.batch_flatten(x)", "= [\"llvm -mcpu=skylake-avx512\", \"llvm -mcpu=cascadelake\"] llvm_version = tvm.codegen.llvm_version_major() for target", "or more contributor license agreements. See the NOTICE file #", "16), \"int32\") def test_conv1d_run(): def run_test_conv1d(dtype, out_dtype, scale, dshape, kshape,", "padding=(1, 1), channels=10, kernel_size=(3 ,3)) kshape = (10, 3, 1,", "tvm.nd.array(kernel)} graph, lib, params = relay.build_module.build(mod, target=target, params=params) module =", "{}, actual {}\".format(out_shape, f_out_shape) data = np.random.uniform(size=dshape).astype(dtype) ref_res = topi.testing.pool3d_ncdhw_python(data,", "params=parameters) def test_bitserial_conv2d_infer_type(): # Basic shape test with ambiguous batch.", "assert yy.checked_type == relay.TensorType( (n, h, w, 16), \"int32\") def", "x = relay.var(\"x\", relay.TensorType((n, c, w), \"float32\")) y = opfunc(x,", "raise ValueError('Not supported') weight = relay.var(\"weight\", relay.TensorType(kernel_shape, weight_dtype)) y =", "operator test cases. 
\"\"\" import numpy as np import tvm", "y = relay.nn.conv3d(x, w, padding=padding, dilation=dilation, groups=groups, **attrs) func =", "test_conv2d_transpose_nchw_run(): dshape = (1, 3, 18, 18) kshape = (3,", "yy = run_infer_type(y) assert yy.checked_type == relay.TensorType((n, 10, 224, 224),", "yy = run_infer_type(y) assert yy.checked_type == relay.TensorType( (1, 4, 224,", "11, 5, 5), \"float32\")) y = relay.nn.conv2d_transpose(x, w, output_padding=(1, 1),", "assert yy.checked_type == relay.TensorType((n,) + oshape, dtype) dshape = (1,)", "y = relay.nn.conv3d(x, w, kernel_size=(3, 3, 3), padding=(1, 1, 1),", "assert yy.checked_type == relay.TensorType((n, c, 200, 200, 400), \"float32\") def", "14, 2, 14, 2), axis=(3, 5)) for target, ctx in", "h, w, c), \"int8\")) wt = relay.var(\"w\") y = relay.nn.conv3d(x,", "\"r\": [[0.0002933163], \\ 0, 3.1976189613342285, 1570811630.6058347], \"v\": 0.1}' temp =", "of different dtypes for input and weight. n, c, w", "0), (1, 3, 16), pool_type, False) for target, ctx in", "relay.var(\"w\") y = relay.nn.conv3d(x, wt, kernel_size=(3, 3, 3), padding=(1, 1,", "atol=1e-5) def compile_test_conv2d_arm_cpu(dtype, out_dtype, scale, dshape, kshape, padding=(1, 1), groups=1,", "channels=192, kernel_size=(7, 7)) def test_conv3d_infer_type(): # symbolic in batch dimension", "if 'max' in str(opfunc) else 'avg' y = opfunc(x, pool_size=(2,", "relay.build_config(opt_level=3): graph, lib, params = relay.build(func, target, params=parameters) assembly =", "\"nearest_neighbor\") _test_upsampling(\"NCHW\", \"bilinear\", True) _test_upsampling(\"NHWC\", \"nearest_neighbor\") _test_upsampling(\"NHWC\", \"bilinear\", True) def", "oh, ow)).astype(dtype) for i in range(oh): for j in range(ow):", "atol=1e-3) # normal winograd: stride 1, padding 1, kernel 3x3", "np.pad(data, ((1, 1), (2, 2), (3, 3), (4, 4)), 'constant')", "1), \"SAME\")) # depthwise conv2d for arm_cpu dshape = (1,", "class WinogradFallback(autotvm.FallbackContext): def _query_inside(self, target, workload): key = (target, workload)", "relay.var(\"x\", relay.TensorType((n, c, 100, 100, 200), \"float32\")) y = relay.nn.upsampling3d(x,", "= _compile(ic=8, oc=oc, target=target, data_layout=\"NCHW\", kernel_layout='OIHW', dtypes=dtypes) assert _has_fast_int8_instructions(asm, target)", "relay.var(\"x\", relay.TensorType((n,) + ishape, dtype)) y = relay.nn.upsampling3d(x, scale_d=scale_d, scale_h=scale_h,", "y = relay.nn.bitpack(x, bit_axis=4, pack_axis=1, pack_type='uint16', bits=1) yy = run_infer_type(y)", "tvm.nd.array(wdata.astype(weight_dtype))} targets = [\"llvm -mcpu=skylake-avx512\", \"llvm -mcpu=cascadelake\"] llvm_version = tvm.codegen.llvm_version_major()", "1, 2, 3, 6, 4, 5]], \\ [\"ann_reduce\", \"an\", [\"unroll\",", "_test_upsampling3d(\"NDHWC\", \"trilinear\", \"align_corners\") def test_conv2d_int8_intrinsics(): def _compile(ic, oc, target, data_layout,", "(192, 80, 3, 3) run_test_conv2d_cuda(\"float32\", \"float32\", 1, dshape, kshape, padding=(1,", "\\ [\"data_pad_inline\", \"ot\", 4], [\"data_vec_inline\", \"ot\", 1], \\ [\"conv_inline\", \"ot\",", "w, c), \"int8\")) wt = relay.var(\"w\") y = relay.nn.conv3d(x, wt,", "out_dtype, scale, dshape, kshape, padding=(1, 1), fref=None, dilation=1, except_targets=None, **attrs):", "h, w = tvm.size_var(\"n\"), tvm.size_var(\"c\"),\\ tvm.size_var(\"d\"), tvm.size_var(\"h\"), tvm.size_var(\"w\") scale =", "= opfunc(x) func = relay.Function([x], y) data = np.random.uniform(size=dshape).astype(dtype) ref_res", 
"coordinate_transformation_mode=coordinate_transformation_mode) func = relay.Function([x], y) data = np.random.uniform(size=dshape).astype(dtype) if method", "non-divisible oc and ic work asm = _compile(ic=17, oc=29, target=target,", "topi.testing.conv3d_ncdhw_python( data.astype(out_dtype), dkernel.astype(out_dtype), 1, padding, groups=groups) else: ref_res = fref(data.astype(out_dtype),", "100, 200), \"float32\")) y = relay.nn.upsampling3d(x, scale_d=2, scale_h=2, scale_w=2, layout=\"NCDHW\",", "pad_np[np.ix_(*no_zero)] = a_np b_np = np.zeros(shape=(n, oc, oh, ow)).astype(dtype) for", "= relay.var(\"x\", relay.TensorType((n, c, w), \"int8\")) w = relay.var(\"w\", relay.TensorType((2,", "y = opfunc(x, pool_size=(2,), strides=(2,), padding=(0, 0)) func = relay.Function([x],", "relay.TensorType((3, 6, 9, 12), \"float32\") # some symbolic values n,", "w, c), \"float32\")) y = opfunc(x, layout=\"NHWC\") yy = run_infer_type(y)", "dimension n, c, d, h, w = tvm.size_var(\"n\"), 10, 224,", "y = relay.nn.bitserial_conv2d( x, w, kernel_size=(3, 3), padding=(0, 0), channels=32)", "cfg return cfg def run_test_conv2d_cuda(dtype, out_dtype, scale, dshape, kshape, padding=(1,", "execution dtype = \"int32\" dshape = (1, 3, 28, 28)", "default to NCHWc layout. target = \"llvm -mcpu=core-avx2\" fast_int8_dtypes =", "be Skylake or Cascadelake\" # compile conv2d for x86 (skylake,", "input_dtype, weight_dtype, output_dtype = dtypes n, h, w, ch, cw", "1, 3, 3) compile_test_conv2d_arm_cpu(\"float32\", \"float32\", 1, dshape, kshape, padding=(1, 1),", "1, 3, 3], \"float32\"], \\ [1, 1], [1, 1], [1,", "or implied. See the License for the # specific language", "10) o_shape = (1, 500) dtype = \"float32\" x =", "np.reshape(data, (shape[0], target_dim)) def test_batch_flatten(): t1 = relay.TensorType((5, 10, 5))", "\"int8\")) w = relay.var(\"w\", relay.TensorType((2, 10, 3, 3, 3), \"int8\"))", "channels=16, data_layout=\"NCHW4n4c\", kernel_layout=\"OIHW4o4i\", out_dtype=\"int32\") yy = run_infer_type(y) assert yy.checked_type ==", "x = relay.var(\"x\", relay.TensorType((n//4, c//4, h, w, 4, 4), \"int8\"))", "ref_res = topi.testing.conv3d_ndhwc_python( data.astype(out_dtype), dkernel.astype(out_dtype), 1, padding) else: ref_res =", "relay.TensorType((d1, ((d2*d3)*d4)), \"float32\") x = relay.var(\"x\", relay.TensorType((3, 2, 4, 3),", "= relay.var(\"x\", shape=(n, c , h, w)) y = relay.nn.l2_normalize(x,", "test_pool3d() test_avg_pool2d_no_count_pad() test_lrn() test_l2_normalize() test_conv1d_infer_type() test_conv2d_infer_type() test_conv3d_infer_type() test_bitpack_infer_type() test_upsampling_infer_type() test_upsampling3d_infer_type()", "\"int16\")) w = relay.var(\"w\", relay.ty.TensorType((32, 32, 3, 3), \"int16\")) y", "\"float32\")) y = opfunc(x, pool_size=(1, 1)) assert \"pool_size=\" in y.astext()", "def get_shape(): if layout == \"NCDHW\": return (c, d, h,", "input_dtype = 'uint8' weight_dtype = 'int8' output_dtype = 'int32' data_shape", "y) data = np.random.uniform(size=dshape).astype(dtype) ref_res = np.pad(data, ((1, 1), (2,", "= 'int32' data_shape = (1, 64, 56, 56) x =", "with NWC n, c, w = 4, 32, 224 x", "x: int(x), run_infer_type(func).ret_type.shape)) assert out_shape == f_out_shape, \\ \"Output shape", "\"float32\"], {}, \\ [\"depthwise_conv2d_nchw\", [1, 512, 32, 32, \"float32\"], \\", "tvm.size_var(\"h\"), tvm.size_var(\"w\") x = relay.var(\"x\", shape=(n, c , h, w))", "= relay.var(\"x\", relay.TensorType(shape, dtype)) eps=0.001 axis=1 z = relay.nn.l2_normalize(x, eps=0.001,", 
"tvm.testing.assert_allclose(op_res1.asnumpy(), ref_res, rtol=1e-3, atol=1e-3) # normal winograd: stride 1, padding", "\"int16\") def test_bitpack_infer_type(): # Test axis packing shape inference. o,", "the # specific language governing permissions and limitations # under", "run_infer_type(y) assert yy.checked_type == relay.TensorType( (n, 2, 222, 222, 222),", "224) kshape = (10, 3, 3, 3, 3) run_test_conv3d(\"float32\", \"float32\",", "# infer by shape of w, mixed precision n, c,", "symbolic in batch dimension n, c, d, h, w =", "def _query_inside(self, target, workload): key = (target, workload) if key", "test_upsampling3d_infer_type(): n, c, d, h, w = tvm.size_var(\"n\"), tvm.size_var(\"c\"),\\ tvm.size_var(\"d\"),", "d2, d3, d4), \"float32\")) y = relay.nn.batch_flatten(x) yy = run_infer_type(y)", "# compile conv2d for x86 (skylake, cascadelake) and test assembly", "(2, 2, 2), padding, out_shape, pool_type, False) for target, ctx", "return (c, h, w), (c, int(round(h*scale_h)), int(round(w*scale_w))) else: return (h,", "+ 2, 6, 9, w + 8), \"float32\") def test_pad_run():", "224 x = relay.var(\"x\", relay.TensorType((n, c, d, h, w), \"uint8\"))", "= relay.var(\"x\", shape=dshape) y = opfunc(x) func = relay.Function([x], y)", "scale_d=scale_d, scale_h=scale_h, scale_w=scale_w,\\ layout=layout, method=method,\\ coordinate_transformation_mode=coordinate_transformation_mode) func = relay.Function([x], y)", "tvm.size_var(\"n\"), tvm.size_var(\"c\"), tvm.size_var(\"h\"), tvm.size_var(\"w\") scale = tvm.const(2.0, \"float64\") x =", "224 x = relay.var(\"x\", relay.TensorType((n, w, c), \"int8\")) wt =", "shape = (1, 5, 10, 10) dtype = \"float32\" x", "ref = topi.testing.upsampling_python(data, (scale_h, scale_w), layout) else: ref = topi.testing.bilinear_resize_python(data,", "(3, 15, 15) dshape = (n, ic, ih, iw) x", "The ASF licenses this file # to you under the", "channels=192, kernel_size=(3, 3)) run_test_conv2d_cuda(\"float32\", \"float32\", 1, dshape, kshape, padding=(2, 2),", "and ic work asm = _compile(ic=17, oc=29, target=target, data_layout=\"NCHW\", kernel_layout='OIHW',", "relay.create_executor(\"graph\", ctx=ctx, target=target) op_res1 = intrp1.evaluate(func)(data, kernel) tvm.testing.assert_allclose(op_res1.asnumpy(), ref_res, rtol=1e-5,", "= relay.Function([x, w], y) mod = relay.Module() mod['main'] = func", "relay.TensorType( (n, w, 16), \"int32\") def test_conv1d_run(): def run_test_conv1d(dtype, out_dtype,", "(4, 4)), 'constant') for target, ctx in ctx_list(): intrp1 =", "== relay.TensorType( (n, 32, 222, 222), \"int16\") def test_bitpack_infer_type(): #", ", h, w)) y = relay.nn.lrn(x, size=10, axis=2, bias=0.5, alpha=.00001,", "scale = tvm.const(2.0, \"float64\") x = relay.var(\"x\", relay.TensorType((n, c, h,", "dtype=dtype) w = relay.var(\"w\", dtype=dtype) y = relay.nn.conv3d(x, w, padding=padding,", "\"\"\" Support level2 operator test cases. 
\"\"\" import numpy as", "\"int32\", 1, dshape, kshape, padding=(0, 1), channels=10, kernel_size=(1 ,3)) #", "padding=(0, 0), channels=192, kernel_size=(3, 3)) run_test_conv2d_cuda(\"float32\", \"float32\", 1, dshape, kshape,", "y.astext() yy = run_infer_type(y) assert yy.checked_type == relay.TensorType( (n, 15,", "1, dshape, kshape, padding=(1, 1, 1), channels=10, kernel_size=(3, 3 ,3),", "2)) func = relay.Function([x, w], y) dtype = \"float32\" data", "\"float32\", 1, dshape, kshape, padding=(1, 1), channels=192, kernel_size=(3, 3)) #", "(2, 2, 2), (2, 2, 2), padding, out_shape, pool_type, False)", "10, 5)) x = relay.Var(\"x\", t1) func = relay.Function([x], relay.nn.batch_flatten(x))", "dshape, kshape, padding=(1, 1), channels=64, groups=32, kernel_size=(3 ,3), except_targets=['cuda']) #", "# Check that both non-divisible oc and ic work asm", "disabled for 'direct' schedule: # https://github.com/apache/incubator-tvm/pull/3070#issuecomment-486597553 # group conv2d dshape", "dilation) if fref is None: ref_res = topi.testing.conv3d_ndhwc_python( data.astype(out_dtype), dkernel.astype(out_dtype),", "padding=(1, 1), channels=10, kernel_size=(3 ,3)) # mixed precision run_test_conv2d(\"int8\", \"int32\",", "tvm.expr.Cast(\"int32\", tvm.round(d*scale)), tvm.expr.Cast(\"int32\", tvm.round(h*scale)), tvm.expr.Cast(\"int32\", tvm.round(w*scale))), \"float32\") n, c =", "7) run_test_conv2d_cuda(\"float32\", \"float32\", 1, dshape, kshape, padding=(2, 2), channels=192, kernel_size=(7,", "assert _has_fast_int8_instructions(asm, target) # Check that int8 x int8 goes", "222, 222), \"int32\") # Infer with a different layout n,", "3, 3, 3, 3) run_test_conv3d(\"float32\", \"float32\", 1, dshape, kshape, padding=(1,", "yy = run_infer_type(y) assert yy.checked_type == relay.TensorType((n, c, tvm.expr.Cast(\"int32\", tvm.round(d*scale)),", "1) d_np = np.zeros(shape=oshape) d_np[:,:,0:c_np.shape[2],0:c_np.shape[3]] = c_np ref_res = d_np", "law or agreed to in writing, # software distributed under", "np.random.uniform(size=kshape).astype(dtype) c_np = topi.testing.conv1d_transpose_ncw_python( data, kernel, 2, 1) d_np =", "groups=groups, **attrs) func = relay.Function([x, w], y) mod = relay.Module()", "groups=groups) else: ref_res = fref(data.astype(out_dtype), dkernel.astype(out_dtype)) for target, ctx in", "cfg = autotvm.task.space.FallbackConfigEntity() cfg.template_key = 'winograd' cfg.is_fallback = False cfg['tile_b']", "test_avg_pool2d_no_count_pad(): kh, kw = (4, 4) sh, sw = (2,", "assert yy.checked_type == relay.TensorType((n, c , h, w)) shape =", "ref, rtol=1e-5, atol=1e-5) def test_upsampling3d(): _test_upsampling3d(\"NCDHW\", \"nearest_neighbor\") _test_upsampling3d(\"NCDHW\", \"trilinear\", \"align_corners\")", "relay.nn.conv2d_transpose(x, w, output_padding=(1, 1), channels=11, data_layout=\"NHWC\") yy = run_infer_type(y) assert", "kernel_size=(3, 3)) # extended winograd: stride 1, padding N, kernel", "4, 6]: asm = _compile(ic=ic, oc=16, target=target, data_layout=\"NCHW\", kernel_layout='OIHW', dtypes=dtypes)", "target=target, data_layout=\"NCHW\", kernel_layout='OIHW', dtypes=dtypes) assert _has_fast_int8_instructions(asm, target) asm = _compile(ic=17,", ">= 8: dtypes = (('int8', 'int8', 'int32')) # Check that", "c, d, h, w = tvm.size_var(\"n\"), 10, 224, 224, 224", "x = relay.var(\"x\", relay.TensorType((n, c, d, h, w), \"int8\")) w", "= tvm.size_var(\"n\"), tvm.size_var(\"c\"), tvm.size_var(\"h\"), tvm.size_var(\"w\") x = relay.var(\"x\", relay.TensorType((n, c,", "# some symbolic values n, c, 
h, w = tvm.size_var(\"n\"),", "16, 20]: asm = _compile(ic=8, oc=oc, target=target, data_layout=\"NHWC\", kernel_layout='HWIO', dtypes=dtypes)", "yy = run_infer_type(y) assert yy.checked_type == relay.TensorType((n + 2, 6,", "# normal winograd: stride 1, padding 1, kernel 3x3 dshape", "run_infer_type(y) assert yy.checked_type == relay.TensorType( (n, 32, 222, 222), \"int16\")", "relay.var(\"x\", shape=dshape, dtype=dtype) w = relay.var(\"w\", shape=kshape, dtype=dtype) y =", "assembly = lib.get_source(\"asm\") return assembly def _has_fast_int8_instructions(asm, target): if 'skylake-avx512'", "80, 73, 73) kshape = (192, 80, 3, 3) run_test_conv2d_cuda(\"float32\",", "\"int8\") # Infer with NHWC n, c, h, w =", "w = relay.var(\"w\", dtype=dtype) y = relay.nn.conv2d(x, w, padding=padding, dilation=dilation,", "= (1, 3, 5, 224, 224) kshape = (10, 3,", "= reffunc(data.reshape(1, 3, 14, 2, 14, 2), axis=(3, 5)) for", "(2, 10, 3), \"float32\") # infer by shape of w,", "oshape = (1, 10, 37, 37) x = relay.var(\"x\", shape=dshape)", "= run_infer_type(y) assert yy.checked_type == relay.TensorType((n, c , h, w))", "else: return (h, w, c), (int(round(h*scale_h)), int(round(w*scale_w)), c) ishape, oshape", "when datatypes are not HW supported. dtypes = ('uint8', 'uint8',", "= relay.Function([x, w], y) dtype = \"float32\" data = np.random.uniform(size=dshape_nhwc).astype(dtype)", "data_layout=\"NCHW4n4c\", kernel_layout=\"OIHW4o4i\", out_dtype=\"int32\") yy = run_infer_type(y) assert yy.checked_type == relay.TensorType(", "3, 3), \"int16\")) y = relay.nn.bitserial_conv2d( x, w, kernel_size=(3, 3),", "\"float32\") x = relay.var(\"x\", relay.TensorType((3, 2, 4, 3), \"float32\")) y", "= 'max' if 'max' in str(opfunc) else 'avg' y =", "mod = relay.transform.InferType()(mod) data = np.random.uniform(-scale, scale, size=dshape).astype(dtype) kernel =", "3) run_test_conv1d(\"float32\", \"float32\", 1, dshape, kshape, padding=(1, 1), channels=10, kernel_size=3)", "target, workload): key = (target, workload) if key in self.memory:", "(10, 3, 1, 3) # mixed precision. run_test_conv2d(\"int8\", \"int32\", 1,", "OR CONDITIONS OF ANY # KIND, either express or implied.", "\"int32\") assert yy.args[1].checked_type == relay.TensorType( (4, 8, 3, 3, 4,", "yy = run_infer_type(y) assert yy.checked_type == relay.TensorType((3, 6, 9, 12),", "c), (int(round(d*scale_d)), int(round(h*scale_h)),\\ int(round(w*scale_w)), c) ishape, oshape = get_shape() x", "_compile(ic, oc, target, data_layout, kernel_layout, dtypes): input_dtype, weight_dtype, output_dtype =", "c, d, h, w), \"float32\")) y = relay.nn.upsampling3d(x, scale_d=2, scale_h=2,", "dimension n, c, w = tvm.var(\"n\"), 10, 224 x =", "= run_infer_type(y) assert yy.checked_type == relay.TensorType((n, c, tvm.expr.Cast(\"int32\", tvm.round(h*scale)), tvm.expr.Cast(\"int32\",", "(3, 3), (4, 4))) \"pad_width=\" in y.astext() yy = run_infer_type(y)", "run_test_conv2d_cuda(\"float32\", \"float32\", 1, dshape, kshape, padding=(0, 0), channels=192, kernel_size=(3, 3))", "\"NCHW\": return (c, h, w), (c, int(round(h*scale_h)), int(round(w*scale_w))) else: return", "run_test_conv2d_cuda(dtype, out_dtype, scale, dshape, kshape, padding=(1, 1), groups=1, dilation=(1, 1),", "and weight. 
n, c, h, w = tvm.size_var(\"n\"), 10, 224,", "winograd: stride 1, padding N, kernel NxN kshape = (192,", "= np.random.uniform(size=dshape).astype(dtype) ref_res = reffunc(data, axis=(2,3), keepdims=True) for target, ctx", "scale, size=dshape).astype(dtype) kernel = np.random.uniform(-scale, scale, size=kshape).astype(dtype) dkernel = topi.testing.dilate_python(kernel,", "test_conv2d_transpose_nhwc_run() test_conv1d_transpose_ncw_run() test_conv1d_run() test_conv2d_run() test_conv2d_winograd() test_conv3d_run() test_conv3d_ndhwc_run() test_bitserial_conv2d_infer_type() test_batch_flatten() test_upsampling()", "out_dtype=\"int32\") yy = run_infer_type(y) assert yy.checked_type == relay.TensorType( (n, h,", "weight. n, c, w = tvm.var(\"n\"), 10, 224 x =", "run_test_conv2d(\"int8\", \"int32\", 1, dshape, kshape, padding=(0, 1), channels=10, kernel_size=(1 ,3))", "reffunc): n, c, h, w = tvm.size_var(\"n\"), 10, 224, 224", "32, 224, 224 x = relay.var(\"x\", relay.TensorType((n//4, c//4, h, w,", "224, 224) kshape = (10, 3, 3, 3, 3) run_test_conv3d(\"float32\",", "parameters = {\"weight\": tvm.nd.array(wdata.astype(weight_dtype))} with relay.build_config(opt_level=3): graph, lib, params =", "(scale_d, scale_h, scale_w), layout) else: ref = topi.testing.trilinear_resize3d_python(data, (int(round(d*scale_d)),\\ int(round(h*scale_h)),\\", "are generated. assert \"vpmulld\" in asm and \"vpadd\" in asm", "copyright ownership. The ASF licenses this file # to you", "ref_res = reffunc(data.reshape(1,3,14,2,14,2), axis=(3,5)).astype(dtype) for target, ctx in ctx_list(): intrp1", "\"pool_size=\" in y.astext() yy = run_infer_type(y) assert yy.checked_type == relay.TensorType((n,", "relay.var(\"x\", shape=dshape) y = opfunc(x) func = relay.Function([x], y) data", "strides=(2, 2), padding=(0, 0)) func = relay.Function([x], y) data =", "= 4, 32, 224, 224, 224 x = relay.var(\"x\", relay.TensorType((n,", "= relay.nn.batch_flatten(x) yy = run_infer_type(z) assert yy.checked_type == relay.TensorType(o_shape, dtype)", "w = tvm.size_var(\"n\"), tvm.size_var(\"c\"), tvm.size_var(\"h\"), tvm.size_var(\"w\") x = relay.var(\"x\", relay.TensorType((n,", "tvm.relay.Module() mod[\"main\"] = func test_schedule='{\"i\": [\"llvm -device=arm_cpu\", \"topi_nn_depthwise_conv2d_nchw\", \\ [[\"TENSOR\",", "= (1, 512, 32, 32) kshape = (512, 1, 3,", "tvm.codegen.llvm_version_major() for target in targets: if llvm_version >= 8: dtypes", "in writing, # software distributed under the License is distributed", "int(round(d*scale_d)), int(round(h*scale_h)),\\ int(round(w*scale_w))) else: return (d, h, w, c), (int(round(d*scale_d)),", "x, w: topi.testing.depthwise_conv2d_python_nchw( x, w, (1, 1), \"SAME\")) # depthwise", "((1, 1), (2, 2), (3, 3), (4, 4))) \"pad_width=\" in", "groups=32, kernel_size=(3 ,3), fref=lambda x, w: topi.testing.depthwise_conv2d_python_nchw( x, w, (1,", "kernel.astype(out_dtype), 1, padding, dilation) for target, ctx in ctx_list(): if", "target=target, data_layout=\"NCHW\", kernel_layout='OIHW', dtypes=dtypes) assert _has_fast_int8_instructions(asm, target) for ic in", "10, 37, 37) x = relay.var(\"x\", shape=dshape) w = relay.var(\"w\")", "(n, 2, 222), \"int32\") # infer shape in case of", "= relay.nn.lrn(x, size=size, axis=axis, bias=bias, alpha=alpha, beta=beta) yy = run_infer_type(z)", "= topi.testing.conv1d_ncw_python( data.astype(out_dtype), kernel.astype(out_dtype), 1, padding, dilation) for target, ctx", "reffunc(data, axis=(2,3), keepdims=True) for target, ctx in ctx_list(): intrp1 =", "relay.var(\"w\", 
dtype=dtype) y = relay.nn.conv2d(x, w, padding=padding, dilation=dilation, groups=groups, **attrs)", "dshape, kshape, padding=(1, 1, 1), channels=10, kernel_size=(3, 3 ,3), except_targets=[\"cuda\"])", "= autotvm.task.space.FallbackConfigEntity() cfg.template_key = 'winograd' cfg.is_fallback = False cfg['tile_b'] =", "1, dshape, kshape, padding=(1, 1), channels=10, kernel_size=3) # dilated conv2d", "2, 224, 224), \"float32\") assert yy.args[1].checked_type == relay.TensorType( (2, 10,", "= np.random.uniform(low=-1, high=1, size=shape).astype(dtype) ref_res = x_data.flatten().reshape(o_shape) for target, ctx", "c), \"float32\")) w = relay.var(\"w\", relay.TensorType((12, 11, 5, 5), \"float32\"))", "target=target) intrp2 = relay.create_executor(\"debug\", ctx=ctx, target=target) op_res1 = intrp1.evaluate(func)(x_data) tvm.testing.assert_allclose(op_res1.asnumpy(),", "for older Intel # generations, because we default to NCHWc", "relay.TensorType(data_shape, input_dtype)) kernel_shape = (64, 1, 3, 3) weight =", "_test_pool2d(opfunc, reffunc): n, c, h, w = tvm.size_var(\"n\"), 10, 224,", "mod = tvm.relay.Module() mod[\"main\"] = func test_schedule='{\"i\": [\"llvm -device=arm_cpu\", \"topi_nn_depthwise_conv2d_nchw\",", "dtype = \"float32\" dshape = (1, 3, 32, 32, 32)", "topi.testing.trilinear_resize3d_python(data, (int(round(d*scale_d)),\\ int(round(h*scale_h)),\\ int(round(w*scale_w))), layout) for target, ctx in ctx_list():", "\"float32\")) w = relay.var(\"w\", relay.TensorType((12, 11, 5, 5), \"float32\")) y", "run_infer_type(y) assert yy.checked_type == relay.TensorType( (1, 4, 224, 224, 4,", "assert yy.checked_type == relay.TensorType(o_shape, dtype) func = relay.Function([x], z) x_data", "ref_res, rtol=1e-5) op_res2 = intrp2.evaluate(func)(x_data) tvm.testing.assert_allclose(op_res2.asnumpy(), ref_res, rtol=1e-5) def test_l2_normalize():", "axis=2, bias=0.5, alpha=.00001, beta=0.75) \"alpha=\" in y.astext() yy = run_infer_type(y)", "[1, 1], [1, 1], \"float32\"], {}, \\ [\"depthwise_conv2d_nchw\", [1, 512,", "3, 3), \"int8\")) y = relay.nn.conv2d(x, w, out_dtype=\"int32\") assert \"out_dtype=\\\"int32\\\"\"", "rtol=1e-5) def test_pad_infer_type(): # entirely concrete case n, c, h,", "shape = data.shape target_dim = 1 for i in range(len(shape)", ",3), except_targets=['cuda']) # also group conv2d dshape = (1, 32,", "relay.TensorType((n, c, w), \"int8\")) w = relay.var(\"w\", relay.TensorType((2, 10, 3),", "winograd: stride 1, padding N, kernel 3x3 run_test_conv2d_cuda(\"float32\", \"float32\", 1,", "w = 4, 32, 224, 224 x = relay.var(\"x\", relay.TensorType((n,", "6, 7]], \\ [\"reorder_1\", \"re\", [0, 1, 2, 3, 6,", "x = relay.var(\"x\", shape=dshape) pool_type = 'max' if 'max' in", "= relay.transform.InferType()(mod) data = np.random.uniform(-scale, scale, size=dshape).astype(dtype) kernel = np.random.uniform(-scale,", "False) for target, ctx in ctx_list(): intrp1 = relay.create_executor(\"graph\", ctx=ctx,", "_test_upsampling(\"NHWC\", \"bilinear\", True) def _test_upsampling3d(layout, method, coordinate_transformation_mode=\"half_pixel\"): n, c, d,", "# depthwise conv2d dshape = (1, 32, 18, 18) kshape", "(1,) + ishape x = relay.var(\"x\", shape=dshape) y = relay.nn.upsampling(x,", "test_bitpack_infer_type(): # Test axis packing shape inference. 
o, i, h,", "w), \"float32\")) y = relay.nn.upsampling(x, scale_h=2, scale_w=2, layout=\"NCHW\", method=\"bilinear\") \"method=\\\"BINLINEAR\\\"\"", "kernel_size=(ch, cw), channels=oc, padding=(1, 1), dilation=(1, 1), data_layout=data_layout, kernel_layout=kernel_layout, out_dtype=output_dtype)", ">= 8: with relay.build_config(opt_level=3): graph, lib, params = relay.build(func, target,", "data = np.random.uniform(size=dshape).astype(dtype) ref_res = topi.testing.pool1d_ncw_python(data, (2,), (2,), (0, 0),", ",3)) # CUDA is disabled for 'direct' schedule: # https://github.com/apache/incubator-tvm/pull/3070#issuecomment-486597553", "relay.Function([x], z) x_data = np.random.uniform(low=-1, high=1, size=shape).astype(dtype) ref_res = topi.testing.lrn_python(x_data,", "10, 5).astype(t1.dtype) ref_res = batch_flatten(data) for target, ctx in ctx_list():", "kernel_size=(3, 3, 3), padding=(1, 1, 1), channels=2) yy = run_infer_type(y)", "kernel = np.random.uniform(size=kshape_hwoi).astype(dtype) # use true kshape layout here -", "\\ [[\"TENSOR\", [1, 512, 32, 32], \"float32\"], \\ [\"TENSOR\", [512,", "License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by", "ic, ih, iw) x = relay.var(\"x\", shape=dshape) y = relay.nn.avg_pool2d(x,", "1, 64, 64, 3, 3 if data_layout == 'NCHW': data_shape", "packing shape inference. o, i, h, w = 32, 32,", "scale, size=dshape).astype(dtype) kernel = np.random.uniform(-scale, scale, size=kshape).astype(dtype) ref_res = topi.testing.conv1d_ncw_python(", "= tvm.size_var(\"n\"), tvm.size_var(\"c\"), tvm.size_var(\"h\"), tvm.size_var(\"w\") x = relay.var(\"x\", shape=(n, c", "kshape = (512, 1, 3, 3) compile_test_conv2d_arm_cpu(\"float32\", \"float32\", 1, dshape,", "16, 16)) _test_pool3d(relay.nn.max_pool3d, padding=(0, 3, 0, 0, 3, 0), out_shape=(1,", "method=method,\\ coordinate_transformation_mode=coordinate_transformation_mode) func = relay.Function([x], y) data = np.random.uniform(size=dshape).astype(dtype) if", "(1, 3, 18, 18) kshape = (3, 10, 3, 3)", "d_np = np.zeros(shape=oshape) d_np[:,:,0:c_np.shape[2],0:c_np.shape[3]] = c_np ref_res = d_np for", "== \"NCHW\": return (c, h, w), (c, int(round(h*scale_h)), int(round(w*scale_w))) else:", "a multiple of 16 internally. 
for oc in [4, 16,", "= d_np for target, ctx in ctx_list(): intrp1 = relay.create_executor(\"graph\",", "3, 16, 19, 16)) _test_pool3d(relay.nn.avg_pool3d, padding=(0, 0, 4, 0, 0,", "= relay.create_executor(\"graph\", ctx=ctx, target=target) op_res1 = intrp1.evaluate(func)(data) tvm.testing.assert_allclose(op_res1.asnumpy(), ref_res, rtol=1e-5,", "for j in range(ow): pad_count = np.sum(pad_np[:, :, i*sh:i*sh+kh, j*sw:j*sw+kw]", "in asm elif 'cascadelake' in target: return \"vpdpbusd\" in asm", "= relay.var(\"x\", relay.TensorType((n, c, d, h, w), \"float32\")) y =", "w = tvm.size_var(\"n\"), tvm.size_var(\"c\"), 224, 224 x = relay.var(\"x\", relay.TensorType((n,", "16), \"int32\") def test_conv3d_run(): def run_test_conv3d(dtype, out_dtype, scale, dshape, kshape,", "relay.TensorType((n,) + ishape, dtype)) y = relay.nn.upsampling3d(x, scale_d=scale_d, scale_h=scale_h, scale_w=scale_w,\\", "\"float32\") assert yy.args[1].checked_type == relay.TensorType( (2, 10, 3), \"float32\") #", "for target, ctx in ctx_list(): intrp1 = relay.create_executor(\"graph\", ctx=ctx, target=target)", "(1, 5, 10, 10) dtype = \"float32\" x = relay.var(\"x\",", "kernel 3x3 run_test_conv2d_cuda(\"float32\", \"float32\", 1, dshape, kshape, padding=(0, 0), channels=192,", "opfunc(x, pool_size=(1, 1, 1)) assert \"pool_size=\" in y.astext() yy =", "dtype = \"float32\" def get_shape(): if layout == \"NCDHW\": return", "w), \"int8\")) w = relay.var(\"w\", relay.TensorType((2, 10, 3), \"int8\")) y", "= run_infer_type(y) assert yy.checked_type == relay.TensorType((n,) + oshape, dtype) dshape", "d_np = np.zeros(shape=oshape) d_np[:,:,0:c_np.shape[2]] = c_np ref_res = d_np for", "tvm.testing.assert_allclose(op_res1.asnumpy(), ref_res, rtol=1e-5, atol=1e-5) def test_pool2d(): _test_pool2d(relay.nn.max_pool2d, np.max) _test_pool2d(relay.nn.avg_pool2d, np.mean)", "relay.TensorType(kernel_shape, weight_dtype)) y = relay.nn.conv2d(x, weight, kernel_size=(ch, cw), channels=oc, padding=(1,", "CONDITIONS OF ANY # KIND, either express or implied. See", "= fref(data.astype(out_dtype), dkernel.astype(out_dtype)) for target, ctx in ctx_list(): if target", "yy.checked_type == relay.TensorType( (n, 2, 222), \"int32\") # Infer with", "'winograd' cfg.is_fallback = False cfg['tile_b'] = autotvm.task.space.SplitEntity([-1, 1, 1, 1])", "False cfg['tile_b'] = autotvm.task.space.SplitEntity([-1, 1, 1, 1]) cfg['tile_y'] = autotvm.task.space.SplitEntity([-1,", "and \"vpadd\" in asm def test_depthwise_conv2d_int8(): input_dtype = 'uint8' weight_dtype", "dtypes): input_dtype, weight_dtype, output_dtype = dtypes n, h, w, ch,", "rtol=1e-5, atol=1e-5) # normal conv1d dshape = (1, 3, 224)", "3, 18, 16, 16)) _test_pool3d(relay.nn.max_pool3d, padding=(0, 3, 0, 0, 3,", "for input and weight. 
n, c, h, w = tvm.size_var(\"n\"),", "relay.TensorType((n, c, 200, 200, 400), \"float32\") def _test_pool2d(opfunc, reffunc): n,", "dtypes=dtypes) # Check that intrinisic is not present in the", "j in range(ow): pad_count = np.sum(pad_np[:, :, i*sh:i*sh+kh, j*sw:j*sw+kw] >", "d, h, w, c), \"int8\")) wt = relay.var(\"w\") y =", "data_layout=\"NHWC\", kernel_layout='HWIO', dtypes=dtypes) # Check that intrinisic is not present", "# also group conv2d dshape = (1, 32, 18, 18)", "dtype=dtype) y = relay.nn.conv2d(x, w, padding=padding, dilation=dilation, groups=groups, **attrs) func", "w, kernel_size=(3, 3, 3), padding=(1, 1, 1), channels=2) yy =", "and kernel_layout is HWIO y = relay.nn.conv2d_transpose(x, w, channels=10, kernel_size=(3,", "kshape, padding=(1, 1), channels=10, kernel_size=(3 ,3)) # mixed precision run_test_conv2d(\"int8\",", "shape=kshape, dtype=dtype) y = relay.nn.conv2d(x, w, padding=padding, dilation=dilation, groups=groups, **attrs)", "= intrp1.evaluate(func)(data, kernel) tvm.testing.assert_allclose(op_res1.asnumpy(), ref_res, rtol=1e-5, atol=1e-5) # normal conv1d", "relay.nn.conv3d(x, w, padding=padding, dilation=dilation, groups=groups, data_layout=\"NDHWC\", kernel_layout=\"DHWIO\", **attrs) func =", "by shape of w, mixed precision n, h, w, c", "opfunc(x, pool_size=(1, 1)) assert \"pool_size=\" in y.astext() yy = run_infer_type(y)", "y.astext() yy = run_infer_type(y) assert yy.checked_type == relay.TensorType((n, 10, 224,", "3), \"int8\")) y = relay.nn.conv3d(x, w, out_dtype=\"int32\") assert \"out_dtype=\\\"int32\\\"\" in", "assembly contains *pmadd* instructions targets = [\"llvm -mcpu=skylake-avx512\", \"llvm -mcpu=cascadelake\"]", "5, 224, 224, 6) kshape = (3, 3, 3, 6,", "= run_infer_type(y) assert yy.checked_type == relay.TensorType((n, c, 200, 200, 400),", "datatypes are not HW supported. 
dtypes = ('uint8', 'uint8', 'int32')", "= (n, ic, h, w) x = relay.var(\"x\", relay.TensorType(data_shape, input_dtype))", "intrp1.evaluate(func)(data) tvm.testing.assert_allclose(op_res1.asnumpy(), ref_res, rtol=1e-5, atol=1e-5) def _test_global_pool2d(opfunc, reffunc): n, c,", "_test_upsampling3d(layout, method, coordinate_transformation_mode=\"half_pixel\"): n, c, d, h, w = tvm.size_var(\"n\"),", "relay.TensorType( (n, 2, 222, 222, 222), \"int32\") # infer shape", "z) x_data = np.random.uniform(low=-1, high=1, size=shape).astype(dtype) ref_res = topi.testing.l2_normalize_python(x_data, eps,", "== relay.TensorType( (2, 10, 3, 3), \"float32\") # infer by", "relay.Function([x], y) data = np.random.uniform(size=dshape).astype(dtype) ref_res = reffunc(data, axis=(2,3), keepdims=True)", "# normal conv2d dshape = (1, 3, 224, 224) kshape", "c_np ref_res = d_np for target, ctx in ctx_list(): intrp1", "target=target) op_res1 = intrp1.evaluate(func)(data, kernel) tvm.testing.assert_allclose(op_res1.asnumpy(), ref_res, rtol=1e-5, atol=1e-5) #", "targets: if llvm_version >= 8: dtypes = (('int8', 'int8', 'int32'))", "relay.TensorType( (2, 10, 3, 3), \"float32\") # infer by shape", "kshape, padding=(1, 1), channels=10, kernel_size=3) # dilated conv2d dshape =", "relay.TensorType((n, d, h, w, c), \"int8\")) wt = relay.var(\"w\") y", "\"float32\") # test execution dtype = \"float32\" dshape = (1,", "relay.nn.conv1d(x, w, padding=padding, dilation=dilation, **attrs) func = relay.Function([x, w], y)", "224, 224, 224 x = relay.var(\"x\", relay.ty.TensorType((n, c, d, h,", "relay.var(\"x\", relay.TensorType((n, c, h, w), \"uint8\")) w = relay.var(\"w\", relay.TensorType((2,", "to check int8 robustness # Input channels should be a", "d_np = np.zeros(shape=oshape_nhwc) d_np[:,0:c_np.shape[1],0:c_np.shape[2],:] = c_np def test_conv1d_transpose_ncw_run(): dshape =", "w)) y = relay.nn.lrn(x, size=10, axis=2, bias=0.5, alpha=.00001, beta=0.75) \"alpha=\"", "\"ot\", 4], [\"data_vec_inline\", \"ot\", 1], \\ [\"conv_inline\", \"ot\", 0]]}], \"r\":", "padding=padding, dilation=dilation, groups=groups, data_layout=\"NDHWC\", kernel_layout=\"DHWIO\", **attrs) func = relay.Function([x, w],", "h, w = tvm.size_var(\"n\"), 10, 5, 224, 224 x =", "yy.checked_type == relay.TensorType( (n, 2, 222, 222, 222), \"int32\") #", "ref_res = topi.testing.conv1d_ncw_python( data.astype(out_dtype), kernel.astype(out_dtype), 1, padding, dilation) for target,", "NWC n, c, w = 4, 32, 224 x =", "def test_conv2d_infer_type(): # symbolic in batch dimension n, c, h,", "relay.var(\"x\", shape=dshape) w = relay.var(\"w\") y = relay.nn.conv1d_transpose(x, w, channels=10,", "x = relay.var(\"x\", relay.TensorType(shape, dtype)) z = relay.nn.batch_flatten(x) yy =", "\"float32\"], [1, 1], [1, 1], [1, 1], \"float32\"], \\ {\"i\":", "3), padding=(1, 1, 1), channels=2) yy = run_infer_type(y) assert yy.checked_type", "padding, dilation) for target, ctx in ctx_list(): if target in", "16]], [\"tile_oh\", \"sp\", [8, 1]], \\ [\"tile_ow\", \"sp\", [1, 8]],", "target in targets: if llvm_version >= 8: dtypes = ('uint8',", "- 1): target_dim = target_dim * shape[i + 1] return", "\"__main__\": test_pool1d() test_pool2d() test_pool3d() test_avg_pool2d_no_count_pad() test_lrn() test_l2_normalize() test_conv1d_infer_type() test_conv2d_infer_type() test_conv3d_infer_type()", "test_pool2d(): _test_pool2d(relay.nn.max_pool2d, np.max) _test_pool2d(relay.nn.avg_pool2d, np.mean) _test_pool2d_int(relay.nn.avg_pool2d, np.mean, 'int32') _test_pool2d_int(relay.nn.avg_pool2d, 
np.mean,", "d, h, w = tvm.size_var(\"n\"), 10, 224, 224, 224 x", "224), dtype) # test execution dtype = \"int32\" dshape =", "method == \"nearest_neighbor\": ref = topi.testing.upsampling_python(data, (scale_h, scale_w), layout) else:", "yy.checked_type == relay.TensorType((n, c, 200, 200, 400), \"float32\") def _test_pool2d(opfunc,", "kernel_size=(3 ,3), except_targets=['cuda']) # also group conv2d dshape = (1,", "1, 1, c), \"float32\") n, c, h, w = tvm.size_var(\"n\"),", "3, 32, 32, 32) x = relay.var(\"x\", shape=dshape) pool_type =", "ref_res, rtol=1e-5, atol=1e-5) def _test_pool2d_int(opfunc, reffunc, dtype): n, c, h,", "# CUDA is disabled for 'direct' schedule: # https://github.com/apache/incubator-tvm/pull/3070#issuecomment-486597553 #", "y = relay.nn.lrn(x, size=10, axis=2, bias=0.5, alpha=.00001, beta=0.75) \"alpha=\" in", "relay.TensorType( (n, 2, 222), \"int32\") # Infer with NWC n,", "np.random.uniform(size=dshape).astype(dtype) ref_res = np.pad(data, ((1, 1), (2, 2), (3, 3),", "target=target) op_res = intrp.evaluate(func)(data) np.testing.assert_allclose(op_res.asnumpy(), ref_res, rtol=0.01) def _test_upsampling(layout, method,", "x = relay.var(\"x\", shape=dshape) y = relay.nn.upsampling(x, scale_h=scale_h, scale_w=scale_w, layout=layout,", "ref = topi.testing.bilinear_resize_python(data, (int(round(h*scale_h)), int(round(w*scale_w))), layout) for target, ctx in", "tvm.size_var(\"n\"), 10, 224, 224 x = relay.var(\"x\", relay.ty.TensorType((n, c, h,", "axis) for target, ctx in ctx_list(): intrp1 = relay.create_executor(\"graph\", ctx=ctx,", "\"float32\")) y = opfunc(x, pool_size=(1, 1, 1)) assert \"pool_size=\" in", "beta=beta) yy = run_infer_type(z) assert yy.checked_type == relay.TensorType(shape, dtype) func", "if data_layout == 'NCHW': data_shape = (n, ic, h, w)", "1, dshape, kshape, padding=(1, 1), channels=10, kernel_size=(3 ,3), dilation=(3, 3))", "Infer with a different layout n, c, h, w =", "alpha=.00001, beta=0.75) \"alpha=\" in y.astext() yy = run_infer_type(y) assert yy.checked_type", "run_infer_type(y) assert yy.checked_type == relay.TensorType((d1, ((2*d3)*3)), \"float32\") shape = (1,", "1), \"float32\") # test execution dtype = \"float32\" dshape =", "dilated conv2d dshape = (1, 3, 18, 18) kshape =", ">= 8: dtypes = ('uint8', 'int8', 'int32') # Sweep the", "[[\"tile_co\", \"sp\", [32, 16]], [\"tile_oh\", \"sp\", [8, 1]], \\ [\"tile_ow\",", "kshape, padding=(1, 1, 1), channels=10, kernel_size=(3, 3 ,3), except_targets=[\"cuda\"]) def", "ishape, dtype)) y = relay.nn.upsampling(x, scale_h=scale_h, scale_w=scale_w, layout=layout, method=method, align_corners=align_corners)", "channels=16, data_layout=\"NHWC\", out_dtype=\"int32\") yy = run_infer_type(y) assert yy.checked_type == relay.TensorType(", "legalization so that fast instructions can be picked up. 
for", "kernel_layout='OIHW', dtypes=dtypes) assert _has_fast_int8_instructions(asm, target) for ic in [1, 4,", "224 x = relay.var(\"x\", relay.ty.TensorType((n, c, h, w), \"int16\")) w", "c, w), \"float32\")) y = opfunc(x, pool_size=(1,)) assert \"pool_size=\" in", "\\ [\"depthwise_conv2d_nchw\", [1, 512, 32, 32, \"float32\"], \\ [512, 1,", "[4, 16, 20]: asm = _compile(ic=8, oc=oc, target=target, data_layout=\"NHWC\", kernel_layout='HWIO',", "= relay.var(\"w\", relay.TensorType((2, 10, 3, 3), \"int8\")) y = relay.nn.conv2d(x,", "for i in range(len(shape) - 1): target_dim = target_dim *", "- HWOI c_np = topi.testing.conv2d_transpose_nhwc_python(data, kernel, 'HWOI', 2, 1) d_np", "w], y) dtype = \"float32\" data = np.random.uniform(size=dshape).astype(dtype) kernel =", "= _compile(ic=17, oc=29, target=target, data_layout=\"NHWC\", kernel_layout='HWIO', dtypes=dtypes) assert _has_fast_int8_instructions(asm, target)", "module.run() op_res1 = module.get_output(0) tvm.testing.assert_allclose(op_res1.asnumpy(), ref_res, rtol=1e-3, atol=1e-3) # normal", "h, w, c = tvm.size_var(\"n\"), 10, 10, 12 x =", "the License is distributed on an # \"AS IS\" BASIS,", "schedule: # https://github.com/apache/incubator-tvm/pull/3070#issuecomment-486597553 # group conv2d dshape = (1, 32,", "relay.var(\"x\", shape=dshape) w = relay.var(\"w\") y = relay.nn.conv2d_transpose(x, w, channels=10,", "0, 0, 3, 0), out_shape=(1, 3, 16, 19, 16)) _test_pool3d(relay.nn.max_pool3d,", "= relay.nn.upsampling(x, scale_h=2, scale_w=2, layout=\"NCHW\", method=\"bilinear\") yy = run_infer_type(y) assert", "relay.Function([x, w], y) mod = relay.Module() mod['main'] = func mod", "10, 224, 224, 224 x = relay.var(\"x\", relay.ty.TensorType((n, c, d,", "str(opfunc) else 'avg' y = opfunc(x, pool_size=(2, 2, 2), strides=(2,", "c), \"float32\")) y = opfunc(x, layout=\"NHWC\") yy = run_infer_type(y) assert", "11), \"float32\") def test_conv2d_transpose_nchw_run(): dshape = (1, 3, 18, 18)", "ref_res = fref(data.astype(out_dtype), dkernel.astype(out_dtype)) for target, ctx in ctx_list(): if", "<gh_stars>1-10 # Licensed to the Apache Software Foundation (ASF) under", "relay.var(\"w\", dtype=dtype) y = relay.nn.conv3d(x, w, padding=padding, dilation=dilation, groups=groups, data_layout=\"NDHWC\",", "relay.var(\"x\", relay.TensorType((d1, d2, d3, d4), \"float32\")) y = relay.nn.batch_flatten(x) yy", "= np.zeros(shape=oshape) d_np[:,:,0:c_np.shape[2]] = c_np ref_res = d_np for target,", "w), (c, int(round(h*scale_h)), int(round(w*scale_w))) else: return (h, w, c), (int(round(h*scale_h)),", "1, 1), \"float32\") # test execution dtype = \"float32\" dshape", "1, 1)) assert \"pool_size=\" in y.astext() yy = run_infer_type(y) assert", "ref = topi.testing.upsampling3d_python(data, (scale_d, scale_h, scale_w), layout) else: ref =", "elif 'cascadelake' in target: return \"vpdpbusd\" in asm else: assert", "relay.Function([x, w], y) data = np.random.uniform(-scale, scale, size=dshape).astype(dtype) kernel =", "out_shape=(1, 3, 16, 19, 16)) _test_pool3d(relay.nn.max_pool3d, padding=(0, 0, 4, 0,", "16)) _test_pool3d(relay.nn.max_pool3d, padding=(0, 0, 4, 0, 0, 4), out_shape=(1, 3,", "32, 3, 3), \"int16\")) y = relay.nn.bitserial_conv2d( x, w, kernel_size=(3,", "224, 224 x = relay.var(\"x\", relay.TensorType((n, c, h, w), dtype))", "batch dimension n, c, h, w = tvm.size_var(\"n\"), 10, 224,", "== relay.TensorType((n,) + oshape, dtype) dshape = (1,) + ishape", "56, 56) x = relay.var(\"x\", relay.TensorType(data_shape, input_dtype)) kernel_shape = (64,", "222), 
\"int32\") # Infer with NWC n, c, w =", "dilation=(1, 1, 1), except_targets=None, **attrs): if except_targets is None: except_targets", "2, 4, 3), \"float32\")) y = relay.nn.batch_flatten(x) yy = run_infer_type(y)", "lib, ctx) module.set_input('x', tvm.nd.array(data)) module.set_input(**params) module.run() op_res1 = module.get_output(0) tvm.testing.assert_allclose(op_res1.asnumpy(),", "tvm.size_var(\"n\"), 8, 16, 16, 16 scale_d = 2.0 scale_h =", "[1, 1], [1, 1], [1, 1], \"float32\"], {}, \\ [\"depthwise_conv2d_nchw\",", "w) x = relay.var(\"x\", relay.TensorType(data_shape, input_dtype)) elif data_layout == 'NHWC':", "relay.TensorType((n, c, 100, 200), \"float32\")) y = relay.nn.upsampling(x, scale_h=2, scale_w=2,", "-mcpu=skylake-avx512\", \"llvm -mcpu=cascadelake\"] llvm_version = tvm.codegen.llvm_version_major() for target in targets:", "c, w = tvm.var(\"n\"), 10, 224 x = relay.var(\"x\", relay.ty.TensorType((n,", "1), channels=10, kernel_size=3, dilation=3) def test_conv2d_infer_type(): # symbolic in batch", "padding=(2, 2), channels=192, kernel_size=(7, 7)) def test_conv3d_infer_type(): # symbolic in", "= relay.nn.conv2d(x, weight, kernel_size=(ch, cw), channels=oc, padding=(1, 1), dilation=(1, 1),", "= relay.create_executor(\"graph\", ctx=ctx, target=target) op_res1 = intrp1.evaluate(func)(data, kernel) tvm.testing.assert_allclose(op_res1.asnumpy(), ref_res,", "15, 10, 12), \"float32\") assert yy.args[1].checked_type == relay.TensorType( (10, 15,", "rtol=1e-5, atol=1e-5) # normal conv3d dshape = (1, 3, 5,", "16)) _test_pool3d(relay.nn.max_pool3d, padding=(0, 3, 0, 0, 3, 0), out_shape=(1, 3,", "y = relay.nn.batch_flatten(x) yy = run_infer_type(y) assert yy.checked_type == relay.TensorType((d1,", "yy = run_infer_type(y) assert yy.checked_type == relay.TensorType((d1, ((d2*d3)*d4)), \"float32\") x", "np.zeros(shape=(n, ic, ih+2*ph, iw+2*pw)).astype(dtype) no_zero = (range(n), range(ic), (range(ph, ih+ph)),", "data_layout=\"NHWC\", kernel_layout='HWIO', dtypes=dtypes) assert _has_fast_int8_instructions(asm, target) # Check that int8", "some symbolic values n, c, h, w = tvm.size_var(\"n\"), 2,", "\"int8\")) w = relay.var(\"w\", relay.TensorType((2, 10, 3, 3), \"int8\")) y", "padding=(1, 1), channels=15) assert \"channels=15\" in y.astext() yy = run_infer_type(y)", "'int32') _test_pool2d_int(relay.nn.avg_pool2d, np.mean, 'uint16') _test_global_pool2d(relay.nn.global_max_pool2d, np.max) _test_global_pool2d(relay.nn.global_avg_pool2d, np.mean) def test_pool1d():", "\"ot\", 1], \\ [\"conv_inline\", \"ot\", 0]]}], \"r\": [[0.0002933163], \\ 0,", "relay.var(\"w\", relay.TensorType((12, 11, 5, 5), \"float32\")) y = relay.nn.conv2d_transpose(x, w,", "a_np for target, ctx in ctx_list(): intrp1 = relay.create_executor(\"graph\", ctx=ctx,", "= (4, 10, 7, 7) x = relay.var(\"x\", shape=dshape) y", "range(len(shape) - 1): target_dim = target_dim * shape[i + 1]", "relay.Var(\"x\", t1) func = relay.Function([x], relay.nn.batch_flatten(x)) data = np.random.rand(5, 10,", "== relay.TensorType( (n, 2, 224, 224), \"float32\") assert yy.args[1].checked_type ==", "relay.build_config(opt_level=3): graph, lib, params = relay.build(func, target, params=parameters) def test_bitserial_conv2d_infer_type():", "== relay.TensorType((n, 10, 224), \"float32\") # test execution dtype =", "(1, 500) dtype = \"float32\" x = relay.var(\"x\", relay.TensorType(shape, dtype))", "x = relay.var(\"x\", relay.TensorType((n, c, h, w), \"int8\")) w =", "yy.checked_type == relay.TensorType((n, 10, 224, 224), dtype) # test execution", 
"relay.TensorType((n,) + oshape, dtype) dshape = (1,) + ishape x", "ctx_list(): intrp1 = relay.create_executor(\"graph\", ctx=ctx, target=target) op_res1 = intrp1.evaluate(func)(data, kernel)", "if llvm_version >= 8: dtypes = ('uint8', 'int8', 'int32') #", "Check that vector int mult and add instructions are generated.", "cfg['auto_unroll_max_setp'] = autotvm.task.space.OtherOptionEntity(1500) cfg['unroll_explicit'] = autotvm.task.space.OtherOptionEntity(1) self.memory[key] = cfg return", "8), \"float32\") def test_pad_run(): def _test_run(dtype): dshape = (4, 10,", "ref_res, rtol=1e-5, atol=1e-5) def compile_test_conv2d_arm_cpu(dtype, out_dtype, scale, dshape, kshape, padding=(1,", "[\"ann_reduce\", \"an\", [\"unroll\", \"none\"]], \\ [\"ann_spatial\", \"an\", [\"unroll\", \"unroll\", \"vec\"]],", "relay.var(\"w\", relay.TensorType((2, 10, 3, 3, 3), \"int8\")) y = relay.nn.conv3d(x,", "= run_infer_type(y) assert yy.checked_type == relay.TensorType( (n, 2, 224), \"float32\")", "\"float32\", 1, dshape, kshape, padding=(1, 1), channels=32, groups=32, kernel_size=(3 ,3),", "= relay.var(\"w\") y = relay.nn.conv1d_transpose(x, w, channels=10, kernel_size=(3,), strides=(2,), padding=(1,),", "y = relay.nn.conv2d(x, wt, kernel_size=(3, 3), padding=(1, 1), channels=16, data_layout=\"NHWC\",", "\"float32\") x = relay.var(\"x\", relay.TensorType((d1, 2, d3, 3), \"float32\")) y", "(the # \"License\"); you may not use this file except", "def test_conv2d_transpose_nhwc_run(): dshape_nhwc = (1, 18, 18, 3) kshape_hwoi =", "y = opfunc(x, pool_size=(1,)) assert \"pool_size=\" in y.astext() yy =", "np.random.uniform(low=-1, high=1, size=shape).astype(dtype) ref_res = topi.testing.l2_normalize_python(x_data, eps, axis) for target,", "elif data_layout == 'NHWC': data_shape = (n, h, w, ic)", "_test_pool1d(relay.nn.max_pool1d) _test_pool1d(relay.nn.avg_pool1d) def test_pool3d(): def _test_pool3d(opfunc, padding=(0, 0, 0, 0,", "dshape, kshape, padding=(2, 2), channels=192, kernel_size=(3, 3)) # extended winograd:", "raise ValueError('Not supported') if kernel_layout == 'OIHW': kernel_shape = (oc,", "run_infer_type(y) assert yy.checked_type == relay.TensorType((n, c, 200, 200, 400), \"float32\")", "channels=oc, padding=(1, 1), dilation=(1, 1), data_layout=data_layout, kernel_layout=kernel_layout, out_dtype=output_dtype) func =", "__name__ == \"__main__\": test_pool1d() test_pool2d() test_pool3d() test_avg_pool2d_no_count_pad() test_lrn() test_l2_normalize() test_conv1d_infer_type()", "cw) elif kernel_layout == 'HWIO': kernel_shape = (ch, cw, ic,", "1), channels=10, kernel_size=(3, 3 ,3)) def test_conv3d_ndhwc_run(): def run_test_conv3d(dtype, out_dtype,", "rtol=1e-5, atol=1e-5) def test_upsampling(): _test_upsampling(\"NCHW\", \"nearest_neighbor\") _test_upsampling(\"NCHW\", \"bilinear\", True) _test_upsampling(\"NHWC\",", "('uint8', 'int8', 'int32') # Sweep the input channels to check", "kernel = np.random.uniform(size=kshape).astype(dtype) c_np = topi.testing.conv1d_transpose_ncw_python( data, kernel, 2, 1)", "np.mean, 'uint16') _test_global_pool2d(relay.nn.global_max_pool2d, np.max) _test_global_pool2d(relay.nn.global_avg_pool2d, np.mean) def test_pool1d(): def _test_pool1d(opfunc):", "if method == \"nearest_neighbor\": ref = topi.testing.upsampling_python(data, (scale_h, scale_w), layout)", "= intrp1.evaluate(func)(x_data) tvm.testing.assert_allclose(op_res1.asnumpy(), ref_res, rtol=1e-5) op_res2 = intrp2.evaluate(func)(x_data) tvm.testing.assert_allclose(op_res2.asnumpy(), ref_res,", "input_dtype)) else: 
raise ValueError('Not supported') if kernel_layout == 'OIHW': kernel_shape", "x = relay.var(\"x\", shape=dshape) y = relay.nn.avg_pool2d(x, pool_size=(kh, kw), strides=(sw,", "3) run_test_conv2d(\"float32\", \"float32\", 1, dshape, kshape, padding=(1, 1), channels=10, kernel_size=(3", "= relay.nn.conv2d_transpose(x, w, channels=10, kernel_size=(3, 3), strides=(2, 2), padding=(1, 1),", "h, w), \"float32\")) y = relay.nn.pad(t, ((1, 1), (2, 2),", "c, d, h, w = tvm.size_var(\"n\"), tvm.size_var(\"c\"),\\ tvm.size_var(\"d\"), tvm.size_var(\"h\"), tvm.size_var(\"w\")", "y = relay.nn.conv3d(x, w, out_dtype=\"int32\") assert \"out_dtype=\\\"int32\\\"\" in y.astext() yy", "yy.checked_type == relay.TensorType( (n, 15, 10, 12), \"float32\") assert yy.args[1].checked_type", "3), \"float32\") # infer by shape of w, mixed precision", "kernel_size=3, padding=(1, 1), channels=16, data_layout=\"NWC\", out_dtype=\"int32\") yy = run_infer_type(y) assert", "i in range(len(shape) - 1): target_dim = target_dim * shape[i", "c), \"int8\")) wt = relay.var(\"w\") y = relay.nn.conv1d(x, wt, kernel_size=3,", "int8 robustness # Output channels should be a multiple of", "intrp1.evaluate(func)(data, kernel) tvm.testing.assert_allclose(op_res1.asnumpy(), ref_res, rtol=1e-5, atol=1e-5) def compile_test_conv2d_arm_cpu(dtype, out_dtype, scale,", "y) data = np.random.uniform(size=dshape).astype(dtype) ref_res = reffunc(data, axis=(2,3), keepdims=True) for", "run_test_conv2d(dtype, out_dtype, scale, dshape, kshape, padding=(1, 1), fref=None, groups=1, dilation=(1,", "512, 32, 32, \"float32\"], \\ [512, 1, 3, 3, \"float32\"],", "lib, params = relay.build(func, target, params=parameters) def test_bitserial_conv2d_infer_type(): # Basic", ", h, w = tvm.size_var(\"n\"), tvm.size_var(\"c\"), tvm.size_var(\"h\"), tvm.size_var(\"w\") scale =", "10 parameters = {\"weight\": tvm.nd.array(wdata.astype(weight_dtype))} with relay.build_config(opt_level=3): graph, lib, params", "2), padding=(1, 1), output_padding=(2, 2), data_layout=\"NHWC\", kernel_layout=\"HWIO\") func = relay.Function([x,", "tvm.var(\"n\"), 10, 224 x = relay.var(\"x\", relay.TensorType((n, c, w), \"int8\"))", "'int8', 'int32') asm = _compile(ic=16, oc=32, target=target, data_layout=\"NCHW\", kernel_layout='OIHW', dtypes=fast_int8_dtypes)", "tvm.testing.assert_allclose(op_res1.asnumpy(), ref_res, rtol=1e-5, atol=1e-5) def test_flatten_infer_type(): d1, d2, d3, d4", "w = relay.var(\"w\") y = relay.nn.conv1d_transpose(x, w, channels=10, kernel_size=(3,), strides=(2,),", "shape of w, mixed precision n, h, w, c =", "with the License. You may obtain a copy of the", "kernel) tvm.testing.assert_allclose(op_res1.asnumpy(), ref_res, rtol=1e-5, atol=1e-5) def test_upsampling_infer_type(): n, c ,", "should be a multiple of 16 internally. 
for oc in", "224 x = relay.var(\"x\", relay.TensorType((n//4, c//4, h, w, 4, 4),", "3), padding=(1, 1, 1), channels=16, data_layout=\"NDHWC\", out_dtype=\"int32\") yy = run_infer_type(y)", "kshape = (10, 3, 3) run_test_conv1d(\"float32\", \"float32\", 1, dshape, kshape,", "kernel_layout is HWIO y = relay.nn.conv2d_transpose(x, w, channels=10, kernel_size=(3, 3),", "relay.var(\"x\", relay.TensorType(shape, dtype)) eps=0.001 axis=1 z = relay.nn.l2_normalize(x, eps=0.001, axis=[axis])", "out_dtype=\"int32\") assert \"out_dtype=\\\"int32\\\"\" in y.astext() yy = run_infer_type(y) assert yy.checked_type", "w, ch, cw = 1, 64, 64, 3, 3 if", "applicable law or agreed to in writing, # software distributed", "target, params=parameters) assembly = lib.get_source(\"asm\") return assembly def _has_fast_int8_instructions(asm, target):", "is None: ref_res = topi.testing.conv3d_ndhwc_python( data.astype(out_dtype), dkernel.astype(out_dtype), 1, padding) else:", "== relay.TensorType((n, 10, 224, 224), \"float32\") # test execution dtype", "range(ic), (range(ph, ih+ph)), (range(pw, iw+pw))) pad_np[np.ix_(*no_zero)] = a_np b_np =", "module.get_output(0) tvm.testing.assert_allclose(op_res1.asnumpy(), ref_res, rtol=1e-3, atol=1e-3) # normal winograd: stride 1,", "relay.create_executor(\"debug\", ctx=ctx, target=target) op_res1 = intrp1.evaluate(func)(x_data) tvm.testing.assert_allclose(op_res1.asnumpy(), ref_res, rtol=1e-5) op_res2", "yy = run_infer_type(y) assert yy.checked_type == relay.TensorType( (n, w, 16),", "y) data = np.random.random_integers(low=-128, high=128, size=dshape) ref_res = reffunc(data.reshape(1,3,14,2,14,2), axis=(3,5)).astype(dtype)", "rtol=1e-5) def batch_flatten(data): shape = data.shape target_dim = 1 for", "params = relay.build(func, target, params=parameters) def test_bitserial_conv2d_infer_type(): # Basic shape", "assert yy.checked_type == relay.TensorType( (32, 2, 128, 128, 1), \"uint16\")", "[\"reorder_1\", \"re\", [0, 1, 2, 3, 6, 4, 5]], \\", "dtypes = ('uint8', 'int8', 'int32') # Sweep the input channels", "is distributed on an # \"AS IS\" BASIS, WITHOUT WARRANTIES", "10, 3, 3, 3), \"int8\")) y = relay.nn.conv3d(x, w, out_dtype=\"int32\")", "file # to you under the Apache License, Version 2.0", "16, 19, 16)) _test_pool3d(relay.nn.avg_pool3d, padding=(0, 0, 4, 0, 0, 4),", "relay.var(\"x\", shape=dshape, dtype=dtype) w = relay.var(\"w\", dtype=dtype) y = relay.nn.conv3d(x,", "ctx_list, run_infer_type from tvm.contrib import util import topi.testing def test_conv1d_infer_type():", "w], y) mod = relay.Module() mod['main'] = func mod =", "w], y) mod = tvm.relay.Module() mod[\"main\"] = func test_schedule='{\"i\": [\"llvm", "# with the License. You may obtain a copy of", "2, d3, 3), \"float32\")) y = relay.nn.batch_flatten(x) yy = run_infer_type(y)", "{\"weight\": tvm.nd.array(wdata.astype(weight_dtype))} targets = [\"llvm -mcpu=skylake-avx512\", \"llvm -mcpu=cascadelake\"] llvm_version =", "\\ [512, 1, 3, 3, \"float32\"], [1, 1], [1, 1],", "input and weight. 
n, c, d, h, w = tvm.size_var(\"n\"),", "\\ 0, 3.1976189613342285, 1570811630.6058347], \"v\": 0.1}' temp = util.tempdir() with", "1, dshape, kshape, padding=(2, 2), channels=192, kernel_size=(7, 7)) def test_conv3d_infer_type():", "[0, 1, 2, 3, 6, 4, 5]], \\ [\"ann_reduce\", \"an\",", "size=dshape).astype(dtype) kernel = np.random.uniform(-scale, scale, size=kshape).astype(dtype) dkernel = topi.testing.dilate_python(kernel, (1,", "w = 1, 2, 3, 4 t = relay.var(\"t\", relay.TensorType((n,", "tvm.size_var(\"w\") x = relay.var(\"x\", shape=(n, c , h, w)) y", "Infer with NWC n, c, w = 4, 32, 224", "= relay.nn.conv2d(x, w, kernel_size=(3, 3), padding=(1, 1), channels=2) yy =", "y = relay.nn.conv2d_transpose(x, w, output_padding=(1, 1), channels=11, data_layout=\"NHWC\") yy =", "x = relay.var(\"x\", shape=dshape) w = relay.var(\"w\") y = relay.nn.conv1d_transpose(x,", "assembly. assert not _has_fast_int8_instructions(asm, target) # Check that a vectorized", "assert yy.checked_type == relay.TensorType( (n, 2, 222), \"int32\") # Infer", "= \"float32\" dshape = (1, 3, 32) x = relay.var(\"x\",", "\"float32\"], \\ [1, 1], [1, 1], [1, 1], \"float32\"], {},", "18) kshape = (3, 10, 3, 3) oshape = (1,", "in ctx_list(): intrp1 = relay.create_executor(\"graph\", ctx=ctx, target=target) op_res1 = intrp1.evaluate(func)(data,", "under one # or more contributor license agreements. See the", "intrp1.evaluate(func)(data) tvm.testing.assert_allclose(op_res1.asnumpy(), ref_res, rtol=1e-5, atol=1e-5) _test_pool3d(relay.nn.max_pool3d) _test_pool3d(relay.nn.max_pool3d, padding=(2, 0, 0,", "w, mixed precision n, h, w, c = tvm.size_var(\"n\"), 10,", "kernel_layout='HWIO', dtypes=dtypes) # Check that intrinisic is not present in", "symbolic in batch dimension n, c, w = tvm.var(\"n\"), 10,", "mixed precision n, c, w = tvm.var(\"n\"), 10, 224 x", "3)) # extended winograd: stride 1, padding N, kernel 3x3", "= relay.var(\"x\", shape=dshape) w = relay.var(\"w\") y = relay.nn.conv1d_transpose(x, w,", "224 x = relay.var(\"x\", relay.TensorType((n, c, h, w), dtype)) y", "and test assembly contains *pmadd* instructions targets = [\"llvm -mcpu=skylake-avx512\",", "= intrp1.evaluate(func)(data) tvm.testing.assert_allclose(op_res1.asnumpy(), ref_res, rtol=1e-5, atol=1e-5) def test_flatten_infer_type(): d1, d2,", "kshape, padding=(1, 1, 1), fref=None, groups=1, dilation=(1, 1, 1), except_targets=None,", "10, 224, 224), \"float32\") # test execution dtype = \"float32\"", "w = relay.var(\"w\", relay.TensorType((12, 11, 5, 5), \"float32\")) y =", "def test_conv1d_transpose_ncw_run(): dshape = (1, 3, 18) kshape = (3,", "for input and weight. 
n, c, w = tvm.var(\"n\"), 10,", "= relay.var(\"x\", shape=dshape, dtype=dtype) w = relay.var(\"w\", dtype=dtype) y =", "224, 224 x = relay.var(\"x\", relay.TensorType((n, c, d, h, w),", "data_layout=\"NHWC\", out_dtype=\"int32\") yy = run_infer_type(y) assert yy.checked_type == relay.TensorType( (n,", "size=dshape).astype(dtype) kernel = np.random.uniform(-scale, scale, size=kshape).astype(dtype) ref_res = topi.testing.conv2d_nchw_python( data.astype(out_dtype),", "kernel_size=(3, 3), padding=(1, 1), channels=16, data_layout=\"NCHW4n4c\", kernel_layout=\"OIHW4o4i\", out_dtype=\"int32\") yy =", "3, 3, 4, 4), \"int8\") # Infer with NHWC n,", "(d, h, w, c), (int(round(d*scale_d)), int(round(h*scale_h)),\\ int(round(w*scale_w)), c) ishape, oshape", "1570811630.6058347], \"v\": 0.1}' temp = util.tempdir() with open(temp.relpath(\"temp.log\"), \"w\") as", "dshape, kshape, padding=(1, 1, 1), fref=None, groups=1, dilation=(1, 1, 1),", "16, 20)) def test_avg_pool2d_no_count_pad(): kh, kw = (4, 4) sh,", "relay.TensorType((n//4, c//4, h, w, 4, 4), \"int8\")) wt = relay.var(\"w\")", "atol=1e-5) _test_pool1d(relay.nn.max_pool1d) _test_pool1d(relay.nn.avg_pool1d) def test_pool3d(): def _test_pool3d(opfunc, padding=(0, 0, 0,", "def _compile(ic, oc, target, data_layout, kernel_layout, dtypes): input_dtype, weight_dtype, output_dtype", "_test_pool2d_int(opfunc, reffunc, dtype): n, c, h, w = tvm.size_var(\"n\"), 10,", "conv2d dshape = (1, 3, 18, 18) kshape = (10,", "normal conv1d dshape = (1, 3, 224) kshape = (10,", "relay.Function([x, w], y) mod = tvm.relay.Module() mod[\"main\"] = func test_schedule='{\"i\":", "dilation=(1, 1), **attrs): x = relay.var(\"x\", shape=dshape, dtype=dtype) w =", "relay.TensorType((n, c, tvm.expr.Cast(\"int32\", tvm.round(h*scale)), tvm.expr.Cast(\"int32\", tvm.round(w*scale))), \"float32\") n, c =", "intrp.evaluate(func)(data) np.testing.assert_allclose(op_res.asnumpy(), ref_res, rtol=0.01) def _test_upsampling(layout, method, align_corners=False): n, c,", "c, w = tvm.var(\"n\"), 10, 224 x = relay.var(\"x\", relay.TensorType((n,", "different dtypes for input and weight. n, c, d, h,", "relay.TensorType((n, 10, 224), \"float32\") # test execution dtype = \"float32\"", "axis=1 bias=0.5 alpha=.00001 beta=0.75 z = relay.nn.lrn(x, size=size, axis=axis, bias=bias,", "[\"llvm -device=arm_cpu\", \"topi_nn_depthwise_conv2d_nchw\", \\ [[\"TENSOR\", [1, 512, 32, 32], \"float32\"],", "= relay.var(\"x\", shape=dshape_nhwc) w = relay.var(\"w\") # kshape and kernel_layout", "else: ref = topi.testing.bilinear_resize_python(data, (int(round(h*scale_h)), int(round(w*scale_w))), layout) for target, ctx", "layout=\"NCDHW\", method=\"trilinear\") yy = run_infer_type(y) assert yy.checked_type == relay.TensorType((n, c,", "ctx=ctx, target=target) out = executor.evaluate(func)(data) tvm.testing.assert_allclose(out.asnumpy(), ref, rtol=1e-5, atol=1e-5) def", "agreements. 
See the NOTICE file # distributed with this work", "data_layout=data_layout, kernel_layout=kernel_layout, out_dtype=output_dtype) func = relay.Function([x, weight], y) wdata =", "padding=padding) func = relay.Function([x], y) # check output shape f_out_shape", "0, axis=(2,3)) b_np[:,:,i,j] = np.sum(pad_np[:, :, i*sh:i*sh+kh, j*sw:j*sw+kw], axis=(2,3)) /", "shape=dshape_nhwc) w = relay.var(\"w\") # kshape and kernel_layout should have", "layout=\"NHWC\") yy = run_infer_type(y) assert yy.checked_type == relay.TensorType((n, 1, 1,", "5, 224, 224 x = relay.var(\"x\", relay.TensorType((n, c, d, h,", "= util.tempdir() with open(temp.relpath(\"temp.log\"), \"w\") as log_file: log_file.write(test_schedule) with autotvm.apply_history_best(temp.relpath(\"temp.log\")):", "(n, d, h, w, 16), \"int32\") def test_conv3d_run(): def run_test_conv3d(dtype,", "kernel_size=(3, 3 ,3)) def test_conv3d_ndhwc_run(): def run_test_conv3d(dtype, out_dtype, scale, dshape,", "channels=10, kernel_size=(1 ,3)) # dilated conv2d dshape = (1, 3,", "if llvm_version >= 8: dtypes = (('int8', 'int8', 'int32')) #", "size=kshape).astype(dtype) dkernel = topi.testing.dilate_python(kernel, (1, 1) + dilation) if fref", "# dilated conv2d dshape = (1, 3, 18) kshape =", "tvm.relay.build(mod, target=\"llvm -device=arm_cpu\") # depthwise conv2d dshape = (1, 32,", "= tvm.size_var(\"n\"), tvm.size_var(\"c\"),\\ tvm.size_var(\"d\"), tvm.size_var(\"h\"), tvm.size_var(\"w\") scale = tvm.const(2.0, \"float64\")", "8, 3, 3, 4, 4), \"int8\") # Infer with NHWC", "n, c, h, w = tvm.size_var(\"n\"), tvm.size_var(\"c\"), 224, 224 x", "= relay.var(\"x\", relay.ty.TensorType((n, c, h, w), \"float32\")) w = relay.var(\"w\")", "assert \"pool_size=\" in y.astext() yy = run_infer_type(y) assert yy.checked_type ==", "padding=(1, 1), fref=None, dilation=1, except_targets=None, **attrs): if except_targets is None:", "_compile(ic=ic, oc=16, target=target, data_layout=\"NCHW\", kernel_layout='OIHW', dtypes=dtypes) assert _has_fast_int8_instructions(asm, target) for", "op_res1 = intrp1.evaluate(func)(data, kernel) tvm.testing.assert_allclose(op_res1.asnumpy(), ref_res, rtol=1e-5, atol=1e-5) def test_conv2d_transpose_nhwc_run():", "relay.TensorType((n, c, d, h, w), \"int8\")) w = relay.var(\"w\", relay.TensorType((2,", "1) + dilation) if fref is None: ref_res = topi.testing.conv3d_ndhwc_python(", "intrp2.evaluate(func)(x_data) tvm.testing.assert_allclose(op_res2.asnumpy(), ref_res, rtol=1e-5) def batch_flatten(data): shape = data.shape target_dim", "y = relay.nn.conv2d_transpose(x, w, kernel_size=(3, 3), padding=(1, 1), channels=15) assert", "== relay.TensorType( (2, 10, 3, 3, 3), \"float32\") # infer", "dshape = (1, 3, 28, 28) x = relay.var(\"x\", shape=dshape,", "generations, because we default to NCHWc layout. target = \"llvm", "= relay.var(\"w\", relay.IncompleteType()) y = relay.nn.conv2d_transpose(x, w, kernel_size=(3, 3), padding=(1,", "autotvm.task.space.FallbackConfigEntity() cfg.template_key = 'winograd' cfg.is_fallback = False cfg['tile_b'] = autotvm.task.space.SplitEntity([-1,", "align_corners=align_corners) yy = run_infer_type(y) assert yy.checked_type == relay.TensorType((n,) + oshape,", "out_dtype=\"int32\") yy = run_infer_type(y) assert yy.checked_type == relay.TensorType( (1, 4,", "target=\"llvm -device=arm_cpu\") # depthwise conv2d dshape = (1, 32, 18,", "License. 
You may obtain a copy of the License at", "return assembly def _has_fast_int8_instructions(asm, target): if 'skylake-avx512' in target: return", "_test_global_pool2d(opfunc, reffunc): n, c, h, w = tvm.size_var(\"n\"), tvm.size_var(\"c\"), 224,", "= run_infer_type(y) assert yy.checked_type == relay.TensorType((3, 24), \"float32\") x =", "= relay.var(\"x\", shape=dshape, dtype=dtype) w = relay.var(\"w\", shape=kshape, dtype=dtype) y", "precision n, c, d, h, w = tvm.size_var(\"n\"), 10, 224,", "reffunc(data.reshape(1,3,14,2,14,2), axis=(3,5)).astype(dtype) for target, ctx in ctx_list(): intrp1 = relay.create_executor(\"graph\",", "fref=None, groups=1, dilation=(1, 1, 1), except_targets=None, **attrs): if except_targets is", "n, c, d, h, w = tvm.size_var(\"n\"), tvm.size_var(\"c\"),\\ tvm.size_var(\"d\"), tvm.size_var(\"h\"),", "w, c), \"int8\")) wt = relay.var(\"w\") y = relay.nn.conv1d(x, wt,", "parameters = {\"weight\": tvm.nd.array(wdata.astype(weight_dtype))} targets = [\"llvm -mcpu=skylake-avx512\", \"llvm -mcpu=cascadelake\"]", "padding=(0, 0, 4, 0, 0, 4), out_shape=(1, 3, 16, 16,", "atol=1e-5) def test_upsampling3d(): _test_upsampling3d(\"NCDHW\", \"nearest_neighbor\") _test_upsampling3d(\"NCDHW\", \"trilinear\", \"align_corners\") _test_upsampling3d(\"NDHWC\", \"nearest_neighbor\")", "relay.build_module.build(mod, target=target, params=params) module = tvm.contrib.graph_runtime.create(graph, lib, ctx) module.set_input('x', tvm.nd.array(data))", "(n, 2, 224, 224), \"float32\") assert yy.args[1].checked_type == relay.TensorType( (2,", "0), channels=32) yy = run_infer_type(y) assert yy.checked_type == relay.TensorType( (n,", "\"topi_nn_depthwise_conv2d_nchw\", \\ [[\"TENSOR\", [1, 512, 32, 32], \"float32\"], \\ [\"TENSOR\",", "[1, 4, 6]: asm = _compile(ic=ic, oc=16, target=target, data_layout=\"NHWC\", kernel_layout='HWIO',", "tvm.contrib.graph_runtime.create(graph, lib, ctx) module.set_input('x', tvm.nd.array(data)) module.set_input(**params) module.run() op_res1 = module.get_output(0)", "+ dilation) if fref is None: ref_res = topi.testing.conv3d_ndhwc_python( data.astype(out_dtype),", "yy.checked_type == relay.TensorType((n, c, tvm.expr.Cast(\"int32\", tvm.round(h*scale)), tvm.expr.Cast(\"int32\", tvm.round(w*scale))), \"float32\") n,", "run_infer_type(y) assert yy.checked_type == relay.TensorType( (n, 2, 224), \"float32\") assert", "= relay.var(\"x\", relay.TensorType((n, c, h, w), \"float32\")) y = opfunc(x)", "6]: asm = _compile(ic=ic, oc=16, target=target, data_layout=\"NCHW\", kernel_layout='OIHW', dtypes=dtypes) assert", "is None: except_targets = [] x = relay.var(\"x\", shape=dshape, dtype=dtype)", "16), pool_type, False) for target, ctx in ctx_list(): intrp1 =", "32, 18, 18) kshape = (32, 1, 3, 3) run_test_conv2d(\"float32\",", "= topi.testing.upsampling_python(data, (scale_h, scale_w), layout) else: ref = topi.testing.bilinear_resize_python(data, (int(round(h*scale_h)),", "ref_res = topi.testing.pool3d_ncdhw_python(data, (2, 2, 2), (2, 2, 2), padding,", "_test_run(dtype): dshape = (4, 10, 7, 7) x = relay.var(\"x\"," ]
[ "Unicode: 4\", unescaped_token) def test_list_to_index_dict(self): lst = [\"test\", \"strings\"] d", "self._init_subtokenizer(vocab_list) s = \"testing 123\" encoded_list = subtokenizer.encode(s) self.assertEqual([1, 2,", "# testing 123 decoded_str = subtokenizer.decode(encoded_list) self.assertEqual(\"testing 123\", decoded_str) def", "12 }) min_count = 5 alphabet = set(\"translate\") reserved_tokens =", "2.0 (the \"License\"); # you may not use this file", "5} alphabet = set(\"abc_\") subtoken_dict = {\"a\": 0, \"b\": 1,", "tokenizer.Subtokenizer(temp_file.name, reserved_tokens=[]) def test_encode(self): vocab_list = [\"123_\", \"test\", \"ing_\"] subtokenizer", "{\"ab\": 1, \"bc\": 3, \"abc\": 5} alphabet = set(\"abc_\") min_count", "= {\"a\": 0, \"b\": 1, \"c\": 2, \"_\": 3} max_subtoken_length", "self.assertDictEqual({\"test\": 0, \"strings\": 1}, d) def test_split_token_to_subtokens(self): token = \"abc\"", "test_generate_alphabet_dict(self): s = [\"testing\", \"123\"] reserved_tokens = [\"???\"] alphabet =", "= collections.defaultdict(int, { \"a\": 2, \"b\": 4, \"c\": 1, \"ab\":", "[\"testing\", \"123\"] reserved_tokens = [\"???\"] alphabet = tokenizer._generate_alphabet_dict(s, reserved_tokens) self.assertIn(\"?\",", "subtoken_counts = collections.defaultdict(int, { \"translate\": 10, \"t\": 40, \"tr\": 16,", "in the vocab list for c in alphabet: self.assertIn(c, vocab_list)", "\\\\, Unicode: 4\", unescaped_token) def test_list_to_index_dict(self): lst = [\"test\", \"strings\"]", "\"test\", \"ing_\"] subtokenizer = self._init_subtokenizer(vocab_list) encoded_list = [1, 2, 0]", "be decremented to 2, # so it should not be", "subtoken) w.write(\"\\n\") return tokenizer.Subtokenizer(temp_file.name, reserved_tokens=[]) def test_encode(self): vocab_list = [\"123_\",", "list). self.assertNotIn(\"tra\", subtoken_list) self.assertIn(\"tr\", subtoken_list) self.assertIn(\"t\", subtoken_list) self.assertEqual(len(\"translate\"), max_token_length) def", "self.assertIn(\"1\", alphabet) self.assertIn(\"2\", alphabet) self.assertIn(\"3\", alphabet) def test_count_and_gen_subtokens(self): token_counts =", "num_iterations = 1 reserved_tokens = [\"reserved\", \"tokens\"] vocab_list = tokenizer._generate_subtokens(token_counts,", "= [\"test\", \"? 
\", \"testing\", \"123\", \".\"] s = tokenizer._join_tokens_to_string(tokens,", "of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless", "= subtokenizer._subtoken_ids_to_tokens(encoded_list) self.assertEqual([u\"testing\", u\"123\"], token_list) class StringHelperTest(tf.test.TestCase): def test_split_string_to_tokens(self): text", "to 2, # so it should not be added to", "self.assertIn(\"3\", alphabet) def test_count_and_gen_subtokens(self): token_counts = {\"abc\": 5} alphabet =", "3, \"abbc\": 5 }) min_count = 3 subtoken_buckets = tokenizer._filter_and_bucket_subtokens(", "subtoken_buckets = tokenizer._filter_and_bucket_subtokens( subtoken_counts, min_count) self.assertEqual(len(subtoken_buckets[0]), 0) self.assertEqual(set(\"b\"), subtoken_buckets[1]) self.assertEqual(set([\"ab\",", "self.assertIn(\"i\", alphabet) self.assertIn(\"n\", alphabet) self.assertIn(\"g\", alphabet) self.assertIn(\"1\", alphabet) self.assertIn(\"2\", alphabet)", "subtoken_counts) def test_filter_and_bucket_subtokens(self): subtoken_counts = collections.defaultdict(int, { \"a\": 2, \"b\":", "123\" encoded_list = subtokenizer.encode(s) self.assertEqual([1, 2, 0], encoded_list) def test_decode(self):", "= set(\"abc_\") min_count = 100 num_iterations = 1 reserved_tokens =", "{ \"a\": 5, \"b\": 5, \"c\": 5, \"_\": 5, \"ab\":", "= tokenizer._split_token_to_subtokens(token, subtoken_dict, max_subtoken_length) self.assertEqual([\"ab\", \"c\"], subtokens) def test_generate_alphabet_dict(self): s", "set(\"translate\") reserved_tokens = [\"reserved\", \"tokens\"] subtoken_list, max_token_length = tokenizer._gen_new_subtoken_list( subtoken_counts,", "use this file except in compliance with the License. #", "self.assertIn(\"e\", alphabet) self.assertIn(\"s\", alphabet) self.assertIn(\"i\", alphabet) self.assertIn(\"n\", alphabet) self.assertIn(\"g\", alphabet)", "\"a\": 5, \"b\": 5, \"c\": 5, \"_\": 5, \"ab\": 5,", "vocab list for c in alphabet: self.assertIn(c, vocab_list) if __name__", "= \"testing 123\" encoded_list = subtokenizer.encode(s) self.assertEqual([1, 2, 0], encoded_list)", "self.assertEqual(len(\"translate\"), max_token_length) def test_generate_subtokens(self): token_counts = {\"ab\": 1, \"bc\": 3,", "the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required", "test_generate_subtokens(self): token_counts = {\"ab\": 1, \"bc\": 3, \"abc\": 5} alphabet", "c in alphabet: self.assertIn(c, vocab_list) if __name__ == \"__main__\": tf.test.main()", "def _init_subtokenizer(self, vocab_list): temp_file = tempfile.NamedTemporaryFile(delete=False) with tf.io.gfile.GFile(temp_file.name, \"w\") as", "License. # You may obtain a copy of the License", "reserved_tokens = [\"reserved\", \"tokens\"] vocab_list = tokenizer._generate_subtokens(token_counts, alphabet, min_count, num_iterations,", "= self._init_subtokenizer(vocab_list) s = \"testing 123\" encoded_list = subtokenizer.encode(s) self.assertEqual([1,", "The TensorFlow Authors. All Rights Reserved. 
# # Licensed under", "5, \"b\": 5, \"c\": 5, \"_\": 5, \"ab\": 5, \"bc\":", "def test_encode(self): vocab_list = [\"123_\", \"test\", \"ing_\"] subtokenizer = self._init_subtokenizer(vocab_list)", "40, \"tr\": 16, \"tra\": 12 }) min_count = 5 alphabet", "\"c\": 1, \"ab\": 6, \"ac\": 3, \"abbc\": 5 }) min_count", "5, \"_\": 5, \"ab\": 5, \"bc\": 5, \"c_\": 5, \"abc\":", "under the License is distributed on an \"AS IS\" BASIS,", "= tokenizer._list_to_index_dict(lst) self.assertDictEqual({\"test\": 0, \"strings\": 1}, d) def test_split_token_to_subtokens(self): token", "License for the specific language governing permissions and # limitations", "limitations under the License. \"\"\"Test Subtokenizer and string helper methods.\"\"\"", "self.assertEqual(len(subtoken_buckets[0]), 0) self.assertEqual(set(\"b\"), subtoken_buckets[1]) self.assertEqual(set([\"ab\", \"ac\"]), subtoken_buckets[2]) self.assertEqual(len(subtoken_buckets[3]), 0) self.assertEqual(set([\"abbc\"]),", "# so it should not be added to the canddiate", "def test_split_string_to_tokens(self): text = \"test? testing 123.\" tokens = tokenizer._split_string_to_tokens(text,", "is in the vocab list for c in alphabet: self.assertIn(c,", "self.assertEqual(\"Underline: _, Backslash: \\\\, Unicode: 4\", unescaped_token) def test_list_to_index_dict(self): lst", "self.assertEqual([\"test\", \"? \", \"testing\", \"123\", \".\"], tokens) def test_join_tokens_to_string(self): tokens", "\"_\": 3} max_subtoken_length = 2 subtoken_counts = tokenizer._count_and_gen_subtokens( token_counts, alphabet,", "added to the canddiate list). self.assertNotIn(\"tra\", subtoken_list) self.assertIn(\"tr\", subtoken_list) self.assertIn(\"t\",", "Reserved. # # Licensed under the Apache License, Version 2.0", "\"bc\": 3, \"abc\": 5} alphabet = set(\"abc_\") min_count = 100", "max_subtoken_length) self.assertIsInstance(subtoken_counts, collections.defaultdict) self.assertDictEqual( { \"a\": 5, \"b\": 5, \"c\":", "= 2 subtokens = tokenizer._split_token_to_subtokens(token, subtoken_dict, max_subtoken_length) self.assertEqual([\"ab\", \"c\"], subtokens)", "TensorFlow Authors. All Rights Reserved. # # Licensed under the", "5} alphabet = set(\"abc_\") min_count = 100 num_iterations = 1", "1, \"c\": 2, \"ab\": 3} max_subtoken_length = 2 subtokens =", "in compliance with the License. # You may obtain a", "= subtokenizer.encode(s) self.assertEqual([1, 2, 0], encoded_list) def test_decode(self): vocab_list =", "software # distributed under the License is distributed on an", "s = [\"testing\", \"123\"] reserved_tokens = [\"???\"] alphabet = tokenizer._generate_alphabet_dict(s,", "set(\"abc_\") subtoken_dict = {\"a\": 0, \"b\": 1, \"c\": 2, \"_\":", "should not be added to the canddiate list). self.assertNotIn(\"tra\", subtoken_list)", "subtokenizer._subtoken_ids_to_tokens(encoded_list) self.assertEqual([u\"testing\", u\"123\"], token_list) class StringHelperTest(tf.test.TestCase): def test_split_string_to_tokens(self): text =", "tokenizer._join_tokens_to_string(tokens, tokenizer._ALPHANUMERIC_CHAR_SET) self.assertEqual(\"test? testing 123.\", s) def test_escape_token(self): token =", "\"strings\": 1}, d) def test_split_token_to_subtokens(self): token = \"abc\" subtoken_dict =", "text = \"test? 
testing 123.\" tokens = tokenizer._split_string_to_tokens(text, tokenizer._ALPHANUMERIC_CHAR_SET) self.assertEqual([\"test\",", "that \"tra\" isn\"t in the list (its count should be", "3} max_subtoken_length = 2 subtokens = tokenizer._split_token_to_subtokens(token, subtoken_dict, max_subtoken_length) self.assertEqual([\"ab\",", "= 1 reserved_tokens = [\"reserved\", \"tokens\"] vocab_list = tokenizer._generate_subtokens(token_counts, alphabet,", "u\"Underline: \\\\u, Backslash: \\\\\\\\, Unicode: \\\\52;\" unescaped_token = tokenizer._unescape_token(escaped_token) self.assertEqual(\"Underline:", "token_list = subtokenizer._subtoken_ids_to_tokens(encoded_list) self.assertEqual([u\"testing\", u\"123\"], token_list) class StringHelperTest(tf.test.TestCase): def test_split_string_to_tokens(self):", "class StringHelperTest(tf.test.TestCase): def test_split_string_to_tokens(self): text = \"test? testing 123.\" tokens", "self.assertEqual(len(subtoken_buckets[3]), 0) self.assertEqual(set([\"abbc\"]), subtoken_buckets[4]) def test_gen_new_subtoken_list(self): subtoken_counts = collections.defaultdict(int, {", "unescaped_token = tokenizer._unescape_token(escaped_token) self.assertEqual(\"Underline: _, Backslash: \\\\, Unicode: 4\", unescaped_token)", "\"tra\" isn\"t in the list (its count should be decremented", "min_count, num_iterations, reserved_tokens) # Check that reserved tokens are at", "reserved_tokens = [\"reserved\", \"tokens\"] subtoken_list, max_token_length = tokenizer._gen_new_subtoken_list( subtoken_counts, min_count,", "alphabet = set(\"abc_\") subtoken_dict = {\"a\": 0, \"b\": 1, \"c\":", "alphabet) self.assertIn(\"n\", alphabet) self.assertIn(\"g\", alphabet) self.assertIn(\"1\", alphabet) self.assertIn(\"2\", alphabet) self.assertIn(\"3\",", "min_count = 5 alphabet = set(\"translate\") reserved_tokens = [\"reserved\", \"tokens\"]", "}) min_count = 5 alphabet = set(\"translate\") reserved_tokens = [\"reserved\",", "= u\"Underline: \\\\u, Backslash: \\\\\\\\, Unicode: \\\\52;\" unescaped_token = tokenizer._unescape_token(escaped_token)", "lst = [\"test\", \"strings\"] d = tokenizer._list_to_index_dict(lst) self.assertDictEqual({\"test\": 0, \"strings\":", "testing 123 token_list = subtokenizer._subtoken_ids_to_tokens(encoded_list) self.assertEqual([u\"testing\", u\"123\"], token_list) class StringHelperTest(tf.test.TestCase):", "subtoken_buckets[2]) self.assertEqual(len(subtoken_buckets[3]), 0) self.assertEqual(set([\"abbc\"]), subtoken_buckets[4]) def test_gen_new_subtoken_list(self): subtoken_counts = collections.defaultdict(int,", "tempfile import tensorflow as tf from official.nlp.transformer.utils import tokenizer class", "min_count) self.assertEqual(len(subtoken_buckets[0]), 0) self.assertEqual(set(\"b\"), subtoken_buckets[1]) self.assertEqual(set([\"ab\", \"ac\"]), subtoken_buckets[2]) self.assertEqual(len(subtoken_buckets[3]), 0)", "collections.defaultdict(int, { \"a\": 2, \"b\": 4, \"c\": 1, \"ab\": 6,", "alphabet, reserved_tokens) # Check that \"tra\" isn\"t in the list", "OF ANY KIND, either express or implied. # See the", "WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.", "import collections import tempfile import tensorflow as tf from official.nlp.transformer.utils", "testing 123.\", s) def test_escape_token(self): token = u\"abc_\\\\4\" alphabet =", "ANY KIND, either express or implied. 
# See the License", "See the License for the specific language governing permissions and", "subtokenizer.decode(encoded_list) self.assertEqual(\"testing 123\", decoded_str) def test_subtoken_ids_to_tokens(self): vocab_list = [\"123_\", \"test\",", "alphabet = set(\"abc_\\\\u;\") escaped_token = tokenizer._escape_token(token, alphabet) self.assertEqual(\"abc\\\\u\\\\\\\\\\\\52;_\", escaped_token) def", "Copyright 2021 The TensorFlow Authors. All Rights Reserved. # #", "def test_filter_and_bucket_subtokens(self): subtoken_counts = collections.defaultdict(int, { \"a\": 2, \"b\": 4,", "the License. # You may obtain a copy of the", "at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable", "for the specific language governing permissions and # limitations under", "to in writing, software # distributed under the License is", "Check that reserved tokens are at the front of the", "\"w\") as w: for subtoken in vocab_list: w.write(\"'%s'\" % subtoken)", "tempfile.NamedTemporaryFile(delete=False) with tf.io.gfile.GFile(temp_file.name, \"w\") as w: for subtoken in vocab_list:", "max_token_length) def test_generate_subtokens(self): token_counts = {\"ab\": 1, \"bc\": 3, \"abc\":", "# See the License for the specific language governing permissions", "2, \"b\": 4, \"c\": 1, \"ab\": 6, \"ac\": 3, \"abbc\":", "subtoken_counts, min_count) self.assertEqual(len(subtoken_buckets[0]), 0) self.assertEqual(set(\"b\"), subtoken_buckets[1]) self.assertEqual(set([\"ab\", \"ac\"]), subtoken_buckets[2]) self.assertEqual(len(subtoken_buckets[3]),", "alphabet) self.assertIn(\"s\", alphabet) self.assertIn(\"i\", alphabet) self.assertIn(\"n\", alphabet) self.assertIn(\"g\", alphabet) self.assertIn(\"1\",", "def test_gen_new_subtoken_list(self): subtoken_counts = collections.defaultdict(int, { \"translate\": 10, \"t\": 40,", "or agreed to in writing, software # distributed under the", "self.assertNotIn(\"tra\", subtoken_list) self.assertIn(\"tr\", subtoken_list) self.assertIn(\"t\", subtoken_list) self.assertEqual(len(\"translate\"), max_token_length) def test_generate_subtokens(self):", "required by applicable law or agreed to in writing, software", "0, \"b\": 1, \"c\": 2, \"_\": 3} max_subtoken_length = 2", "BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either", "with the License. # You may obtain a copy of", "escaped_token = u\"Underline: \\\\u, Backslash: \\\\\\\\, Unicode: \\\\52;\" unescaped_token =", "\"b\": 1, \"c\": 2, \"_\": 3} max_subtoken_length = 2 subtoken_counts", "5, \"abc\": 5, \"bc_\": 5, \"abc_\": 5 }, subtoken_counts) def", "123.\", s) def test_escape_token(self): token = u\"abc_\\\\4\" alphabet = set(\"abc_\\\\u;\")", "min_count, alphabet, reserved_tokens) # Check that \"tra\" isn\"t in the", "alphabet = tokenizer._generate_alphabet_dict(s, reserved_tokens) self.assertIn(\"?\", alphabet) self.assertIn(\"t\", alphabet) self.assertIn(\"e\", alphabet)", "\"b\": 4, \"c\": 1, \"ab\": 6, \"ac\": 3, \"abbc\": 5", "= [\"???\"] alphabet = tokenizer._generate_alphabet_dict(s, reserved_tokens) self.assertIn(\"?\", alphabet) self.assertIn(\"t\", alphabet)", "encoded_list = [1, 2, 0] # testing 123 token_list =", "compliance with the License. # You may obtain a copy", "All Rights Reserved. 
# # Licensed under the Apache License,", "agreed to in writing, software # distributed under the License", "{ \"translate\": 10, \"t\": 40, \"tr\": 16, \"tra\": 12 })", "max_token_length = tokenizer._gen_new_subtoken_list( subtoken_counts, min_count, alphabet, reserved_tokens) # Check that", "token = u\"abc_\\\\4\" alphabet = set(\"abc_\\\\u;\") escaped_token = tokenizer._escape_token(token, alphabet)", "\"c_\": 5, \"abc\": 5, \"bc_\": 5, \"abc_\": 5 }, subtoken_counts)", "distributed under the License is distributed on an \"AS IS\"", "unescaped_token) def test_list_to_index_dict(self): lst = [\"test\", \"strings\"] d = tokenizer._list_to_index_dict(lst)", "= set(\"abc_\") subtoken_dict = {\"a\": 0, \"b\": 1, \"c\": 2,", "2, 0], encoded_list) def test_decode(self): vocab_list = [\"123_\", \"test\", \"ing_\"]", "express or implied. # See the License for the specific", "test_list_to_index_dict(self): lst = [\"test\", \"strings\"] d = tokenizer._list_to_index_dict(lst) self.assertDictEqual({\"test\": 0,", "= tokenizer._generate_alphabet_dict(s, reserved_tokens) self.assertIn(\"?\", alphabet) self.assertIn(\"t\", alphabet) self.assertIn(\"e\", alphabet) self.assertIn(\"s\",", "except in compliance with the License. # You may obtain", "= tokenizer._escape_token(token, alphabet) self.assertEqual(\"abc\\\\u\\\\\\\\\\\\52;_\", escaped_token) def test_unescape_token(self): escaped_token = u\"Underline:", "\"t\": 40, \"tr\": 16, \"tra\": 12 }) min_count = 5", "Licensed under the Apache License, Version 2.0 (the \"License\"); #", "encoded_list = subtokenizer.encode(s) self.assertEqual([1, 2, 0], encoded_list) def test_decode(self): vocab_list", "not use this file except in compliance with the License.", "reserved_tokens = [\"???\"] alphabet = tokenizer._generate_alphabet_dict(s, reserved_tokens) self.assertIn(\"?\", alphabet) self.assertIn(\"t\",", "collections.defaultdict) self.assertDictEqual( { \"a\": 5, \"b\": 5, \"c\": 5, \"_\":", "the vocab list for c in alphabet: self.assertIn(c, vocab_list) if", "self.assertIsInstance(subtoken_counts, collections.defaultdict) self.assertDictEqual( { \"a\": 5, \"b\": 5, \"c\": 5,", "list for c in alphabet: self.assertIn(c, vocab_list) if __name__ ==", "writing, software # distributed under the License is distributed on", "self.assertIn(\"2\", alphabet) self.assertIn(\"3\", alphabet) def test_count_and_gen_subtokens(self): token_counts = {\"abc\": 5}", "4, \"c\": 1, \"ab\": 6, \"ac\": 3, \"abbc\": 5 })", "for c in alphabet: self.assertIn(c, vocab_list) if __name__ == \"__main__\":", "5, \"bc_\": 5, \"abc_\": 5 }, subtoken_counts) def test_filter_and_bucket_subtokens(self): subtoken_counts", "you may not use this file except in compliance with", "[\"test\", \"strings\"] d = tokenizer._list_to_index_dict(lst) self.assertDictEqual({\"test\": 0, \"strings\": 1}, d)", "= tokenizer._generate_subtokens(token_counts, alphabet, min_count, num_iterations, reserved_tokens) # Check that reserved", "# Licensed under the Apache License, Version 2.0 (the \"License\");", "\"abc\": 5, \"bc_\": 5, \"abc_\": 5 }, subtoken_counts) def test_filter_and_bucket_subtokens(self):", "test_filter_and_bucket_subtokens(self): subtoken_counts = collections.defaultdict(int, { \"a\": 2, \"b\": 4, \"c\":", "canddiate list). 
self.assertNotIn(\"tra\", subtoken_list) self.assertIn(\"tr\", subtoken_list) self.assertIn(\"t\", subtoken_list) self.assertEqual(len(\"translate\"), max_token_length)", "string helper methods.\"\"\" import collections import tempfile import tensorflow as", "not be added to the canddiate list). self.assertNotIn(\"tra\", subtoken_list) self.assertIn(\"tr\",", "temp_file = tempfile.NamedTemporaryFile(delete=False) with tf.io.gfile.GFile(temp_file.name, \"w\") as w: for subtoken", "self.assertIn(\"t\", subtoken_list) self.assertEqual(len(\"translate\"), max_token_length) def test_generate_subtokens(self): token_counts = {\"ab\": 1,", "tokenizer._gen_new_subtoken_list( subtoken_counts, min_count, alphabet, reserved_tokens) # Check that \"tra\" isn\"t", "reserved_tokens=[]) def test_encode(self): vocab_list = [\"123_\", \"test\", \"ing_\"] subtokenizer =", "CONDITIONS OF ANY KIND, either express or implied. # See", "Check that \"tra\" isn\"t in the list (its count should", "2, 0] # testing 123 token_list = subtokenizer._subtoken_ids_to_tokens(encoded_list) self.assertEqual([u\"testing\", u\"123\"],", "2, \"ab\": 3} max_subtoken_length = 2 subtokens = tokenizer._split_token_to_subtokens(token, subtoken_dict,", "is distributed on an \"AS IS\" BASIS, # WITHOUT WARRANTIES", "Check that each character in alphabet is in the vocab", "self._init_subtokenizer(vocab_list) encoded_list = [1, 2, 0] # testing 123 token_list", "# limitations under the License. \"\"\"Test Subtokenizer and string helper", "\"testing\", \"123\", \".\"], tokens) def test_join_tokens_to_string(self): tokens = [\"test\", \"?", "alphabet = set(\"translate\") reserved_tokens = [\"reserved\", \"tokens\"] subtoken_list, max_token_length =", "5 }, subtoken_counts) def test_filter_and_bucket_subtokens(self): subtoken_counts = collections.defaultdict(int, { \"a\":", "5, \"c\": 5, \"_\": 5, \"ab\": 5, \"bc\": 5, \"c_\":", "= {\"ab\": 1, \"bc\": 3, \"abc\": 5} alphabet = set(\"abc_\")", "and # limitations under the License. \"\"\"Test Subtokenizer and string", "alphabet) self.assertIn(\"3\", alphabet) def test_count_and_gen_subtokens(self): token_counts = {\"abc\": 5} alphabet", "subtoken_counts, min_count, alphabet, reserved_tokens) # Check that \"tra\" isn\"t in", "\"? \", \"testing\", \"123\", \".\"] s = tokenizer._join_tokens_to_string(tokens, tokenizer._ALPHANUMERIC_CHAR_SET) self.assertEqual(\"test?", "2021 The TensorFlow Authors. All Rights Reserved. # # Licensed", "subtokens = tokenizer._split_token_to_subtokens(token, subtoken_dict, max_subtoken_length) self.assertEqual([\"ab\", \"c\"], subtokens) def test_generate_alphabet_dict(self):", "= [\"reserved\", \"tokens\"] subtoken_list, max_token_length = tokenizer._gen_new_subtoken_list( subtoken_counts, min_count, alphabet,", "reserved_tokens) # Check that \"tra\" isn\"t in the list (its", "def test_unescape_token(self): escaped_token = u\"Underline: \\\\u, Backslash: \\\\\\\\, Unicode: \\\\52;\"", "the list self.assertEqual(vocab_list[:2], reserved_tokens) # Check that each character in", "vocab_list): temp_file = tempfile.NamedTemporaryFile(delete=False) with tf.io.gfile.GFile(temp_file.name, \"w\") as w: for", "\\\\u, Backslash: \\\\\\\\, Unicode: \\\\52;\" unescaped_token = tokenizer._unescape_token(escaped_token) self.assertEqual(\"Underline: _,", "max_subtoken_length) self.assertEqual([\"ab\", \"c\"], subtokens) def test_generate_alphabet_dict(self): s = [\"testing\", \"123\"]", "# Copyright 2021 The TensorFlow Authors. All Rights Reserved. 
#", "\"test\", \"ing_\"] subtokenizer = self._init_subtokenizer(vocab_list) s = \"testing 123\" encoded_list", "subtoken_dict = {\"a\": 0, \"b\": 1, \"c\": 2, \"_\": 3}", "0) self.assertEqual(set(\"b\"), subtoken_buckets[1]) self.assertEqual(set([\"ab\", \"ac\"]), subtoken_buckets[2]) self.assertEqual(len(subtoken_buckets[3]), 0) self.assertEqual(set([\"abbc\"]), subtoken_buckets[4])", "OR CONDITIONS OF ANY KIND, either express or implied. #", "}, subtoken_counts) def test_filter_and_bucket_subtokens(self): subtoken_counts = collections.defaultdict(int, { \"a\": 2,", "the License is distributed on an \"AS IS\" BASIS, #", "\"c\"], subtokens) def test_generate_alphabet_dict(self): s = [\"testing\", \"123\"] reserved_tokens =", "= set(\"translate\") reserved_tokens = [\"reserved\", \"tokens\"] subtoken_list, max_token_length = tokenizer._gen_new_subtoken_list(", "self.assertEqual(\"testing 123\", decoded_str) def test_subtoken_ids_to_tokens(self): vocab_list = [\"123_\", \"test\", \"ing_\"]", "tokenizer._split_token_to_subtokens(token, subtoken_dict, max_subtoken_length) self.assertEqual([\"ab\", \"c\"], subtokens) def test_generate_alphabet_dict(self): s =", "Backslash: \\\\, Unicode: 4\", unescaped_token) def test_list_to_index_dict(self): lst = [\"test\",", "self.assertEqual(set([\"ab\", \"ac\"]), subtoken_buckets[2]) self.assertEqual(len(subtoken_buckets[3]), 0) self.assertEqual(set([\"abbc\"]), subtoken_buckets[4]) def test_gen_new_subtoken_list(self): subtoken_counts", "collections import tempfile import tensorflow as tf from official.nlp.transformer.utils import", "[1, 2, 0] # testing 123 token_list = subtokenizer._subtoken_ids_to_tokens(encoded_list) self.assertEqual([u\"testing\",", "= tokenizer._split_string_to_tokens(text, tokenizer._ALPHANUMERIC_CHAR_SET) self.assertEqual([\"test\", \"? \", \"testing\", \"123\", \".\"], tokens)", "= tokenizer._count_and_gen_subtokens( token_counts, alphabet, subtoken_dict, max_subtoken_length) self.assertIsInstance(subtoken_counts, collections.defaultdict) self.assertDictEqual( {", "in alphabet is in the vocab list for c in", "= 5 alphabet = set(\"translate\") reserved_tokens = [\"reserved\", \"tokens\"] subtoken_list,", "[\"test\", \"? 
\", \"testing\", \"123\", \".\"] s = tokenizer._join_tokens_to_string(tokens, tokenizer._ALPHANUMERIC_CHAR_SET)", "\"abc_\": 5 }, subtoken_counts) def test_filter_and_bucket_subtokens(self): subtoken_counts = collections.defaultdict(int, {", "test_subtoken_ids_to_tokens(self): vocab_list = [\"123_\", \"test\", \"ing_\"] subtokenizer = self._init_subtokenizer(vocab_list) encoded_list", "\"c\": 2, \"ab\": 3} max_subtoken_length = 2 subtokens = tokenizer._split_token_to_subtokens(token,", "\"c\": 2, \"_\": 3} max_subtoken_length = 2 subtoken_counts = tokenizer._count_and_gen_subtokens(", "front of the list self.assertEqual(vocab_list[:2], reserved_tokens) # Check that each", "100 num_iterations = 1 reserved_tokens = [\"reserved\", \"tokens\"] vocab_list =", "reserved tokens are at the front of the list self.assertEqual(vocab_list[:2],", "Unicode: \\\\52;\" unescaped_token = tokenizer._unescape_token(escaped_token) self.assertEqual(\"Underline: _, Backslash: \\\\, Unicode:", "= \"abc\" subtoken_dict = {\"a\": 0, \"b\": 1, \"c\": 2,", "alphabet, subtoken_dict, max_subtoken_length) self.assertIsInstance(subtoken_counts, collections.defaultdict) self.assertDictEqual( { \"a\": 5, \"b\":", "= tokenizer._unescape_token(escaped_token) self.assertEqual(\"Underline: _, Backslash: \\\\, Unicode: 4\", unescaped_token) def", "= tokenizer._join_tokens_to_string(tokens, tokenizer._ALPHANUMERIC_CHAR_SET) self.assertEqual(\"test? testing 123.\", s) def test_escape_token(self): token", "self.assertIn(\"n\", alphabet) self.assertIn(\"g\", alphabet) self.assertIn(\"1\", alphabet) self.assertIn(\"2\", alphabet) self.assertIn(\"3\", alphabet)", "encoded_list) def test_decode(self): vocab_list = [\"123_\", \"test\", \"ing_\"] subtokenizer =", "law or agreed to in writing, software # distributed under", "tokenizer._split_string_to_tokens(text, tokenizer._ALPHANUMERIC_CHAR_SET) self.assertEqual([\"test\", \"? \", \"testing\", \"123\", \".\"], tokens) def", "test_unescape_token(self): escaped_token = u\"Underline: \\\\u, Backslash: \\\\\\\\, Unicode: \\\\52;\" unescaped_token", "that each character in alphabet is in the vocab list", "= subtokenizer.decode(encoded_list) self.assertEqual(\"testing 123\", decoded_str) def test_subtoken_ids_to_tokens(self): vocab_list = [\"123_\",", "decremented to 2, # so it should not be added", "be added to the canddiate list). self.assertNotIn(\"tra\", subtoken_list) self.assertIn(\"tr\", subtoken_list)", "Subtokenizer and string helper methods.\"\"\" import collections import tempfile import", "under the License. \"\"\"Test Subtokenizer and string helper methods.\"\"\" import", "vocab_list = tokenizer._generate_subtokens(token_counts, alphabet, min_count, num_iterations, reserved_tokens) # Check that", "0) self.assertEqual(set([\"abbc\"]), subtoken_buckets[4]) def test_gen_new_subtoken_list(self): subtoken_counts = collections.defaultdict(int, { \"translate\":", "self.assertIn(\"g\", alphabet) self.assertIn(\"1\", alphabet) self.assertIn(\"2\", alphabet) self.assertIn(\"3\", alphabet) def test_count_and_gen_subtokens(self):", "import tensorflow as tf from official.nlp.transformer.utils import tokenizer class SubtokenizerTest(tf.test.TestCase):", "may obtain a copy of the License at # #", "0] # testing 123 decoded_str = subtokenizer.decode(encoded_list) self.assertEqual(\"testing 123\", decoded_str)", "\"123\", \".\"], tokens) def test_join_tokens_to_string(self): tokens = [\"test\", \"? 
\",", "IS\" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,", "5, \"c_\": 5, \"abc\": 5, \"bc_\": 5, \"abc_\": 5 },", "[\"123_\", \"test\", \"ing_\"] subtokenizer = self._init_subtokenizer(vocab_list) s = \"testing 123\"", "d = tokenizer._list_to_index_dict(lst) self.assertDictEqual({\"test\": 0, \"strings\": 1}, d) def test_split_token_to_subtokens(self):", "= [1, 2, 0] # testing 123 token_list = subtokenizer._subtoken_ids_to_tokens(encoded_list)", "2 subtokens = tokenizer._split_token_to_subtokens(token, subtoken_dict, max_subtoken_length) self.assertEqual([\"ab\", \"c\"], subtokens) def", "tokens are at the front of the list self.assertEqual(vocab_list[:2], reserved_tokens)", "may not use this file except in compliance with the", "w.write(\"'%s'\" % subtoken) w.write(\"\\n\") return tokenizer.Subtokenizer(temp_file.name, reserved_tokens=[]) def test_encode(self): vocab_list", "subtoken_buckets[4]) def test_gen_new_subtoken_list(self): subtoken_counts = collections.defaultdict(int, { \"translate\": 10, \"t\":", "\"abc\": 5} alphabet = set(\"abc_\") min_count = 100 num_iterations =", "test_encode(self): vocab_list = [\"123_\", \"test\", \"ing_\"] subtokenizer = self._init_subtokenizer(vocab_list) s", "escaped_token = tokenizer._escape_token(token, alphabet) self.assertEqual(\"abc\\\\u\\\\\\\\\\\\52;_\", escaped_token) def test_unescape_token(self): escaped_token =", "self.assertEqual(\"test? testing 123.\", s) def test_escape_token(self): token = u\"abc_\\\\4\" alphabet", "WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or", "tokenizer class SubtokenizerTest(tf.test.TestCase): def _init_subtokenizer(self, vocab_list): temp_file = tempfile.NamedTemporaryFile(delete=False) with", "alphabet) def test_count_and_gen_subtokens(self): token_counts = {\"abc\": 5} alphabet = set(\"abc_\")", "alphabet) self.assertIn(\"1\", alphabet) self.assertIn(\"2\", alphabet) self.assertIn(\"3\", alphabet) def test_count_and_gen_subtokens(self): token_counts", "this file except in compliance with the License. # You", "self.assertEqual(set([\"abbc\"]), subtoken_buckets[4]) def test_gen_new_subtoken_list(self): subtoken_counts = collections.defaultdict(int, { \"translate\": 10,", "it should not be added to the canddiate list). self.assertNotIn(\"tra\",", "\"\"\"Test Subtokenizer and string helper methods.\"\"\" import collections import tempfile", "w: for subtoken in vocab_list: w.write(\"'%s'\" % subtoken) w.write(\"\\n\") return", "\\\\\\\\, Unicode: \\\\52;\" unescaped_token = tokenizer._unescape_token(escaped_token) self.assertEqual(\"Underline: _, Backslash: \\\\,", "self.assertEqual(vocab_list[:2], reserved_tokens) # Check that each character in alphabet is", "the canddiate list). self.assertNotIn(\"tra\", subtoken_list) self.assertIn(\"tr\", subtoken_list) self.assertIn(\"t\", subtoken_list) self.assertEqual(len(\"translate\"),", "subtoken_dict, max_subtoken_length) self.assertEqual([\"ab\", \"c\"], subtokens) def test_generate_alphabet_dict(self): s = [\"testing\",", "# # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law", "governing permissions and # limitations under the License. \"\"\"Test Subtokenizer", "6, \"ac\": 3, \"abbc\": 5 }) min_count = 3 subtoken_buckets", "# # Licensed under the Apache License, Version 2.0 (the", "{ \"a\": 2, \"b\": 4, \"c\": 1, \"ab\": 6, \"ac\":", "file except in compliance with the License. 
# You may", "on an \"AS IS\" BASIS, # WITHOUT WARRANTIES OR CONDITIONS", "tokenizer._ALPHANUMERIC_CHAR_SET) self.assertEqual([\"test\", \"? \", \"testing\", \"123\", \".\"], tokens) def test_join_tokens_to_string(self):", "2, # so it should not be added to the", "vocab_list: w.write(\"'%s'\" % subtoken) w.write(\"\\n\") return tokenizer.Subtokenizer(temp_file.name, reserved_tokens=[]) def test_encode(self):", "StringHelperTest(tf.test.TestCase): def test_split_string_to_tokens(self): text = \"test? testing 123.\" tokens =", "= [1, 2, 0] # testing 123 decoded_str = subtokenizer.decode(encoded_list)", "def test_list_to_index_dict(self): lst = [\"test\", \"strings\"] d = tokenizer._list_to_index_dict(lst) self.assertDictEqual({\"test\":", "\"c\": 5, \"_\": 5, \"ab\": 5, \"bc\": 5, \"c_\": 5,", "# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express", "s = tokenizer._join_tokens_to_string(tokens, tokenizer._ALPHANUMERIC_CHAR_SET) self.assertEqual(\"test? testing 123.\", s) def test_escape_token(self):", "d) def test_split_token_to_subtokens(self): token = \"abc\" subtoken_dict = {\"a\": 0,", "\"bc\": 5, \"c_\": 5, \"abc\": 5, \"bc_\": 5, \"abc_\": 5", "subtoken_buckets[1]) self.assertEqual(set([\"ab\", \"ac\"]), subtoken_buckets[2]) self.assertEqual(len(subtoken_buckets[3]), 0) self.assertEqual(set([\"abbc\"]), subtoken_buckets[4]) def test_gen_new_subtoken_list(self):", "10, \"t\": 40, \"tr\": 16, \"tra\": 12 }) min_count =", "\"tr\": 16, \"tra\": 12 }) min_count = 5 alphabet =", "[\"reserved\", \"tokens\"] vocab_list = tokenizer._generate_subtokens(token_counts, alphabet, min_count, num_iterations, reserved_tokens) #", "test_split_string_to_tokens(self): text = \"test? testing 123.\" tokens = tokenizer._split_string_to_tokens(text, tokenizer._ALPHANUMERIC_CHAR_SET)", "5 alphabet = set(\"translate\") reserved_tokens = [\"reserved\", \"tokens\"] subtoken_list, max_token_length", "subtokenizer.encode(s) self.assertEqual([1, 2, 0], encoded_list) def test_decode(self): vocab_list = [\"123_\",", "the list (its count should be decremented to 2, #", "to the canddiate list). self.assertNotIn(\"tra\", subtoken_list) self.assertIn(\"tr\", subtoken_list) self.assertIn(\"t\", subtoken_list)", "def test_generate_subtokens(self): token_counts = {\"ab\": 1, \"bc\": 3, \"abc\": 5}", "tensorflow as tf from official.nlp.transformer.utils import tokenizer class SubtokenizerTest(tf.test.TestCase): def", "tokenizer._count_and_gen_subtokens( token_counts, alphabet, subtoken_dict, max_subtoken_length) self.assertIsInstance(subtoken_counts, collections.defaultdict) self.assertDictEqual( { \"a\":", "\"testing 123\" encoded_list = subtokenizer.encode(s) self.assertEqual([1, 2, 0], encoded_list) def", "0] # testing 123 token_list = subtokenizer._subtoken_ids_to_tokens(encoded_list) self.assertEqual([u\"testing\", u\"123\"], token_list)", "\"abc\" subtoken_dict = {\"a\": 0, \"b\": 1, \"c\": 2, \"ab\":", "{\"abc\": 5} alphabet = set(\"abc_\") subtoken_dict = {\"a\": 0, \"b\":", "and string helper methods.\"\"\" import collections import tempfile import tensorflow", "self.assertIn(\"s\", alphabet) self.assertIn(\"i\", alphabet) self.assertIn(\"n\", alphabet) self.assertIn(\"g\", alphabet) self.assertIn(\"1\", alphabet)", "each character in alphabet is in the vocab list for", "= \"test? 
testing 123.\" tokens = tokenizer._split_string_to_tokens(text, tokenizer._ALPHANUMERIC_CHAR_SET) self.assertEqual([\"test\", \"?", "http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed", "as tf from official.nlp.transformer.utils import tokenizer class SubtokenizerTest(tf.test.TestCase): def _init_subtokenizer(self,", "with tf.io.gfile.GFile(temp_file.name, \"w\") as w: for subtoken in vocab_list: w.write(\"'%s'\"", "5, \"abc_\": 5 }, subtoken_counts) def test_filter_and_bucket_subtokens(self): subtoken_counts = collections.defaultdict(int,", "\"tokens\"] subtoken_list, max_token_length = tokenizer._gen_new_subtoken_list( subtoken_counts, min_count, alphabet, reserved_tokens) #", "test_gen_new_subtoken_list(self): subtoken_counts = collections.defaultdict(int, { \"translate\": 10, \"t\": 40, \"tr\":", "or implied. # See the License for the specific language", "num_iterations, reserved_tokens) # Check that reserved tokens are at the", "Rights Reserved. # # Licensed under the Apache License, Version", "3, \"abc\": 5} alphabet = set(\"abc_\") min_count = 100 num_iterations", "% subtoken) w.write(\"\\n\") return tokenizer.Subtokenizer(temp_file.name, reserved_tokens=[]) def test_encode(self): vocab_list =", "\"_\": 5, \"ab\": 5, \"bc\": 5, \"c_\": 5, \"abc\": 5,", "2 subtoken_counts = tokenizer._count_and_gen_subtokens( token_counts, alphabet, subtoken_dict, max_subtoken_length) self.assertIsInstance(subtoken_counts, collections.defaultdict)", "subtoken_list, max_token_length = tokenizer._gen_new_subtoken_list( subtoken_counts, min_count, alphabet, reserved_tokens) # Check", "KIND, either express or implied. # See the License for", "specific language governing permissions and # limitations under the License.", "16, \"tra\": 12 }) min_count = 5 alphabet = set(\"translate\")", "subtoken in vocab_list: w.write(\"'%s'\" % subtoken) w.write(\"\\n\") return tokenizer.Subtokenizer(temp_file.name, reserved_tokens=[])", "\"? 
\", \"testing\", \"123\", \".\"], tokens) def test_join_tokens_to_string(self): tokens =", "\"ac\"]), subtoken_buckets[2]) self.assertEqual(len(subtoken_buckets[3]), 0) self.assertEqual(set([\"abbc\"]), subtoken_buckets[4]) def test_gen_new_subtoken_list(self): subtoken_counts =", "alphabet = set(\"abc_\") min_count = 100 num_iterations = 1 reserved_tokens", "License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by", "subtoken_counts = collections.defaultdict(int, { \"a\": 2, \"b\": 4, \"c\": 1,", "}) min_count = 3 subtoken_buckets = tokenizer._filter_and_bucket_subtokens( subtoken_counts, min_count) self.assertEqual(len(subtoken_buckets[0]),", "\"ing_\"] subtokenizer = self._init_subtokenizer(vocab_list) s = \"testing 123\" encoded_list =", "\\\\52;\" unescaped_token = tokenizer._unescape_token(escaped_token) self.assertEqual(\"Underline: _, Backslash: \\\\, Unicode: 4\",", "4\", unescaped_token) def test_list_to_index_dict(self): lst = [\"test\", \"strings\"] d =", "subtoken_dict = {\"a\": 0, \"b\": 1, \"c\": 2, \"ab\": 3}", "escaped_token) def test_unescape_token(self): escaped_token = u\"Underline: \\\\u, Backslash: \\\\\\\\, Unicode:", "tokenizer._unescape_token(escaped_token) self.assertEqual(\"Underline: _, Backslash: \\\\, Unicode: 4\", unescaped_token) def test_list_to_index_dict(self):", "(the \"License\"); # you may not use this file except", "u\"abc_\\\\4\" alphabet = set(\"abc_\\\\u;\") escaped_token = tokenizer._escape_token(token, alphabet) self.assertEqual(\"abc\\\\u\\\\\\\\\\\\52;_\", escaped_token)", "# you may not use this file except in compliance", "so it should not be added to the canddiate list).", "\"123\", \".\"] s = tokenizer._join_tokens_to_string(tokens, tokenizer._ALPHANUMERIC_CHAR_SET) self.assertEqual(\"test? testing 123.\", s)", "tf from official.nlp.transformer.utils import tokenizer class SubtokenizerTest(tf.test.TestCase): def _init_subtokenizer(self, vocab_list):", "= [\"test\", \"strings\"] d = tokenizer._list_to_index_dict(lst) self.assertDictEqual({\"test\": 0, \"strings\": 1},", "3} max_subtoken_length = 2 subtoken_counts = tokenizer._count_and_gen_subtokens( token_counts, alphabet, subtoken_dict,", "vocab_list = [\"123_\", \"test\", \"ing_\"] subtokenizer = self._init_subtokenizer(vocab_list) encoded_list =", "5 }) min_count = 3 subtoken_buckets = tokenizer._filter_and_bucket_subtokens( subtoken_counts, min_count)", "[\"???\"] alphabet = tokenizer._generate_alphabet_dict(s, reserved_tokens) self.assertIn(\"?\", alphabet) self.assertIn(\"t\", alphabet) self.assertIn(\"e\",", "0], encoded_list) def test_decode(self): vocab_list = [\"123_\", \"test\", \"ing_\"] subtokenizer", "test_join_tokens_to_string(self): tokens = [\"test\", \"? 
\", \"testing\", \"123\", \".\"] s", "tf.io.gfile.GFile(temp_file.name, \"w\") as w: for subtoken in vocab_list: w.write(\"'%s'\" %", "subtokenizer = self._init_subtokenizer(vocab_list) s = \"testing 123\" encoded_list = subtokenizer.encode(s)", "def test_subtoken_ids_to_tokens(self): vocab_list = [\"123_\", \"test\", \"ing_\"] subtokenizer = self._init_subtokenizer(vocab_list)", "self.assertEqual(set(\"b\"), subtoken_buckets[1]) self.assertEqual(set([\"ab\", \"ac\"]), subtoken_buckets[2]) self.assertEqual(len(subtoken_buckets[3]), 0) self.assertEqual(set([\"abbc\"]), subtoken_buckets[4]) def", "# Check that each character in alphabet is in the", "s) def test_escape_token(self): token = u\"abc_\\\\4\" alphabet = set(\"abc_\\\\u;\") escaped_token", "test_split_token_to_subtokens(self): token = \"abc\" subtoken_dict = {\"a\": 0, \"b\": 1,", "# # Unless required by applicable law or agreed to", "= tempfile.NamedTemporaryFile(delete=False) with tf.io.gfile.GFile(temp_file.name, \"w\") as w: for subtoken in", "test_decode(self): vocab_list = [\"123_\", \"test\", \"ing_\"] subtokenizer = self._init_subtokenizer(vocab_list) encoded_list", "self._init_subtokenizer(vocab_list) encoded_list = [1, 2, 0] # testing 123 decoded_str", "def test_escape_token(self): token = u\"abc_\\\\4\" alphabet = set(\"abc_\\\\u;\") escaped_token =", "tokenizer._generate_subtokens(token_counts, alphabet, min_count, num_iterations, reserved_tokens) # Check that reserved tokens", "self.assertIn(\"t\", alphabet) self.assertIn(\"e\", alphabet) self.assertIn(\"s\", alphabet) self.assertIn(\"i\", alphabet) self.assertIn(\"n\", alphabet)", "obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0", "set(\"abc_\\\\u;\") escaped_token = tokenizer._escape_token(token, alphabet) self.assertEqual(\"abc\\\\u\\\\\\\\\\\\52;_\", escaped_token) def test_unescape_token(self): escaped_token", "subtoken_list) self.assertIn(\"t\", subtoken_list) self.assertEqual(len(\"translate\"), max_token_length) def test_generate_subtokens(self): token_counts = {\"ab\":", "Version 2.0 (the \"License\"); # you may not use this", "s = \"testing 123\" encoded_list = subtokenizer.encode(s) self.assertEqual([1, 2, 0],", "token_list) class StringHelperTest(tf.test.TestCase): def test_split_string_to_tokens(self): text = \"test? testing 123.\"", "1, \"c\": 2, \"_\": 3} max_subtoken_length = 2 subtoken_counts =", "Backslash: \\\\\\\\, Unicode: \\\\52;\" unescaped_token = tokenizer._unescape_token(escaped_token) self.assertEqual(\"Underline: _, Backslash:", "in the list (its count should be decremented to 2,", "123\", decoded_str) def test_subtoken_ids_to_tokens(self): vocab_list = [\"123_\", \"test\", \"ing_\"] subtokenizer", "alphabet) self.assertIn(\"i\", alphabet) self.assertIn(\"n\", alphabet) self.assertIn(\"g\", alphabet) self.assertIn(\"1\", alphabet) self.assertIn(\"2\",", "implied. # See the License for the specific language governing", "SubtokenizerTest(tf.test.TestCase): def _init_subtokenizer(self, vocab_list): temp_file = tempfile.NamedTemporaryFile(delete=False) with tf.io.gfile.GFile(temp_file.name, \"w\")", "tokens) def test_join_tokens_to_string(self): tokens = [\"test\", \"? 
\", \"testing\", \"123\",", "# testing 123 token_list = subtokenizer._subtoken_ids_to_tokens(encoded_list) self.assertEqual([u\"testing\", u\"123\"], token_list) class", "under the Apache License, Version 2.0 (the \"License\"); # you", "= {\"abc\": 5} alphabet = set(\"abc_\") subtoken_dict = {\"a\": 0,", "\"b\": 5, \"c\": 5, \"_\": 5, \"ab\": 5, \"bc\": 5,", "reserved_tokens) self.assertIn(\"?\", alphabet) self.assertIn(\"t\", alphabet) self.assertIn(\"e\", alphabet) self.assertIn(\"s\", alphabet) self.assertIn(\"i\",", "{\"a\": 0, \"b\": 1, \"c\": 2, \"ab\": 3} max_subtoken_length =", "count should be decremented to 2, # so it should", "\"a\": 2, \"b\": 4, \"c\": 1, \"ab\": 6, \"ac\": 3,", "by applicable law or agreed to in writing, software #", "1 reserved_tokens = [\"reserved\", \"tokens\"] vocab_list = tokenizer._generate_subtokens(token_counts, alphabet, min_count,", "alphabet) self.assertIn(\"t\", alphabet) self.assertIn(\"e\", alphabet) self.assertIn(\"s\", alphabet) self.assertIn(\"i\", alphabet) self.assertIn(\"n\",", "alphabet) self.assertIn(\"g\", alphabet) self.assertIn(\"1\", alphabet) self.assertIn(\"2\", alphabet) self.assertIn(\"3\", alphabet) def", "self.assertDictEqual( { \"a\": 5, \"b\": 5, \"c\": 5, \"_\": 5,", "testing 123 decoded_str = subtokenizer.decode(encoded_list) self.assertEqual(\"testing 123\", decoded_str) def test_subtoken_ids_to_tokens(self):", "the License. \"\"\"Test Subtokenizer and string helper methods.\"\"\" import collections", "\".\"] s = tokenizer._join_tokens_to_string(tokens, tokenizer._ALPHANUMERIC_CHAR_SET) self.assertEqual(\"test? testing 123.\", s) def", "= 3 subtoken_buckets = tokenizer._filter_and_bucket_subtokens( subtoken_counts, min_count) self.assertEqual(len(subtoken_buckets[0]), 0) self.assertEqual(set(\"b\"),", "testing 123.\" tokens = tokenizer._split_string_to_tokens(text, tokenizer._ALPHANUMERIC_CHAR_SET) self.assertEqual([\"test\", \"? \", \"testing\",", "123 token_list = subtokenizer._subtoken_ids_to_tokens(encoded_list) self.assertEqual([u\"testing\", u\"123\"], token_list) class StringHelperTest(tf.test.TestCase): def", "123 decoded_str = subtokenizer.decode(encoded_list) self.assertEqual(\"testing 123\", decoded_str) def test_subtoken_ids_to_tokens(self): vocab_list", "\"bc_\": 5, \"abc_\": 5 }, subtoken_counts) def test_filter_and_bucket_subtokens(self): subtoken_counts =", "_init_subtokenizer(self, vocab_list): temp_file = tempfile.NamedTemporaryFile(delete=False) with tf.io.gfile.GFile(temp_file.name, \"w\") as w:", "that reserved tokens are at the front of the list", "\"tra\": 12 }) min_count = 5 alphabet = set(\"translate\") reserved_tokens", "language governing permissions and # limitations under the License. \"\"\"Test", "123.\" tokens = tokenizer._split_string_to_tokens(text, tokenizer._ALPHANUMERIC_CHAR_SET) self.assertEqual([\"test\", \"? 
\", \"testing\", \"123\",", "subtoken_list) self.assertIn(\"tr\", subtoken_list) self.assertIn(\"t\", subtoken_list) self.assertEqual(len(\"translate\"), max_token_length) def test_generate_subtokens(self): token_counts", "= 100 num_iterations = 1 reserved_tokens = [\"reserved\", \"tokens\"] vocab_list", "an \"AS IS\" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF", "Unless required by applicable law or agreed to in writing,", "subtokens) def test_generate_alphabet_dict(self): s = [\"testing\", \"123\"] reserved_tokens = [\"???\"]", "= set(\"abc_\\\\u;\") escaped_token = tokenizer._escape_token(token, alphabet) self.assertEqual(\"abc\\\\u\\\\\\\\\\\\52;_\", escaped_token) def test_unescape_token(self):", "tokenizer._escape_token(token, alphabet) self.assertEqual(\"abc\\\\u\\\\\\\\\\\\52;_\", escaped_token) def test_unescape_token(self): escaped_token = u\"Underline: \\\\u,", "the specific language governing permissions and # limitations under the", "w.write(\"\\n\") return tokenizer.Subtokenizer(temp_file.name, reserved_tokens=[]) def test_encode(self): vocab_list = [\"123_\", \"test\",", "def test_decode(self): vocab_list = [\"123_\", \"test\", \"ing_\"] subtokenizer = self._init_subtokenizer(vocab_list)", "min_count = 100 num_iterations = 1 reserved_tokens = [\"reserved\", \"tokens\"]", "applicable law or agreed to in writing, software # distributed", "tokenizer._list_to_index_dict(lst) self.assertDictEqual({\"test\": 0, \"strings\": 1}, d) def test_split_token_to_subtokens(self): token =", "as w: for subtoken in vocab_list: w.write(\"'%s'\" % subtoken) w.write(\"\\n\")", "= [\"123_\", \"test\", \"ing_\"] subtokenizer = self._init_subtokenizer(vocab_list) s = \"testing", "= 2 subtoken_counts = tokenizer._count_and_gen_subtokens( token_counts, alphabet, subtoken_dict, max_subtoken_length) self.assertIsInstance(subtoken_counts,", "2, \"_\": 3} max_subtoken_length = 2 subtoken_counts = tokenizer._count_and_gen_subtokens( token_counts,", "1, \"ab\": 6, \"ac\": 3, \"abbc\": 5 }) min_count =", "tokenizer._generate_alphabet_dict(s, reserved_tokens) self.assertIn(\"?\", alphabet) self.assertIn(\"t\", alphabet) self.assertIn(\"e\", alphabet) self.assertIn(\"s\", alphabet)", "subtokenizer = self._init_subtokenizer(vocab_list) encoded_list = [1, 2, 0] # testing", "\"ab\": 5, \"bc\": 5, \"c_\": 5, \"abc\": 5, \"bc_\": 5,", "= tokenizer._filter_and_bucket_subtokens( subtoken_counts, min_count) self.assertEqual(len(subtoken_buckets[0]), 0) self.assertEqual(set(\"b\"), subtoken_buckets[1]) self.assertEqual(set([\"ab\", \"ac\"]),", "# Check that reserved tokens are at the front of", "token_counts = {\"abc\": 5} alphabet = set(\"abc_\") subtoken_dict = {\"a\":", "in writing, software # distributed under the License is distributed", "self.assertEqual([1, 2, 0], encoded_list) def test_decode(self): vocab_list = [\"123_\", \"test\",", "def test_count_and_gen_subtokens(self): token_counts = {\"abc\": 5} alphabet = set(\"abc_\") subtoken_dict", "\"ing_\"] subtokenizer = self._init_subtokenizer(vocab_list) encoded_list = [1, 2, 0] #", "min_count = 3 subtoken_buckets = tokenizer._filter_and_bucket_subtokens( subtoken_counts, min_count) self.assertEqual(len(subtoken_buckets[0]), 0)", "test_count_and_gen_subtokens(self): token_counts = {\"abc\": 5} alphabet = set(\"abc_\") subtoken_dict =", "\"ab\": 3} max_subtoken_length = 2 subtokens = tokenizer._split_token_to_subtokens(token, subtoken_dict, max_subtoken_length)", "import tokenizer class SubtokenizerTest(tf.test.TestCase): def _init_subtokenizer(self, 
vocab_list): temp_file = tempfile.NamedTemporaryFile(delete=False)", "list (its count should be decremented to 2, # so", "encoded_list = [1, 2, 0] # testing 123 decoded_str =", "3 subtoken_buckets = tokenizer._filter_and_bucket_subtokens( subtoken_counts, min_count) self.assertEqual(len(subtoken_buckets[0]), 0) self.assertEqual(set(\"b\"), subtoken_buckets[1])", "subtoken_dict, max_subtoken_length) self.assertIsInstance(subtoken_counts, collections.defaultdict) self.assertDictEqual( { \"a\": 5, \"b\": 5,", "= [\"reserved\", \"tokens\"] vocab_list = tokenizer._generate_subtokens(token_counts, alphabet, min_count, num_iterations, reserved_tokens)", "subtoken_counts = tokenizer._count_and_gen_subtokens( token_counts, alphabet, subtoken_dict, max_subtoken_length) self.assertIsInstance(subtoken_counts, collections.defaultdict) self.assertDictEqual(", "vocab_list = [\"123_\", \"test\", \"ing_\"] subtokenizer = self._init_subtokenizer(vocab_list) s =", "\"tokens\"] vocab_list = tokenizer._generate_subtokens(token_counts, alphabet, min_count, num_iterations, reserved_tokens) # Check", "list self.assertEqual(vocab_list[:2], reserved_tokens) # Check that each character in alphabet", "# Check that \"tra\" isn\"t in the list (its count", "License is distributed on an \"AS IS\" BASIS, # WITHOUT", "= {\"a\": 0, \"b\": 1, \"c\": 2, \"ab\": 3} max_subtoken_length", "License, Version 2.0 (the \"License\"); # you may not use", "= [\"123_\", \"test\", \"ing_\"] subtokenizer = self._init_subtokenizer(vocab_list) encoded_list = [1,", "are at the front of the list self.assertEqual(vocab_list[:2], reserved_tokens) #", "reserved_tokens) # Check that each character in alphabet is in", "# You may obtain a copy of the License at", "isn\"t in the list (its count should be decremented to", "for subtoken in vocab_list: w.write(\"'%s'\" % subtoken) w.write(\"\\n\") return tokenizer.Subtokenizer(temp_file.name,", "from official.nlp.transformer.utils import tokenizer class SubtokenizerTest(tf.test.TestCase): def _init_subtokenizer(self, vocab_list): temp_file", "[\"123_\", \"test\", \"ing_\"] subtokenizer = self._init_subtokenizer(vocab_list) encoded_list = [1, 2,", "u\"123\"], token_list) class StringHelperTest(tf.test.TestCase): def test_split_string_to_tokens(self): text = \"test? testing", "helper methods.\"\"\" import collections import tempfile import tensorflow as tf", "copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # #", "= tokenizer._gen_new_subtoken_list( subtoken_counts, min_count, alphabet, reserved_tokens) # Check that \"tra\"", "\".\"], tokens) def test_join_tokens_to_string(self): tokens = [\"test\", \"? \", \"testing\",", "\", \"testing\", \"123\", \".\"] s = tokenizer._join_tokens_to_string(tokens, tokenizer._ALPHANUMERIC_CHAR_SET) self.assertEqual(\"test? testing", "= [\"testing\", \"123\"] reserved_tokens = [\"???\"] alphabet = tokenizer._generate_alphabet_dict(s, reserved_tokens)", "\", \"testing\", \"123\", \".\"], tokens) def test_join_tokens_to_string(self): tokens = [\"test\",", "Authors. All Rights Reserved. # # Licensed under the Apache", "token = \"abc\" subtoken_dict = {\"a\": 0, \"b\": 1, \"c\":", "5, \"ab\": 5, \"bc\": 5, \"c_\": 5, \"abc\": 5, \"bc_\":", "\"testing\", \"123\", \".\"] s = tokenizer._join_tokens_to_string(tokens, tokenizer._ALPHANUMERIC_CHAR_SET) self.assertEqual(\"test? 
testing 123.\",", "token_counts = {\"ab\": 1, \"bc\": 3, \"abc\": 5} alphabet =", "alphabet) self.assertIn(\"e\", alphabet) self.assertIn(\"s\", alphabet) self.assertIn(\"i\", alphabet) self.assertIn(\"n\", alphabet) self.assertIn(\"g\",", "alphabet) self.assertEqual(\"abc\\\\u\\\\\\\\\\\\52;_\", escaped_token) def test_unescape_token(self): escaped_token = u\"Underline: \\\\u, Backslash:", "the License for the specific language governing permissions and #", "collections.defaultdict(int, { \"translate\": 10, \"t\": 40, \"tr\": 16, \"tra\": 12", "should be decremented to 2, # so it should not", "Apache License, Version 2.0 (the \"License\"); # you may not", "decoded_str = subtokenizer.decode(encoded_list) self.assertEqual(\"testing 123\", decoded_str) def test_subtoken_ids_to_tokens(self): vocab_list =", "either express or implied. # See the License for the", "(its count should be decremented to 2, # so it", "the front of the list self.assertEqual(vocab_list[:2], reserved_tokens) # Check that", "# http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or", "return tokenizer.Subtokenizer(temp_file.name, reserved_tokens=[]) def test_encode(self): vocab_list = [\"123_\", \"test\", \"ing_\"]", "at the front of the list self.assertEqual(vocab_list[:2], reserved_tokens) # Check", "max_subtoken_length = 2 subtoken_counts = tokenizer._count_and_gen_subtokens( token_counts, alphabet, subtoken_dict, max_subtoken_length)", "def test_generate_alphabet_dict(self): s = [\"testing\", \"123\"] reserved_tokens = [\"???\"] alphabet", "{\"a\": 0, \"b\": 1, \"c\": 2, \"_\": 3} max_subtoken_length =", "token_counts, alphabet, subtoken_dict, max_subtoken_length) self.assertIsInstance(subtoken_counts, collections.defaultdict) self.assertDictEqual( { \"a\": 5,", "0, \"b\": 1, \"c\": 2, \"ab\": 3} max_subtoken_length = 2", "\"test? testing 123.\" tokens = tokenizer._split_string_to_tokens(text, tokenizer._ALPHANUMERIC_CHAR_SET) self.assertEqual([\"test\", \"? \",", "\"ac\": 3, \"abbc\": 5 }) min_count = 3 subtoken_buckets =", "tokenizer._ALPHANUMERIC_CHAR_SET) self.assertEqual(\"test? testing 123.\", s) def test_escape_token(self): token = u\"abc_\\\\4\"", "class SubtokenizerTest(tf.test.TestCase): def _init_subtokenizer(self, vocab_list): temp_file = tempfile.NamedTemporaryFile(delete=False) with tf.io.gfile.GFile(temp_file.name,", "self.assertIn(\"tr\", subtoken_list) self.assertIn(\"t\", subtoken_list) self.assertEqual(len(\"translate\"), max_token_length) def test_generate_subtokens(self): token_counts =", "character in alphabet is in the vocab list for c", "License. \"\"\"Test Subtokenizer and string helper methods.\"\"\" import collections import", "import tempfile import tensorflow as tf from official.nlp.transformer.utils import tokenizer", "= self._init_subtokenizer(vocab_list) encoded_list = [1, 2, 0] # testing 123", "def test_join_tokens_to_string(self): tokens = [\"test\", \"? \", \"testing\", \"123\", \".\"]", "a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 #", "reserved_tokens) # Check that reserved tokens are at the front", "alphabet, min_count, num_iterations, reserved_tokens) # Check that reserved tokens are", "of the list self.assertEqual(vocab_list[:2], reserved_tokens) # Check that each character", "[\"reserved\", \"tokens\"] subtoken_list, max_token_length = tokenizer._gen_new_subtoken_list( subtoken_counts, min_count, alphabet, reserved_tokens)", "tokens = [\"test\", \"? 
\", \"testing\", \"123\", \".\"] s =", "official.nlp.transformer.utils import tokenizer class SubtokenizerTest(tf.test.TestCase): def _init_subtokenizer(self, vocab_list): temp_file =", "set(\"abc_\") min_count = 100 num_iterations = 1 reserved_tokens = [\"reserved\",", "alphabet is in the vocab list for c in alphabet:", "subtoken_list) self.assertEqual(len(\"translate\"), max_token_length) def test_generate_subtokens(self): token_counts = {\"ab\": 1, \"bc\":", "methods.\"\"\" import collections import tempfile import tensorflow as tf from", "0, \"strings\": 1}, d) def test_split_token_to_subtokens(self): token = \"abc\" subtoken_dict", "def test_split_token_to_subtokens(self): token = \"abc\" subtoken_dict = {\"a\": 0, \"b\":", "\"translate\": 10, \"t\": 40, \"tr\": 16, \"tra\": 12 }) min_count", "tokens = tokenizer._split_string_to_tokens(text, tokenizer._ALPHANUMERIC_CHAR_SET) self.assertEqual([\"test\", \"? \", \"testing\", \"123\", \".\"],", "2, 0] # testing 123 decoded_str = subtokenizer.decode(encoded_list) self.assertEqual(\"testing 123\",", "\"strings\"] d = tokenizer._list_to_index_dict(lst) self.assertDictEqual({\"test\": 0, \"strings\": 1}, d) def", "self.assertIn(\"?\", alphabet) self.assertIn(\"t\", alphabet) self.assertIn(\"e\", alphabet) self.assertIn(\"s\", alphabet) self.assertIn(\"i\", alphabet)", "\"License\"); # you may not use this file except in", "5, \"bc\": 5, \"c_\": 5, \"abc\": 5, \"bc_\": 5, \"abc_\":", "distributed on an \"AS IS\" BASIS, # WITHOUT WARRANTIES OR", "test_escape_token(self): token = u\"abc_\\\\4\" alphabet = set(\"abc_\\\\u;\") escaped_token = tokenizer._escape_token(token,", "in vocab_list: w.write(\"'%s'\" % subtoken) w.write(\"\\n\") return tokenizer.Subtokenizer(temp_file.name, reserved_tokens=[]) def", "= u\"abc_\\\\4\" alphabet = set(\"abc_\\\\u;\") escaped_token = tokenizer._escape_token(token, alphabet) self.assertEqual(\"abc\\\\u\\\\\\\\\\\\52;_\",", "# distributed under the License is distributed on an \"AS", "# Unless required by applicable law or agreed to in", "alphabet) self.assertIn(\"2\", alphabet) self.assertIn(\"3\", alphabet) def test_count_and_gen_subtokens(self): token_counts = {\"abc\":", "\"ab\": 6, \"ac\": 3, \"abbc\": 5 }) min_count = 3", "tokenizer._filter_and_bucket_subtokens( subtoken_counts, min_count) self.assertEqual(len(subtoken_buckets[0]), 0) self.assertEqual(set(\"b\"), subtoken_buckets[1]) self.assertEqual(set([\"ab\", \"ac\"]), subtoken_buckets[2])", "_, Backslash: \\\\, Unicode: 4\", unescaped_token) def test_list_to_index_dict(self): lst =", "self.assertEqual(\"abc\\\\u\\\\\\\\\\\\52;_\", escaped_token) def test_unescape_token(self): escaped_token = u\"Underline: \\\\u, Backslash: \\\\\\\\,", "\"123\"] reserved_tokens = [\"???\"] alphabet = tokenizer._generate_alphabet_dict(s, reserved_tokens) self.assertIn(\"?\", alphabet)", "\"AS IS\" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY", "1}, d) def test_split_token_to_subtokens(self): token = \"abc\" subtoken_dict = {\"a\":", "= collections.defaultdict(int, { \"translate\": 10, \"t\": 40, \"tr\": 16, \"tra\":", "You may obtain a copy of the License at #", "[1, 2, 0] # testing 123 decoded_str = subtokenizer.decode(encoded_list) self.assertEqual(\"testing", "self.assertEqual([u\"testing\", u\"123\"], token_list) class StringHelperTest(tf.test.TestCase): def test_split_string_to_tokens(self): text = \"test?", "max_subtoken_length = 2 subtokens = tokenizer._split_token_to_subtokens(token, subtoken_dict, max_subtoken_length) self.assertEqual([\"ab\", \"c\"],", 
"permissions and # limitations under the License. \"\"\"Test Subtokenizer and", "self.assertEqual([\"ab\", \"c\"], subtokens) def test_generate_alphabet_dict(self): s = [\"testing\", \"123\"] reserved_tokens", "the Apache License, Version 2.0 (the \"License\"); # you may", "decoded_str) def test_subtoken_ids_to_tokens(self): vocab_list = [\"123_\", \"test\", \"ing_\"] subtokenizer =", "1, \"bc\": 3, \"abc\": 5} alphabet = set(\"abc_\") min_count =", "\"abbc\": 5 }) min_count = 3 subtoken_buckets = tokenizer._filter_and_bucket_subtokens( subtoken_counts,", "\"b\": 1, \"c\": 2, \"ab\": 3} max_subtoken_length = 2 subtokens" ]
[ "for openstack snapshot data\"\"\" def __init__(self, data): self.data = data['snapshot']", "= SnapshotData(self.show()) return self._snapshot_obj def show(self): return self.plugin.cinder.show_snapshot(self.source_id) def delete(self):", "is not None: return self._snapshot_obj self._snapshot_obj = SnapshotData(self.show()) return self._snapshot_obj", "= None @property def snapshot_obj(self): if self._snapshot_obj is not None:", "2020 Soil, Inc. from soil.openstack.base import DataBase from soil.openstack.base import", "None @property def snapshot_obj(self): if self._snapshot_obj is not None: return", "snapshot\"\"\" def __init__(self, plugin, source_id): super(Snapshot, self).__init__(plugin, source_id) self._snapshot_obj =", "self._snapshot_obj self._snapshot_obj = SnapshotData(self.show()) return self._snapshot_obj def show(self): return self.plugin.cinder.show_snapshot(self.source_id)", "from soil.openstack.base import SourceBase class SnapshotData(DataBase): \"\"\"A class for openstack", "if status in ('available', ): return True self._check_failed_status(status) return False", "status = snapshot_info['snapshot']['status'] if status in ('available', ): return True", "SnapshotData(DataBase): \"\"\"A class for openstack snapshot data\"\"\" def __init__(self, data):", "self._snapshot_obj = SnapshotData(self.show()) return self._snapshot_obj def show(self): return self.plugin.cinder.show_snapshot(self.source_id) def", "def show(self): return self.plugin.cinder.show_snapshot(self.source_id) def delete(self): self.plugin.cinder.delete_snapshot(self.source_id) def is_created(self): snapshot_info", "source_id) self._snapshot_obj = None @property def snapshot_obj(self): if self._snapshot_obj is", "data\"\"\" def __init__(self, data): self.data = data['snapshot'] class Snapshot(SourceBase): \"\"\"A", "# Copyright 2020 Soil, Inc. from soil.openstack.base import DataBase from", "Copyright 2020 Soil, Inc. from soil.openstack.base import DataBase from soil.openstack.base", "Soil, Inc. from soil.openstack.base import DataBase from soil.openstack.base import SourceBase", "class SnapshotData(DataBase): \"\"\"A class for openstack snapshot data\"\"\" def __init__(self,", "in ('available', ): return True self._check_failed_status(status) return False def is_delete(self):", "self._snapshot_obj = None @property def snapshot_obj(self): if self._snapshot_obj is not", "('available', ): return True self._check_failed_status(status) return False def is_delete(self): pass", "self.show() status = snapshot_info['snapshot']['status'] if status in ('available', ): return", "class Snapshot(SourceBase): \"\"\"A class for openstack snapshot\"\"\" def __init__(self, plugin,", "snapshot_info['snapshot']['status'] if status in ('available', ): return True self._check_failed_status(status) return", "@property def snapshot_obj(self): if self._snapshot_obj is not None: return self._snapshot_obj", "Inc. from soil.openstack.base import DataBase from soil.openstack.base import SourceBase class", "data['snapshot'] class Snapshot(SourceBase): \"\"\"A class for openstack snapshot\"\"\" def __init__(self,", "<gh_stars>1-10 # Copyright 2020 Soil, Inc. 
from soil.openstack.base import DataBase
from soil.openstack.base import SourceBase


class SnapshotData(DataBase):
    """A class for openstack snapshot data"""

    def __init__(self, data):
        self.data = data['snapshot']


class Snapshot(SourceBase):
    """A class for openstack snapshot"""

    def __init__(self, plugin, source_id):
        super(Snapshot, self).__init__(plugin, source_id)
        self._snapshot_obj = None

    @property
    def snapshot_obj(self):
        if self._snapshot_obj is not None:
            return self._snapshot_obj

        self._snapshot_obj = SnapshotData(self.show())
        return self._snapshot_obj

    def show(self):
        return self.plugin.cinder.show_snapshot(self.source_id)

    def delete(self):
        self.plugin.cinder.delete_snapshot(self.source_id)

    def is_created(self):
        snapshot_info = self.show()
        status = snapshot_info['snapshot']['status']
        if status in ('available', ):
            return True

        self._check_failed_status(status)
        return False
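
# --- Usage sketch (illustrative, not part of the original module) ---
# Shows how a caller might poll Snapshot.is_created() until Cinder
# reports the snapshot 'available'. The `plugin` argument is assumed to
# expose a `cinder` client with show_snapshot()/delete_snapshot(); the
# helper name and the timeout/interval values are hypothetical.
import time


def wait_for_snapshot(plugin, snapshot_id, timeout=300, interval=5):
    """Block until the snapshot is available; raise on timeout."""
    snapshot = Snapshot(plugin, snapshot_id)
    deadline = time.time() + timeout
    while time.time() < deadline:
        # is_created() returns True once the status is 'available' and
        # delegates error states to _check_failed_status().
        if snapshot.is_created():
            return snapshot.snapshot_obj
        time.sleep(interval)
    raise RuntimeError('snapshot %s not available after %s seconds'
                       % (snapshot_id, timeout))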
[ "secrets from the lsass.exe memory It does not work if:", "# -*- coding: utf-8 -*- # Thanks to @skelsec for", "access \"\"\" def __init__(self): ModuleInfo.__init__(self, 'pypykatz', 'windows', system_module=True) def run(self):", "hash for data in user.get('msv_creds', []): if data['username']: login =", "[]) for logon_session in logon_sessions: # Right now kerberos_creds, dpapi_creds", "mimi = pypykatz.go_live() except Exception: self.debug(traceback.format_exc()) if mimi: results =", "[] for user in results: results[user]['Login'] = user pwd_found.append(results[user]) return", "results[login]['Nthash'] = codecs.encode(data['NThash'], 'hex') constant.pypykatz_result = results pwd_found = []", "mimi = None try: mimi = pypykatz.go_live() except Exception: self.debug(traceback.format_exc())", "'ssp_creds', 'livessp_creds', 'tspkg_creds', 'wdigest_creds']: for data in user.get(i, []): if", "= data.get('domainname', 'N/A') results[login]['Password'] = data['password'] # msv_creds to get", "'livessp_creds', 'tspkg_creds', 'wdigest_creds']: for data in user.get(i, []): if all((data['username'],", "memory It does not work if: - LSASS is running", "dumps all secrets from the lsass.exe memory It does not", "'windows', system_module=True) def run(self): mimi = None try: mimi =", "= mimi.to_dict().get('logon_sessions', []) for logon_session in logon_sessions: # Right now", "logon_session in logon_sessions: # Right now kerberos_creds, dpapi_creds results are", "data in user.get('msv_creds', []): if data['username']: login = data['username'] else:", "login = data['username'] else: login = user['username'] if login not", "results pwd_found = [] for user in results: results[user]['Login'] =", "Exception: self.debug(traceback.format_exc()) if mimi: results = {} logon_sessions = mimi.to_dict().get('logon_sessions',", "constant from pypykatz.pypykatz import pypykatz class Pypykatz(ModuleInfo): \"\"\" Pypykatz dumps", "if all((data['username'], data['password'])): login = data['username'] if login not in", "results[login]['Domain'] = data.get('domainname', 'N/A') results[login]['Password'] = data['password'] # msv_creds to", "for data in user.get('msv_creds', []): if data['username']: login = data['username']", "- LSASS is running as a protected process - A", "self.debug(traceback.format_exc()) if mimi: results = {} logon_sessions = mimi.to_dict().get('logon_sessions', [])", "It does not work if: - LSASS is running as", "this access \"\"\" def __init__(self): ModuleInfo.__init__(self, 'pypykatz', 'windows', system_module=True) def", "'hex') if data['LMHash']: results[login]['Lmhash'] = codecs.encode(data['LMHash'], 'hex') if data['NThash']: results[login]['Nthash']", "lsass.exe memory It does not work if: - LSASS is", "# Get cleartext password for i in ['credman_creds', 'ssp_creds', 'livessp_creds',", "results: results[login] = {} results[login]['Type'] = i results[login]['Domain'] = data.get('domainname',", "Pypykatz # Checks his project here: https://github.com/skelsec/pypykatz import codecs import", "{} logon_sessions = mimi.to_dict().get('logon_sessions', []) for logon_session in logon_sessions: #", "to get sha1 user hash for data in user.get('msv_creds', []):", "his project here: https://github.com/skelsec/pypykatz import codecs import traceback from lazagne.config.module_info", "data['NThash']: results[login]['Nthash'] = codecs.encode(data['NThash'], 'hex') constant.pypykatz_result = results pwd_found =", "-*- coding: utf-8 -*- # Thanks to @skelsec for his", "# msv_creds to get sha1 user hash for 
data in", "= logon_sessions[logon_session] # Get cleartext password for i in ['credman_creds',", "kerberos_creds, dpapi_creds results are not used user = logon_sessions[logon_session] #", "codecs.encode(data['NThash'], 'hex') constant.pypykatz_result = results pwd_found = [] for user", "if data['LMHash']: results[login]['Lmhash'] = codecs.encode(data['LMHash'], 'hex') if data['NThash']: results[login]['Nthash'] =", "# Right now kerberos_creds, dpapi_creds results are not used user", "Right now kerberos_creds, dpapi_creds results are not used user =", "cleartext password for i in ['credman_creds', 'ssp_creds', 'livessp_creds', 'tspkg_creds', 'wdigest_creds']:", "= data['username'] else: login = user['username'] if login not in", "running as a protected process - A security product blocks", "pypykatz.go_live() except Exception: self.debug(traceback.format_exc()) if mimi: results = {} logon_sessions", "from the lsass.exe memory It does not work if: -", "from pypykatz.pypykatz import pypykatz class Pypykatz(ModuleInfo): \"\"\" Pypykatz dumps all", "user.get(i, []): if all((data['username'], data['password'])): login = data['username'] if login", "[]): if data['username']: login = data['username'] else: login = user['username']", "= {} logon_sessions = mimi.to_dict().get('logon_sessions', []) for logon_session in logon_sessions:", "results[login]['Password'] = data['password'] # msv_creds to get sha1 user hash", "except Exception: self.debug(traceback.format_exc()) if mimi: results = {} logon_sessions =", "user = logon_sessions[logon_session] # Get cleartext password for i in", "traceback from lazagne.config.module_info import ModuleInfo from lazagne.config.constant import constant from", "password for i in ['credman_creds', 'ssp_creds', 'livessp_creds', 'tspkg_creds', 'wdigest_creds']: for", "data['LMHash']: results[login]['Lmhash'] = codecs.encode(data['LMHash'], 'hex') if data['NThash']: results[login]['Nthash'] = codecs.encode(data['NThash'],", "data['username']: login = data['username'] else: login = user['username'] if login", "results are not used user = logon_sessions[logon_session] # Get cleartext", "if login not in results: results[login] = {} if data['SHAHash']:", "ModuleInfo from lazagne.config.constant import constant from pypykatz.pypykatz import pypykatz class", "results[login] = {} if data['SHAHash']: results[login]['Shahash'] = codecs.encode(data['SHAHash'], 'hex') if", "i results[login]['Domain'] = data.get('domainname', 'N/A') results[login]['Password'] = data['password'] # msv_creds", "'hex') constant.pypykatz_result = results pwd_found = [] for user in", "security product blocks this access \"\"\" def __init__(self): ModuleInfo.__init__(self, 'pypykatz',", "LSASS is running as a protected process - A security", "'wdigest_creds']: for data in user.get(i, []): if all((data['username'], data['password'])): login", "protected process - A security product blocks this access \"\"\"", "user hash for data in user.get('msv_creds', []): if data['username']: login", "data['username'] if login not in results: results[login] = {} results[login]['Type']", "= {} results[login]['Type'] = i results[login]['Domain'] = data.get('domainname', 'N/A') results[login]['Password']", "from lazagne.config.module_info import ModuleInfo from lazagne.config.constant import constant from pypykatz.pypykatz", "login = data['username'] if login not in results: results[login] =", "login not in results: results[login] = {} results[login]['Type'] = i", "= codecs.encode(data['LMHash'], 'hex') if data['NThash']: 
results[login]['Nthash'] = codecs.encode(data['NThash'], 'hex') constant.pypykatz_result", "in logon_sessions: # Right now kerberos_creds, dpapi_creds results are not", "constant.pypykatz_result = results pwd_found = [] for user in results:", "data['password'] # msv_creds to get sha1 user hash for data", "work if: - LSASS is running as a protected process", "login not in results: results[login] = {} if data['SHAHash']: results[login]['Shahash']", "= codecs.encode(data['SHAHash'], 'hex') if data['LMHash']: results[login]['Lmhash'] = codecs.encode(data['LMHash'], 'hex') if", "is running as a protected process - A security product", "class Pypykatz(ModuleInfo): \"\"\" Pypykatz dumps all secrets from the lsass.exe", "import pypykatz class Pypykatz(ModuleInfo): \"\"\" Pypykatz dumps all secrets from", "used user = logon_sessions[logon_session] # Get cleartext password for i", "all secrets from the lsass.exe memory It does not work", "from lazagne.config.constant import constant from pypykatz.pypykatz import pypykatz class Pypykatz(ModuleInfo):", "# Checks his project here: https://github.com/skelsec/pypykatz import codecs import traceback", "for data in user.get(i, []): if all((data['username'], data['password'])): login =", "data['password'])): login = data['username'] if login not in results: results[login]", "codecs import traceback from lazagne.config.module_info import ModuleInfo from lazagne.config.constant import", "blocks this access \"\"\" def __init__(self): ModuleInfo.__init__(self, 'pypykatz', 'windows', system_module=True)", "= [] for user in results: results[user]['Login'] = user pwd_found.append(results[user])", "in user.get(i, []): if all((data['username'], data['password'])): login = data['username'] if", "codecs.encode(data['SHAHash'], 'hex') if data['LMHash']: results[login]['Lmhash'] = codecs.encode(data['LMHash'], 'hex') if data['NThash']:", "= results pwd_found = [] for user in results: results[user]['Login']", "results[login] = {} results[login]['Type'] = i results[login]['Domain'] = data.get('domainname', 'N/A')", "\"\"\" def __init__(self): ModuleInfo.__init__(self, 'pypykatz', 'windows', system_module=True) def run(self): mimi", "if data['SHAHash']: results[login]['Shahash'] = codecs.encode(data['SHAHash'], 'hex') if data['LMHash']: results[login]['Lmhash'] =", "in results: results[login] = {} if data['SHAHash']: results[login]['Shahash'] = codecs.encode(data['SHAHash'],", "coding: utf-8 -*- # Thanks to @skelsec for his awesome", "user['username'] if login not in results: results[login] = {} if", "- A security product blocks this access \"\"\" def __init__(self):", "a protected process - A security product blocks this access", "logon_sessions[logon_session] # Get cleartext password for i in ['credman_creds', 'ssp_creds',", "import ModuleInfo from lazagne.config.constant import constant from pypykatz.pypykatz import pypykatz", "'tspkg_creds', 'wdigest_creds']: for data in user.get(i, []): if all((data['username'], data['password'])):", "https://github.com/skelsec/pypykatz import codecs import traceback from lazagne.config.module_info import ModuleInfo from", "login = user['username'] if login not in results: results[login] =", "'pypykatz', 'windows', system_module=True) def run(self): mimi = None try: mimi", "the lsass.exe memory It does not work if: - LSASS", "if data['username']: login = data['username'] else: login = user['username'] if", "not in results: results[login] = {} if data['SHAHash']: results[login]['Shahash'] =", "try: mimi = pypykatz.go_live() except 
Exception: self.debug(traceback.format_exc()) if mimi: results", "as a protected process - A security product blocks this", "if data['NThash']: results[login]['Nthash'] = codecs.encode(data['NThash'], 'hex') constant.pypykatz_result = results pwd_found", "def __init__(self): ModuleInfo.__init__(self, 'pypykatz', 'windows', system_module=True) def run(self): mimi =", "for logon_session in logon_sessions: # Right now kerberos_creds, dpapi_creds results", "tool Pypykatz # Checks his project here: https://github.com/skelsec/pypykatz import codecs", "sha1 user hash for data in user.get('msv_creds', []): if data['username']:", "logon_sessions = mimi.to_dict().get('logon_sessions', []) for logon_session in logon_sessions: # Right", "else: login = user['username'] if login not in results: results[login]", "in results: results[login] = {} results[login]['Type'] = i results[login]['Domain'] =", "= codecs.encode(data['NThash'], 'hex') constant.pypykatz_result = results pwd_found = [] for", "now kerberos_creds, dpapi_creds results are not used user = logon_sessions[logon_session]", "'hex') if data['NThash']: results[login]['Nthash'] = codecs.encode(data['NThash'], 'hex') constant.pypykatz_result = results", "if login not in results: results[login] = {} results[login]['Type'] =", "codecs.encode(data['LMHash'], 'hex') if data['NThash']: results[login]['Nthash'] = codecs.encode(data['NThash'], 'hex') constant.pypykatz_result =", "ModuleInfo.__init__(self, 'pypykatz', 'windows', system_module=True) def run(self): mimi = None try:", "Checks his project here: https://github.com/skelsec/pypykatz import codecs import traceback from", "system_module=True) def run(self): mimi = None try: mimi = pypykatz.go_live()", "def run(self): mimi = None try: mimi = pypykatz.go_live() except", "lazagne.config.module_info import ModuleInfo from lazagne.config.constant import constant from pypykatz.pypykatz import", "user.get('msv_creds', []): if data['username']: login = data['username'] else: login =", "mimi: results = {} logon_sessions = mimi.to_dict().get('logon_sessions', []) for logon_session", "product blocks this access \"\"\" def __init__(self): ModuleInfo.__init__(self, 'pypykatz', 'windows',", "results[login]['Shahash'] = codecs.encode(data['SHAHash'], 'hex') if data['LMHash']: results[login]['Lmhash'] = codecs.encode(data['LMHash'], 'hex')", "Get cleartext password for i in ['credman_creds', 'ssp_creds', 'livessp_creds', 'tspkg_creds',", "if mimi: results = {} logon_sessions = mimi.to_dict().get('logon_sessions', []) for", "pwd_found = [] for user in results: results[user]['Login'] = user", "lazagne.config.constant import constant from pypykatz.pypykatz import pypykatz class Pypykatz(ModuleInfo): \"\"\"", "process - A security product blocks this access \"\"\" def", "not used user = logon_sessions[logon_session] # Get cleartext password for", "data in user.get(i, []): if all((data['username'], data['password'])): login = data['username']", "[]): if all((data['username'], data['password'])): login = data['username'] if login not", "import traceback from lazagne.config.module_info import ModuleInfo from lazagne.config.constant import constant", "his awesome tool Pypykatz # Checks his project here: https://github.com/skelsec/pypykatz", "# Thanks to @skelsec for his awesome tool Pypykatz #", "= i results[login]['Domain'] = data.get('domainname', 'N/A') results[login]['Password'] = data['password'] #", "data.get('domainname', 'N/A') results[login]['Password'] = data['password'] # msv_creds to get sha1", 
"results[login]['Lmhash'] = codecs.encode(data['LMHash'], 'hex') if data['NThash']: results[login]['Nthash'] = codecs.encode(data['NThash'], 'hex')", "in ['credman_creds', 'ssp_creds', 'livessp_creds', 'tspkg_creds', 'wdigest_creds']: for data in user.get(i,", "not in results: results[login] = {} results[login]['Type'] = i results[login]['Domain']", "-*- # Thanks to @skelsec for his awesome tool Pypykatz", "run(self): mimi = None try: mimi = pypykatz.go_live() except Exception:", "['credman_creds', 'ssp_creds', 'livessp_creds', 'tspkg_creds', 'wdigest_creds']: for data in user.get(i, []):", "utf-8 -*- # Thanks to @skelsec for his awesome tool", "Thanks to @skelsec for his awesome tool Pypykatz # Checks", "for user in results: results[user]['Login'] = user pwd_found.append(results[user]) return pwd_found", "= {} if data['SHAHash']: results[login]['Shahash'] = codecs.encode(data['SHAHash'], 'hex') if data['LMHash']:", "mimi.to_dict().get('logon_sessions', []) for logon_session in logon_sessions: # Right now kerberos_creds,", "= data['username'] if login not in results: results[login] = {}", "not work if: - LSASS is running as a protected", "= None try: mimi = pypykatz.go_live() except Exception: self.debug(traceback.format_exc()) if", "does not work if: - LSASS is running as a", "if: - LSASS is running as a protected process -", "in user.get('msv_creds', []): if data['username']: login = data['username'] else: login", "results: results[login] = {} if data['SHAHash']: results[login]['Shahash'] = codecs.encode(data['SHAHash'], 'hex')", "data['username'] else: login = user['username'] if login not in results:", "\"\"\" Pypykatz dumps all secrets from the lsass.exe memory It", "= pypykatz.go_live() except Exception: self.debug(traceback.format_exc()) if mimi: results = {}", "Pypykatz dumps all secrets from the lsass.exe memory It does", "A security product blocks this access \"\"\" def __init__(self): ModuleInfo.__init__(self,", "{} if data['SHAHash']: results[login]['Shahash'] = codecs.encode(data['SHAHash'], 'hex') if data['LMHash']: results[login]['Lmhash']", "i in ['credman_creds', 'ssp_creds', 'livessp_creds', 'tspkg_creds', 'wdigest_creds']: for data in", "'N/A') results[login]['Password'] = data['password'] # msv_creds to get sha1 user", "= data['password'] # msv_creds to get sha1 user hash for", "@skelsec for his awesome tool Pypykatz # Checks his project", "results[login]['Type'] = i results[login]['Domain'] = data.get('domainname', 'N/A') results[login]['Password'] = data['password']", "here: https://github.com/skelsec/pypykatz import codecs import traceback from lazagne.config.module_info import ModuleInfo", "None try: mimi = pypykatz.go_live() except Exception: self.debug(traceback.format_exc()) if mimi:", "dpapi_creds results are not used user = logon_sessions[logon_session] # Get", "for i in ['credman_creds', 'ssp_creds', 'livessp_creds', 'tspkg_creds', 'wdigest_creds']: for data", "{} results[login]['Type'] = i results[login]['Domain'] = data.get('domainname', 'N/A') results[login]['Password'] =", "msv_creds to get sha1 user hash for data in user.get('msv_creds',", "pypykatz class Pypykatz(ModuleInfo): \"\"\" Pypykatz dumps all secrets from the", "are not used user = logon_sessions[logon_session] # Get cleartext password", "data['SHAHash']: results[login]['Shahash'] = codecs.encode(data['SHAHash'], 'hex') if data['LMHash']: results[login]['Lmhash'] = codecs.encode(data['LMHash'],", "to @skelsec for his awesome tool Pypykatz # Checks his", "import constant from pypykatz.pypykatz 
import pypykatz class Pypykatz(ModuleInfo): \"\"\" Pypykatz", "= user['username'] if login not in results: results[login] = {}", "__init__(self): ModuleInfo.__init__(self, 'pypykatz', 'windows', system_module=True) def run(self): mimi = None", "for his awesome tool Pypykatz # Checks his project here:", "pypykatz.pypykatz import pypykatz class Pypykatz(ModuleInfo): \"\"\" Pypykatz dumps all secrets", "all((data['username'], data['password'])): login = data['username'] if login not in results:", "results = {} logon_sessions = mimi.to_dict().get('logon_sessions', []) for logon_session in", "awesome tool Pypykatz # Checks his project here: https://github.com/skelsec/pypykatz import", "project here: https://github.com/skelsec/pypykatz import codecs import traceback from lazagne.config.module_info import", "import codecs import traceback from lazagne.config.module_info import ModuleInfo from lazagne.config.constant", "get sha1 user hash for data in user.get('msv_creds', []): if", "Pypykatz(ModuleInfo): \"\"\" Pypykatz dumps all secrets from the lsass.exe memory", "logon_sessions: # Right now kerberos_creds, dpapi_creds results are not used" ]
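
# --- Usage sketch (illustrative, not part of the original module) ---
# ModuleInfo subclasses are normally driven by LaZagne's runner; the
# guard below only shows the expected call shape. Reading lsass.exe
# memory requires an elevated prompt on Windows, so expect an empty
# result otherwise.
if __name__ == '__main__':
    for cred in Pypykatz().run() or []:
        # Each entry is a dict with 'Login' plus either a cleartext
        # 'Password' or hash fields ('Shahash'/'Lmhash'/'Nthash').
        print(cred)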
[ "\"\"\"Test standard Discogs position 12.2.9#4: \"multiple CDs\".\"\"\" release = self._make_release_from_positions(['1-1',", "release.data['tracklist'][1]['title'] = 'TRACK GROUP TITLE' release.data['tracklist'][1]['sub_tracks'] = [ self._make_track('TITLE ONE',", "self.assertEqual(d.mediums, 2) self.assertEqual(t[0].medium, 1) self.assertEqual(t[0].medium_total, 2) self.assertEqual(t[0].medium_index, 1) self.assertEqual(t[1].medium, 1)", "self.assertEqual(t[0].medium_index, 1) self.assertEqual(t[1].medium, 1) self.assertEqual(t[1].medium_total, 2) self.assertEqual(t[1].medium_index, 2) self.assertEqual(t[2].medium, 2)", "}], 'formats': [{ 'descriptions': ['FORMAT DESC 1', 'FORMAT DESC 2'],", "position) for (i, position) in enumerate(positions, start=1)] return self._make_release(tracks) def", "medium, medium_index and subtrack_index.\"\"\" # List of tuples (discogs_position, (medium,", "d = DiscogsPlugin().get_album_info(release) self.assertEqual(d.mediums, 2) # 3 sides = 1", "parsing of index tracks that act as disc titles.\"\"\" release", "be # included in all copies or substantial portions of", "d = DiscogsPlugin().get_album_info(release) self.assertEqual(d.mediums, 1) self.assertEqual(len(d.tracks), 3) def test_parse_tracklist_subtracks_extra_material(self): \"\"\"Test", "Track 2: Index track with track group title release.data['tracklist'][1]['title'] =", "permission notice shall be # included in all copies or", "'B1', 'B2']) d = DiscogsPlugin().get_album_info(release) self.assertEqual(d.mediums, 1) # 2 sides", "required for the tests on this class.\"\"\" data = {", "'type_' field, but # the API seems to return it.", "\"\"\" release = self._make_release_from_positions(['1', '', '4']) # Track 2: Index", "self.assertEqual(t[0].medium_total, 2) self.assertEqual(t[0].medium_index, 1) self.assertEqual(t[1].medium, 1) self.assertEqual(t[1].medium_total, 2) self.assertEqual(t[1].medium_index, 2)", "d = DiscogsPlugin() for position, expected in positions: self.assertEqual(d.get_track_index(position), expected)", "test_parse_tracklist_subtracks_letter(self): \"\"\"Test standard Discogs position 12.2.9#5: \"sub tracks, letter\".\"\"\" release", "TITLE' release.data['tracklist'][1]['sub_tracks'] = [ self._make_track('TITLE ONE', '2.1', '01:01'), self._make_track('TITLE TWO',", "of a release that does not have the required fields.\"\"\"", "def test_parse_tracklist_multiple_lp(self): \"\"\"Test standard Discogs position 12.2.9#3: \"multiple LP\".\"\"\" release", "self.assertEqual(d.mediums, 3) self.assertEqual(t[0].medium_index, 1) self.assertEqual(t[0].index, 1) self.assertEqual(t[0].medium_total, 2) self.assertEqual(t[1].medium_index, 2)", "standard Discogs position 12.2.9#3: \"multiple LP\".\"\"\" release = self._make_release_from_positions(['A1', 'A2',", "release that does not have the required fields.\"\"\" release =", "self.assertEqual(d.mediums, 1) self.assertEqual(len(d.tracks), 3) self.assertEqual(d.tracks[1].title, 'TRACK GROUP TITLE') def test_parse_tracklist_subtracks_nested_physical(self):", "the minimal amount of information.\"\"\" data = {'id': 123, 'tracklist':", "1) def test_parse_track_indices(self): release = self._make_release_from_positions(['1', '2']) d = DiscogsPlugin().get_album_info(release)", "position 12.2.9#4: \"multiple CDs\".\"\"\" release = self._make_release_from_positions(['1-1', '1-2', '2-1', '3-1'])", "contain the required fields', logs[0]) def suite(): return unittest.TestLoader().loadTestsFromName(__name__) if", "self.assertEqual(t[0].medium_total, 2) 
self.assertEqual(t[1].medium_index, 2) self.assertEqual(t[1].index, 2) self.assertEqual(t[1].medium_total, 2) def test_parse_track_indices_several_media(self):", "import division, absolute_import, print_function import unittest from test import _common", "charge, to any person obtaining # a copy of this", "self.assertEqual(d.tracks[2].disctitle, 'MEDIUM TITLE CD2') self.assertEqual(len(d.tracks), 3) def test_parse_minimal_release(self): \"\"\"Test parsing", "CDs\".\"\"\" release = self._make_release_from_positions(['1-1', '1-2', '2-1', '3-1']) d = DiscogsPlugin().get_album_info(release)", "'A')), ('12.34', (None, '12', '34')), ('1ab', (None, '1', 'AB')), #", "'III', 'IV']) d = DiscogsPlugin().get_album_info(release) self.assertEqual(d.mediums, 1) self.assertEqual(len(d.tracks), 4) def", "release = self._make_release_from_positions(['A1', 'A2.1', 'A2.2', 'A3']) d = DiscogsPlugin().get_album_info(release) self.assertEqual(d.mediums,", "self.assertEqual(d.tracks[0].disctitle, 'MEDIUM TITLE CD1') self.assertEqual(d.tracks[1].disctitle, 'MEDIUM TITLE CD1') self.assertEqual(d.tracks[2].disctitle, 'MEDIUM", "list of elements on the returned Bag is incomplete, including", "to return it. Values: 'track' for regular tracks, # 'heading'", "1) self.assertEqual(t[2].index, 3) self.assertEqual(t[2].medium_total, 1) self.assertEqual(t[3].medium_index, 1) self.assertEqual(t[3].index, 4) self.assertEqual(t[3].medium_total,", "= d.tracks self.assertEqual(d.mediums, 2) self.assertEqual(t[0].medium, 1) self.assertEqual(t[0].medium_total, 2) self.assertEqual(t[0].medium_index, 1)", "release = self._make_release_from_positions(['A1', 'B1', 'C1']) d = DiscogsPlugin().get_album_info(release) t =", "enumerate(positions, start=1)] return self._make_release(tracks) def test_parse_media_for_tracks(self): tracks = [self._make_track('TITLE ONE',", "'1.2']) # Track 1: Index track with medium title release.data['tracklist'][0]['title']", "'A3']) d = DiscogsPlugin().get_album_info(release) self.assertEqual(d.mediums, 1) self.assertEqual(len(d.tracks), 3) def test_parse_tracklist_subtracks_extra_material(self):", "Index track with medium title release.data['tracklist'][0]['title'] = 'MEDIUM TITLE' #", "shall be # included in all copies or substantial portions", "2) self.assertEqual(t[2].medium, 2) self.assertEqual(t[2].medium_total, 1) self.assertEqual(t[2].medium_index, 1) def test_parse_track_indices(self): release", "positions = [('1', (None, '1', None)), ('A12', ('A', '12', None)),", "self.assertEqual(len(d.tracks), 4) def test_parse_tracklist_subtracks_dot(self): \"\"\"Test standard Discogs position 12.2.9#5: \"sub", "self.assertEqual(d.tracks[0].title, 'TRACK GROUP TITLE') def test_parse_tracklist_subtracks_nested_logical(self): \"\"\"Test parsing of subtracks", "medium title (Cd2) release.data['tracklist'][3]['title'] = 'MEDIUM TITLE CD2' d =", "self.assertEqual(t[2].medium_index, 1) def test_parse_track_indices(self): release = self._make_release_from_positions(['1', '2']) d =", "None: # Test samples on discogs_client do not have a", "tracklist where tracks have the specified `positions`.\"\"\" tracks = [self._make_track('TITLE%s'", "release = self._make_release_from_positions(['A1', 'A2', 'B1', 'B2']) d = DiscogsPlugin().get_album_info(release) self.assertEqual(d.mediums,", "= {'id': 123, 'tracklist': [self._make_track('A', '1', '01:01')], 'artists': [{'name': 'ARTIST", "start=1)] return self._make_release(tracks) def test_parse_media_for_tracks(self): tracks = [self._make_track('TITLE ONE', '1',", "sell copies of 
the Software, and to # permit persons", "1) self.assertEqual(t[0].medium_total, 2) self.assertEqual(t[1].medium_index, 2) self.assertEqual(t[1].index, 2) self.assertEqual(t[1].medium_total, 2) def", "self._make_release_from_positions(['1', '', '4']) # Track 2: Index track with track", "as disc titles.\"\"\" release = self._make_release_from_positions(['', '1-1', '1-2', '', '2-1'])", "def test_parse_minimal_release(self): \"\"\"Test parsing of a release with the minimal", "= self._make_release_from_positions(['A1', 'A2', 'B1', 'B2']) d = DiscogsPlugin().get_album_info(release) self.assertEqual(d.mediums, 1)", "tracks=None): \"\"\"Returns a Bag that mimics a discogs_client.Release. The list", "def test_parse_tracklist_subtracks_extra_material(self): \"\"\"Test standard Discogs position 12.2.9#6: \"extra material\".\"\"\" release", "= DiscogsPlugin().get_album_info(release) t = d.tracks self.assertEqual(d.mediums, 2) self.assertEqual(t[0].medium, 1) self.assertEqual(t[0].medium_total,", "DESC 2'], 'name': 'FORMAT', 'qty': 1 }], 'styles': [ 'STYLE1',", "*args: None) with capture_log() as logs: d = DiscogsPlugin().get_album_info(release) self.assertEqual(d,", "[] } if tracks: for recording in tracks: data['tracklist'].append(recording) return", "\"\"\"Test parsing of subtracks that include index tracks.\"\"\" release =", "GROUP TITLE') def test_parse_tracklist_subtracks_nested_logical(self): \"\"\"Test parsing of subtracks defined inside", "# # The above copyright notice and this permission notice", "publish, # distribute, sublicense, and/or sell copies of the Software,", "'id': 321, 'join': ''}], 'title': 'TITLE'} release = Bag(data=data, title=data['title'],", "self.assertEqual(len(d.tracks), 4) def test_parse_tracklist_multiple_cd(self): \"\"\"Test standard Discogs position 12.2.9#4: \"multiple", "= self._make_release_from_positions(['A1', 'A2a', 'A2b', 'A3']) d = DiscogsPlugin().get_album_info(release) self.assertEqual(d.mediums, 1)", "Values: 'track' for regular tracks, # 'heading' for descriptive texts", "1 LP + 1 LP self.assertEqual(len(d.tracks), 4) def test_parse_tracklist_multiple_cd(self): \"\"\"Test", "= self._make_release_from_positions(['A1', 'A2.1', 'A2.2', 'A3']) d = DiscogsPlugin().get_album_info(release) self.assertEqual(d.mediums, 1)", "of discogs `position` to medium, medium_index and subtrack_index.\"\"\" # List", "'MEDIUM TITLE') self.assertEqual(len(d.tracks), 1) self.assertEqual(d.tracks[0].title, 'TRACK GROUP TITLE') def test_parse_tracklist_subtracks_nested_logical(self):", "'join': ',' }], 'formats': [{ 'descriptions': ['FORMAT DESC 1', 'FORMAT", "tracks have the specified `positions`.\"\"\" tracks = [self._make_track('TITLE%s' % i,", "= [('1', (None, '1', None)), ('A12', ('A', '12', None)), ('12-34',", "use, copy, modify, merge, publish, # distribute, sublicense, and/or sell", "Index track with track group title, and sub_tracks release.data['tracklist'][1]['title'] =", "1) self.assertEqual(t[3].medium_index, 1) self.assertEqual(t[3].index, 4) self.assertEqual(t[3].medium_total, 1) def test_parse_position(self): \"\"\"Test", "return it. 
Values: 'track' for regular tracks, # 'heading' for", "d = DiscogsPlugin().get_album_info(release) self.assertEqual(d.mediums, 1) # 2 sides = 1", "+ 1 LP self.assertEqual(len(d.tracks), 4) def test_parse_tracklist_multiple_cd(self): \"\"\"Test standard Discogs", "def test_parse_tracklist_without_sides(self): \"\"\"Test standard Discogs position 12.2.9#1: \"without sides\".\"\"\" release", "position.\"\"\" release = self._make_release_from_positions(['I', 'II', 'III', 'IV']) d = DiscogsPlugin().get_album_info(release)", "tracks: data['tracklist'].append(recording) return Bag(data=data, # Make some fields available as", "2) self.assertEqual(t[1].medium_index, 2) self.assertEqual(t[1].index, 2) self.assertEqual(t[1].medium_total, 2) self.assertEqual(t[2].medium_index, 1) self.assertEqual(t[2].index,", "GROUP TITLE' d = DiscogsPlugin().get_album_info(release) self.assertEqual(d.mediums, 1) self.assertEqual(d.tracks[0].disctitle, 'MEDIUM TITLE')", "import unittest from test import _common from test._common import Bag", "self.assertEqual(t[0].medium_total, 1) self.assertEqual(t[1].medium, 2) self.assertEqual(t[1].medium_total, 1) def test_parse_medium_numbers_two_mediums_two_sided(self): release =", "modify, merge, publish, # distribute, sublicense, and/or sell copies of", "ONE', '2.1', '01:01'), self._make_track('TITLE TWO', '2.2', '02:02') ] d =", "'artists': [{ 'name': 'ARTIST NAME', 'id': 'ARTIST ID', 'join': ','", "= 'TRACK GROUP TITLE' release.data['tracklist'][1]['sub_tracks'] = [ self._make_track('TITLE ONE', '2',", "# Track 4: Index track with medium title (Cd2) release.data['tracklist'][3]['title']", "'TITLE'} release = Bag(data=data, title=data['title'], artists=[Bag(data=d) for d in data['artists']])", "self.assertEqual(t[3].medium_total, 1) def test_parse_position(self): \"\"\"Test the conversion of discogs `position`", "Discogs position 12.2.9#2: \"with sides\".\"\"\" release = self._make_release_from_positions(['A1', 'A2', 'B1',", "in tracks: data['tracklist'].append(recording) return Bag(data=data, # Make some fields available", "(None, '12', '34')), ('1ab', (None, '1', 'AB')), # Non-standard ('IV',", "'A2', 'B1', 'B2']) d = DiscogsPlugin().get_album_info(release) self.assertEqual(d.mediums, 1) # 2", "(discogs_position, (medium, medium_index, subindex) positions = [('1', (None, '1', None)),", "12.13.2). 
track['type_'] = type_ return track def _make_release_from_positions(self, positions): \"\"\"Return", "to # the following conditions: # # The above copyright", "def test_parse_track_indices(self): release = self._make_release_from_positions(['1', '2']) d = DiscogsPlugin().get_album_info(release) t", "LP self.assertEqual(len(d.tracks), 4) def test_parse_tracklist_multiple_lp(self): \"\"\"Test standard Discogs position 12.2.9#3:", "on this class.\"\"\" data = { 'id': 'ALBUM ID', 'uri':", "without restriction, including # without limitation the rights to use,", "index tracks.\"\"\" release = self._make_release_from_positions(['', '', '1.1', '1.2']) # Track", "defined inside a index track that are physical subtracks (ie.", "self._make_release_from_positions(['', '', '1.1', '1.2']) # Track 1: Index track with", "position 12.2.9#1: \"without sides\".\"\"\" release = self._make_release_from_positions(['1', '2', '3']) d", "self._make_release_from_positions(['1', '2', '3']) d = DiscogsPlugin().get_album_info(release) self.assertEqual(d.mediums, 1) self.assertEqual(len(d.tracks), 3)", "2) self.assertEqual(t[0].medium, 1) self.assertEqual(t[0].medium_total, 2) self.assertEqual(t[0].medium_index, 1) self.assertEqual(t[1].medium, 1) self.assertEqual(t[1].medium_total,", "[{ 'name': 'LABEL NAME', 'catno': 'CATALOG NUMBER', }], 'tracklist': []", "= self._make_release_from_positions(['1', '2', '3']) d = DiscogsPlugin().get_album_info(release) self.assertEqual(d.mediums, 1) self.assertEqual(len(d.tracks),", "sublicense, and/or sell copies of the Software, and to #", "'TRACK GROUP TITLE' d = DiscogsPlugin().get_album_info(release) self.assertEqual(d.mediums, 1) self.assertEqual(d.tracks[0].disctitle, 'MEDIUM", "medium title (Cd1) release.data['tracklist'][0]['title'] = 'MEDIUM TITLE CD1' # Track", "release = self._make_release_from_positions(['A1', 'A2a', 'A2b', 'A3']) d = DiscogsPlugin().get_album_info(release) self.assertEqual(d.mediums,", "are logical subtracks (ie. should be grouped together into a", "release.data['tracklist'][1]['title'] = 'TRACK GROUP TITLE' d = DiscogsPlugin().get_album_info(release) self.assertEqual(d.mediums, 1)", "on the returned Bag is incomplete, including just those required", "'STYLE1', 'STYLE2' ], 'labels': [{ 'name': 'LABEL NAME', 'catno': 'CATALOG", "'MEDIUM TITLE CD1') self.assertEqual(d.tracks[2].disctitle, 'MEDIUM TITLE CD2') self.assertEqual(len(d.tracks), 3) def", "\"\"\"Test standard Discogs position 12.2.9#1: \"without sides\".\"\"\" release = self._make_release_from_positions(['1',", "= self._make_release_from_positions(['A1', 'A2', 'B1', 'C1']) d = DiscogsPlugin().get_album_info(release) self.assertEqual(d.mediums, 2)", "self.assertEqual(t[0].index, 1) self.assertEqual(t[0].medium_total, 2) self.assertEqual(t[1].medium_index, 2) self.assertEqual(t[1].index, 2) self.assertEqual(t[1].medium_total, 2)", "2) self.assertEqual(t[1].index, 2) self.assertEqual(t[1].medium_total, 2) self.assertEqual(t[2].medium_index, 1) self.assertEqual(t[2].index, 3) self.assertEqual(t[2].medium_total,", "non standard Discogs position.\"\"\" release = self._make_release_from_positions(['I', 'II', 'III', 'IV'])", "discogs plugin. 
\"\"\" from __future__ import division, absolute_import, print_function import", "CD1') self.assertEqual(d.tracks[2].disctitle, 'MEDIUM TITLE CD2') self.assertEqual(len(d.tracks), 3) def test_parse_minimal_release(self): \"\"\"Test", "1) self.assertEqual(t[0].medium_total, 2) self.assertEqual(t[0].medium_index, 1) self.assertEqual(t[1].medium, 1) self.assertEqual(t[1].medium_total, 2) self.assertEqual(t[1].medium_index,", "'2.1', '01:01'), self._make_track('TITLE TWO', '2.2', '02:02') ] d = DiscogsPlugin().get_album_info(release)", "def test_parse_medium_numbers_single_medium(self): release = self._make_release_from_positions(['1', '2']) d = DiscogsPlugin().get_album_info(release) t", "persons to whom the Software is furnished to do so,", "position) in enumerate(positions, start=1)] return self._make_release(tracks) def test_parse_media_for_tracks(self): tracks =", "CD1') self.assertEqual(d.tracks[1].disctitle, 'MEDIUM TITLE CD1') self.assertEqual(d.tracks[2].disctitle, 'MEDIUM TITLE CD2') self.assertEqual(len(d.tracks),", "= DiscogsPlugin().get_album_info(release) self.assertEqual(d.mediums, 1) self.assertEqual(len(d.tracks), 3) def test_parse_tracklist_with_sides(self): \"\"\"Test standard", "= DiscogsPlugin().get_album_info(release) t = d.tracks self.assertEqual(d.media, 'FORMAT') self.assertEqual(t[0].media, d.media) self.assertEqual(t[1].media,", "grouped together into a single track). \"\"\" release = self._make_release_from_positions(['1',", "12.2.9#1: \"without sides\".\"\"\" release = self._make_release_from_positions(['1', '2', '3']) d =", "not have the required fields.\"\"\" release = Bag(data={}, refresh=lambda *args:", "TWO', '2.2', '02:02') ] d = DiscogsPlugin().get_album_info(release) self.assertEqual(d.mediums, 1) self.assertEqual(len(d.tracks),", "the required fields.\"\"\" release = Bag(data={}, refresh=lambda *args: None) with", "the Software. \"\"\"Tests for discogs plugin. 
\"\"\" from __future__ import", "test_parse_medium_numbers_two_mediums(self): release = self._make_release_from_positions(['1-1', '2-1']) d = DiscogsPlugin().get_album_info(release) t =", "2 sides = 1 LP self.assertEqual(len(d.tracks), 4) def test_parse_tracklist_multiple_lp(self): \"\"\"Test", "track with medium title release.data['tracklist'][0]['title'] = 'MEDIUM TITLE' # Track", "with capture_log() as logs: d = DiscogsPlugin().get_album_info(release) self.assertEqual(d, None) self.assertIn('Release", "those required for the tests on this class.\"\"\" data =", "1 }], 'styles': [ 'STYLE1', 'STYLE2' ], 'labels': [{ 'name':", "of subtracks that include index tracks.\"\"\" release = self._make_release_from_positions(['', '',", "t = d.tracks self.assertEqual(d.mediums, 1) self.assertEqual(t[0].medium, 1) self.assertEqual(t[0].medium_total, 2) self.assertEqual(t[1].medium,", "test_parse_tracklist_subtracks_nested_physical(self): \"\"\"Test parsing of subtracks defined inside a index track", "Bag from test.helper import capture_log from beetsplug.discogs import DiscogsPlugin class", "\"\"\"Test parsing of a release with the minimal amount of", "including # without limitation the rights to use, copy, modify,", "'02:02') ] d = DiscogsPlugin().get_album_info(release) self.assertEqual(d.mediums, 1) self.assertEqual(len(d.tracks), 3) self.assertEqual(d.tracks[1].title,", "in data['artists']]) def _make_track(self, title, position='', duration='', type_=None): track =", "'3']) # Track 2: Index track with track group title,", "# without limitation the rights to use, copy, modify, merge,", "{'id': 123, 'tracklist': [self._make_track('A', '1', '01:01')], 'artists': [{'name': 'ARTIST NAME',", "1: Index track with medium title (Cd1) release.data['tracklist'][0]['title'] = 'MEDIUM", "self.assertEqual(d.mediums, 1) self.assertEqual(len(d.tracks), 4) def test_parse_tracklist_subtracks_dot(self): \"\"\"Test standard Discogs position", "= self._make_release_from_positions(['A1', 'B1', 'C1']) d = DiscogsPlugin().get_album_info(release) t = d.tracks", "(ie. not real tracks - 12.13.2). 
track['type_'] = type_ return", "1) self.assertEqual(t[0].medium_total, 2) def test_parse_medium_numbers_two_mediums(self): release = self._make_release_from_positions(['1-1', '2-1']) d", "self.assertEqual(len(d.tracks), 4) def test_parse_tracklist_non_standard(self): \"\"\"Test non standard Discogs position.\"\"\" release", "self.assertEqual(t[1].medium_total, 1) def test_parse_medium_numbers_two_mediums_two_sided(self): release = self._make_release_from_positions(['A1', 'B1', 'C1']) d", "sides = 1 LP self.assertEqual(len(d.tracks), 4) def test_parse_tracklist_multiple_lp(self): \"\"\"Test standard", "('1.12', (None, '1', '12')), ('12.a', (None, '12', 'A')), ('12.34', (None,", "included in all copies or substantial portions of the Software.", "3 sides = 1 LP + 1 LP self.assertEqual(len(d.tracks), 4)", "return self._make_release(tracks) def test_parse_media_for_tracks(self): tracks = [self._make_track('TITLE ONE', '1', '01:01'),", "NAME') self.assertEqual(d.album, 'TITLE') self.assertEqual(len(d.tracks), 1) def test_parse_release_without_required_fields(self): \"\"\"Test parsing of", "position 12.2.9#5: \"sub tracks, letter\".\"\"\" release = self._make_release_from_positions(['A1', 'A2a', 'A2b',", "track with track group title release.data['tracklist'][1]['title'] = 'TRACK GROUP TITLE'", "d.tracks self.assertEqual(d.media, 'FORMAT') self.assertEqual(t[0].media, d.media) self.assertEqual(t[1].media, d.media) def test_parse_medium_numbers_single_medium(self): release", "data['artists']]) d = DiscogsPlugin().get_album_info(release) self.assertEqual(d.artist, 'ARTIST NAME') self.assertEqual(d.album, 'TITLE') self.assertEqual(len(d.tracks),", "from beetsplug.discogs import DiscogsPlugin class DGAlbumInfoTest(_common.TestCase): def _make_release(self, tracks=None): \"\"\"Returns", "} if tracks: for recording in tracks: data['tracklist'].append(recording) return Bag(data=data,", "to do so, subject to # the following conditions: #", "test_parse_tracklist_multiple_cd(self): \"\"\"Test standard Discogs position 12.2.9#4: \"multiple CDs\".\"\"\" release =", "conversion of discogs `position` to medium, medium_index and subtrack_index.\"\"\" #", "notice shall be # included in all copies or substantial", "self.assertEqual(t[1].medium_index, 2) self.assertEqual(t[1].index, 2) self.assertEqual(t[1].medium_total, 2) def test_parse_track_indices_several_media(self): release =", "= 'TRACK GROUP TITLE' release.data['tracklist'][1]['sub_tracks'] = [ self._make_track('TITLE ONE', '2.1',", "# Make some fields available as properties, as they are", "TITLE') def test_parse_tracklist_subtracks_nested_logical(self): \"\"\"Test parsing of subtracks defined inside a", "in data['artists']]) d = DiscogsPlugin().get_album_info(release) self.assertEqual(d.artist, 'ARTIST NAME') self.assertEqual(d.album, 'TITLE')", "DiscogsPlugin().get_album_info(release) t = d.tracks self.assertEqual(d.mediums, 1) self.assertEqual(t[0].medium, 1) self.assertEqual(t[0].medium_total, 2)", "Software without restriction, including # without limitation the rights to", "Bag that mimics a discogs_client.Release. The list of elements on", "('12.a', (None, '12', 'A')), ('12.34', (None, '12', '34')), ('1ab', (None,", "medium_index, subindex) positions = [('1', (None, '1', None)), ('A12', ('A',", "for discogs plugin. 
\"\"\" from __future__ import division, absolute_import, print_function", "self._make_release_from_positions(['A1', 'B1', 'C1']) d = DiscogsPlugin().get_album_info(release) t = d.tracks self.assertEqual(d.mediums,", "index tracks that act as disc titles.\"\"\" release = self._make_release_from_positions(['',", "(the # \"Software\"), to deal in the Software without restriction,", "# The above copyright notice and this permission notice shall", "this class.\"\"\" data = { 'id': 'ALBUM ID', 'uri': 'ALBUM", "12.2.9#5: \"sub tracks, dots\".\"\"\" release = self._make_release_from_positions(['1', '2.1', '2.2', '3'])", "self._make_release_from_positions(['1', '2', 'Video 1']) d = DiscogsPlugin().get_album_info(release) self.assertEqual(d.mediums, 2) self.assertEqual(len(d.tracks),", "'12', None)), ('12-34', ('12-', '34', None)), ('CD1-1', ('CD1-', '1', None)),", "a Bag that mimics a discogs_client.Release. The list of elements", "track with track group title, and sub_tracks release.data['tracklist'][1]['title'] = 'TRACK", "], 'labels': [{ 'name': 'LABEL NAME', 'catno': 'CATALOG NUMBER', }],", "TWO') def test_parse_tracklist_disctitles(self): \"\"\"Test parsing of index tracks that act", "'heading' for descriptive texts (ie. not real tracks - 12.13.2).", "for d in data['artists']]) def _make_track(self, title, position='', duration='', type_=None):", "test_parse_tracklist_subtracks_extra_material(self): \"\"\"Test standard Discogs position 12.2.9#6: \"extra material\".\"\"\" release =", "test_parse_minimal_release(self): \"\"\"Test parsing of a release with the minimal amount", "1) self.assertEqual(d.tracks[0].disctitle, 'MEDIUM TITLE') self.assertEqual(len(d.tracks), 1) self.assertEqual(d.tracks[0].title, 'TRACK GROUP TITLE')", "in the Software without restriction, including # without limitation the", "= DiscogsPlugin().get_album_info(release) self.assertEqual(d.mediums, 1) self.assertEqual(len(d.tracks), 4) def test_parse_tracklist_subtracks_dot(self): \"\"\"Test standard", "'MEDIUM TITLE CD1' # Track 4: Index track with medium", "sides = 1 LP + 1 LP self.assertEqual(len(d.tracks), 4) def", "title, and sub_tracks release.data['tracklist'][1]['title'] = 'TRACK GROUP TITLE' release.data['tracklist'][1]['sub_tracks'] =", "or substantial portions of the Software. \"\"\"Tests for discogs plugin.", "Copyright 2016, <NAME>. # # Permission is hereby granted, free", "DiscogsPlugin methods. title=data['title'], artists=[Bag(data=d) for d in data['artists']]) def _make_track(self,", "division, absolute_import, print_function import unittest from test import _common from", "('1ab', (None, '1', 'AB')), # Non-standard ('IV', ('IV', None, None)),", "[{ 'descriptions': ['FORMAT DESC 1', 'FORMAT DESC 2'], 'name': 'FORMAT',", "2) self.assertEqual(d.tracks[0].disctitle, 'MEDIUM TITLE CD1') self.assertEqual(d.tracks[1].disctitle, 'MEDIUM TITLE CD1') self.assertEqual(d.tracks[2].disctitle,", "index track that are physical subtracks (ie. 
should not be", "(Cd1) release.data['tracklist'][0]['title'] = 'MEDIUM TITLE CD1' # Track 4: Index", "not have a 'type_' field, but # the API seems", "<filename>test/test_discogs.py # -*- coding: utf-8 -*- # This file is", "release = self._make_release_from_positions(['1', '2.1', '2.2', '3']) d = DiscogsPlugin().get_album_info(release) self.assertEqual(d.mediums,", "of this software and associated documentation files (the # \"Software\"),", "subject to # the following conditions: # # The above", "d = DiscogsPlugin().get_album_info(release) self.assertEqual(d.mediums, 1) self.assertEqual(len(d.tracks), 4) def test_parse_tracklist_subtracks_dot(self): \"\"\"Test", "[('1', (None, '1', None)), ('A12', ('A', '12', None)), ('12-34', ('12-',", "to medium, medium_index and subtrack_index.\"\"\" # List of tuples (discogs_position,", "a index track that are logical subtracks (ie. should be", "tests on this class.\"\"\" data = { 'id': 'ALBUM ID',", "field, but # the API seems to return it. Values:", "# Track 2: Index track with track group title, and", "are physical subtracks (ie. should not be grouped together). \"\"\"", "Make some fields available as properties, as they are #", "position 12.2.9#5: \"sub tracks, dots\".\"\"\" release = self._make_release_from_positions(['1', '2.1', '2.2',", "GROUP TITLE' release.data['tracklist'][1]['sub_tracks'] = [ self._make_track('TITLE ONE', '2', '01:01'), self._make_track('TITLE", "for d in data['artists']]) d = DiscogsPlugin().get_album_info(release) self.assertEqual(d.artist, 'ARTIST NAME')", "= DiscogsPlugin().get_album_info(release) self.assertEqual(d.mediums, 1) self.assertEqual(len(d.tracks), 3) def test_parse_tracklist_subtracks_extra_material(self): \"\"\"Test standard", "Track 1: Index track with medium title (Cd1) release.data['tracklist'][0]['title'] =", "}], 'tracklist': [] } if tracks: for recording in tracks:", "LP\".\"\"\" release = self._make_release_from_positions(['A1', 'A2', 'B1', 'C1']) d = DiscogsPlugin().get_album_info(release)", "self._make_track('TITLE ONE', '2', '01:01'), self._make_track('TITLE TWO', '3', '02:02') ] d", "to use, copy, modify, merge, publish, # distribute, sublicense, and/or", "artists=[Bag(data=d) for d in data['artists']]) d = DiscogsPlugin().get_album_info(release) self.assertEqual(d.artist, 'ARTIST", "following conditions: # # The above copyright notice and this", "def test_parse_position(self): \"\"\"Test the conversion of discogs `position` to medium,", "d = DiscogsPlugin().get_album_info(release) self.assertEqual(d.artist, 'ARTIST NAME') self.assertEqual(d.album, 'TITLE') self.assertEqual(len(d.tracks), 1)", "fields', logs[0]) def suite(): return unittest.TestLoader().loadTestsFromName(__name__) if __name__ == '__main__':", "'AB')), # Non-standard ('IV', ('IV', None, None)), ] d =", "in all copies or substantial portions of the Software. 
\"\"\"Tests", "def test_parse_tracklist_subtracks_indices(self): \"\"\"Test parsing of subtracks that include index tracks.\"\"\"", "\"without sides\".\"\"\" release = self._make_release_from_positions(['1', '2', '3']) d = DiscogsPlugin().get_album_info(release)", "self.assertEqual(d.media, 'FORMAT') self.assertEqual(t[0].media, d.media) self.assertEqual(t[1].media, d.media) def test_parse_medium_numbers_single_medium(self): release =", "self.assertEqual(len(d.tracks), 3) self.assertEqual(d.tracks[1].title, 'TRACK GROUP TITLE') def test_parse_tracklist_subtracks_nested_physical(self): \"\"\"Test parsing", "self.assertEqual(t[1].index, 2) self.assertEqual(t[1].medium_total, 2) def test_parse_track_indices_several_media(self): release = self._make_release_from_positions(['1-1', '1-2',", "conditions: # # The above copyright notice and this permission", "release = self._make_release_from_positions(['1', '2']) d = DiscogsPlugin().get_album_info(release) t = d.tracks", "'id': 'ARTIST ID', 'join': ',' }], 'formats': [{ 'descriptions': ['FORMAT", "group title, and sub_tracks release.data['tracklist'][1]['title'] = 'TRACK GROUP TITLE' release.data['tracklist'][1]['sub_tracks']", "of information.\"\"\" data = {'id': 123, 'tracklist': [self._make_track('A', '1', '01:01')],", "d.tracks self.assertEqual(t[0].medium_index, 1) self.assertEqual(t[0].index, 1) self.assertEqual(t[0].medium_total, 2) self.assertEqual(t[1].medium_index, 2) self.assertEqual(t[1].index,", "'C1']) d = DiscogsPlugin().get_album_info(release) self.assertEqual(d.mediums, 2) # 3 sides =", "of subtracks defined inside a index track that are physical", "('CD1-', '1', None)), ('1.12', (None, '1', '12')), ('12.a', (None, '12',", "self.assertEqual(len(d.tracks), 4) self.assertEqual(d.tracks[1].title, 'TITLE ONE') self.assertEqual(d.tracks[2].title, 'TITLE TWO') def test_parse_tracklist_disctitles(self):", "'TRACK GROUP TITLE' release.data['tracklist'][1]['sub_tracks'] = [ self._make_track('TITLE ONE', '2', '01:01'),", "\"\"\" release = self._make_release_from_positions(['1', '', '3']) # Track 2: Index", "fields.\"\"\" release = Bag(data={}, refresh=lambda *args: None) with capture_log() as", "\"sub tracks, letter\".\"\"\" release = self._make_release_from_positions(['A1', 'A2a', 'A2b', 'A3']) d", "Bag is incomplete, including just those required for the tests", "= DiscogsPlugin().get_album_info(release) self.assertEqual(d.mediums, 1) self.assertEqual(d.tracks[0].disctitle, 'MEDIUM TITLE') self.assertEqual(len(d.tracks), 1) self.assertEqual(d.tracks[0].title,", "self.assertEqual(t[1].medium_total, 2) self.assertEqual(t[2].medium_index, 1) self.assertEqual(t[2].index, 3) self.assertEqual(t[2].medium_total, 1) self.assertEqual(t[3].medium_index, 1)", "'STYLE2' ], 'labels': [{ 'name': 'LABEL NAME', 'catno': 'CATALOG NUMBER',", "1) self.assertEqual(t[0].medium_total, 2) self.assertEqual(t[1].medium, 1) self.assertEqual(t[0].medium_total, 2) def test_parse_medium_numbers_two_mediums(self): release", "parsing of subtracks that include index tracks.\"\"\" release = self._make_release_from_positions(['',", "self._make_track('TITLE TWO', '3', '02:02') ] d = DiscogsPlugin().get_album_info(release) self.assertEqual(d.mediums, 1)", "that act as disc titles.\"\"\" release = self._make_release_from_positions(['', '1-1', '1-2',", "for (i, position) in enumerate(positions, start=1)] return self._make_release(tracks) def test_parse_media_for_tracks(self):", "2) self.assertEqual(t[1].medium, 1) self.assertEqual(t[0].medium_total, 2) def 
test_parse_medium_numbers_two_mediums(self): release = self._make_release_from_positions(['1-1',", "real tracks - 12.13.2). track['type_'] = type_ return track def", "d.tracks self.assertEqual(d.mediums, 2) self.assertEqual(t[0].medium, 1) self.assertEqual(t[0].medium_total, 2) self.assertEqual(t[0].medium_index, 1) self.assertEqual(t[1].medium,", "return track def _make_release_from_positions(self, positions): \"\"\"Return a Bag that mimics", "test_parse_tracklist_disctitles(self): \"\"\"Test parsing of index tracks that act as disc", "accessed by DiscogsPlugin methods. title=data['title'], artists=[Bag(data=d) for d in data['artists']])", "DiscogsPlugin().get_album_info(release) self.assertEqual(d.mediums, 1) self.assertEqual(d.tracks[0].disctitle, 'MEDIUM TITLE') self.assertEqual(len(d.tracks), 1) self.assertEqual(d.tracks[0].title, 'TRACK", "= self._make_release_from_positions(['1', '2.1', '2.2', '3']) d = DiscogsPlugin().get_album_info(release) self.assertEqual(d.mediums, 1)", "def test_parse_track_indices_several_media(self): release = self._make_release_from_positions(['1-1', '1-2', '2-1', '3-1']) d =", "subtracks defined inside a index track that are physical subtracks", "obtaining # a copy of this software and associated documentation", "DiscogsPlugin().get_album_info(release) self.assertEqual(d.mediums, 2) self.assertEqual(len(d.tracks), 3) def test_parse_tracklist_subtracks_indices(self): \"\"\"Test parsing of", "\"\"\"Test parsing of index tracks that act as disc titles.\"\"\"", "subtracks defined inside a index track that are logical subtracks", "release = self._make_release_from_positions(['1-1', '1-2', '2-1', '3-1']) d = DiscogsPlugin().get_album_info(release) t", "Bag that mimics a discogs_client.Release with a tracklist where tracks", "'name': 'LABEL NAME', 'catno': 'CATALOG NUMBER', }], 'tracklist': [] }", "from test._common import Bag from test.helper import capture_log from beetsplug.discogs", "None, None)), ] d = DiscogsPlugin() for position, expected in", "self._make_release(tracks) def test_parse_media_for_tracks(self): tracks = [self._make_track('TITLE ONE', '1', '01:01'), self._make_track('TITLE", "self.assertEqual(t[1].medium_index, 2) self.assertEqual(t[1].index, 2) self.assertEqual(t[1].medium_total, 2) self.assertEqual(t[2].medium_index, 1) self.assertEqual(t[2].index, 3)", "'A2.a', 'A2.b', 'A3']) d = DiscogsPlugin().get_album_info(release) self.assertEqual(d.mediums, 1) self.assertEqual(len(d.tracks), 3)", "free of charge, to any person obtaining # a copy", "\"\"\"Test non standard Discogs position.\"\"\" release = self._make_release_from_positions(['I', 'II', 'III',", "self.assertEqual(len(d.tracks), 3) release = self._make_release_from_positions(['A1', 'A2.a', 'A2.b', 'A3']) d =", "self.assertEqual(d.tracks[1].title, 'TRACK GROUP TITLE') def test_parse_tracklist_subtracks_nested_physical(self): \"\"\"Test parsing of subtracks", "from __future__ import division, absolute_import, print_function import unittest from test", "TITLE CD1') self.assertEqual(d.tracks[2].disctitle, 'MEDIUM TITLE CD2') self.assertEqual(len(d.tracks), 3) def test_parse_minimal_release(self):", "copy of this software and associated documentation files (the #", "3) self.assertEqual(len(d.tracks), 4) def test_parse_tracklist_non_standard(self): \"\"\"Test non standard Discogs position.\"\"\"", "do not have a 'type_' field, but # the API", "'12', 'A')), ('12.34', (None, '12', '34')), ('1ab', (None, '1', 'AB')),", "'year': '3001', 'artists': [{ 'name': 'ARTIST NAME', 'id': 'ARTIST ID',", 
"('CD1-1', ('CD1-', '1', None)), ('1.12', (None, '1', '12')), ('12.a', (None,", "copy, modify, merge, publish, # distribute, sublicense, and/or sell copies", "'TRACK GROUP TITLE') def test_parse_tracklist_subtracks_nested_physical(self): \"\"\"Test parsing of subtracks defined", "type_ return track def _make_release_from_positions(self, positions): \"\"\"Return a Bag that", "2) self.assertEqual(t[2].medium_index, 1) self.assertEqual(t[2].index, 3) self.assertEqual(t[2].medium_total, 1) self.assertEqual(t[3].medium_index, 1) self.assertEqual(t[3].index,", "track). \"\"\" release = self._make_release_from_positions(['1', '', '3']) # Track 2:", "for recording in tracks: data['tracklist'].append(recording) return Bag(data=data, # Make some", "DiscogsPlugin() for position, expected in positions: self.assertEqual(d.get_track_index(position), expected) def test_parse_tracklist_without_sides(self):", "(None, '1', None)), ('A12', ('A', '12', None)), ('12-34', ('12-', '34',", "= DiscogsPlugin().get_album_info(release) self.assertEqual(d.mediums, 1) self.assertEqual(len(d.tracks), 3) release = self._make_release_from_positions(['A1', 'A2.a',", "Index track with track group title release.data['tracklist'][1]['title'] = 'TRACK GROUP", "is hereby granted, free of charge, to any person obtaining", "'II', 'III', 'IV']) d = DiscogsPlugin().get_album_info(release) self.assertEqual(d.mediums, 1) self.assertEqual(len(d.tracks), 4)", "self.assertEqual(t[2].medium, 2) self.assertEqual(t[2].medium_total, 1) self.assertEqual(t[2].medium_index, 1) def test_parse_track_indices(self): release =", "data['tracklist'].append(recording) return Bag(data=data, # Make some fields available as properties,", "'2']) d = DiscogsPlugin().get_album_info(release) t = d.tracks self.assertEqual(t[0].medium_index, 1) self.assertEqual(t[0].index,", "'3']) d = DiscogsPlugin().get_album_info(release) self.assertEqual(d.mediums, 1) self.assertEqual(len(d.tracks), 3) release =", "TITLE CD1') self.assertEqual(d.tracks[1].disctitle, 'MEDIUM TITLE CD1') self.assertEqual(d.tracks[2].disctitle, 'MEDIUM TITLE CD2')", "= DiscogsPlugin().get_album_info(release) self.assertEqual(d.mediums, 2) # 3 sides = 1 LP", "('IV', None, None)), ] d = DiscogsPlugin() for position, expected", "of charge, to any person obtaining # a copy of", "above copyright notice and this permission notice shall be #", "return Bag(data=data, # Make some fields available as properties, as", "self.assertEqual(t[1].medium_total, 2) def test_parse_track_indices_several_media(self): release = self._make_release_from_positions(['1-1', '1-2', '2-1', '3-1'])", "self.assertEqual(t[2].medium_total, 1) self.assertEqual(t[3].medium_index, 1) self.assertEqual(t[3].index, 4) self.assertEqual(t[3].medium_total, 1) def test_parse_position(self):", "Index track with medium title (Cd2) release.data['tracklist'][3]['title'] = 'MEDIUM TITLE", "None)), ('1.12', (None, '1', '12')), ('12.a', (None, '12', 'A')), ('12.34',", "plugin. 
\"\"\" from __future__ import division, absolute_import, print_function import unittest", "'01:01'), self._make_track('TITLE TWO', '3', '02:02') ] d = DiscogsPlugin().get_album_info(release) self.assertEqual(d.mediums,", "release = self._make_release_from_positions(['1', '2', '3']) d = DiscogsPlugin().get_album_info(release) self.assertEqual(d.mediums, 1)", "def test_parse_tracklist_disctitles(self): \"\"\"Test parsing of index tracks that act as", "] d = DiscogsPlugin().get_album_info(release) self.assertEqual(d.mediums, 1) self.assertEqual(len(d.tracks), 3) self.assertEqual(d.tracks[1].title, 'TRACK", "duration='', type_=None): track = { 'title': title, 'position': position, 'duration':", "of beets. # Copyright 2016, <NAME>. # # Permission is", "file is part of beets. # Copyright 2016, <NAME>. #", "track that are logical subtracks (ie. should be grouped together", "# # Permission is hereby granted, free of charge, to", "self.assertEqual(d.artist, 'ARTIST NAME') self.assertEqual(d.album, 'TITLE') self.assertEqual(len(d.tracks), 1) def test_parse_release_without_required_fields(self): \"\"\"Test", "test_parse_tracklist_with_sides(self): \"\"\"Test standard Discogs position 12.2.9#2: \"with sides\".\"\"\" release =", "API seems to return it. Values: 'track' for regular tracks,", "d = DiscogsPlugin().get_album_info(release) self.assertEqual(d.mediums, 2) self.assertEqual(d.tracks[0].disctitle, 'MEDIUM TITLE CD1') self.assertEqual(d.tracks[1].disctitle,", "TWO', '3', '02:02') ] d = DiscogsPlugin().get_album_info(release) self.assertEqual(d.mediums, 1) self.assertEqual(len(d.tracks),", "= self._make_release_from_positions(['', '1-1', '1-2', '', '2-1']) # Track 1: Index", "test import _common from test._common import Bag from test.helper import", "permit persons to whom the Software is furnished to do", "minimal amount of information.\"\"\" data = {'id': 123, 'tracklist': [self._make_track('A',", "dots\".\"\"\" release = self._make_release_from_positions(['1', '2.1', '2.2', '3']) d = DiscogsPlugin().get_album_info(release)", "copies of the Software, and to # permit persons to", "None)), ('12-34', ('12-', '34', None)), ('CD1-1', ('CD1-', '1', None)), ('1.12',", "2: Index track with track group title, and sub_tracks release.data['tracklist'][1]['title']", "321, 'join': ''}], 'title': 'TITLE'} release = Bag(data=data, title=data['title'], artists=[Bag(data=d)", "self.assertEqual(len(d.tracks), 3) def test_parse_tracklist_subtracks_extra_material(self): \"\"\"Test standard Discogs position 12.2.9#6: \"extra", "'uri': 'ALBUM URI', 'title': 'ALBUM TITLE', 'year': '3001', 'artists': [{", "\"\"\"Test parsing of subtracks defined inside a index track that", "Bag(data={}, refresh=lambda *args: None) with capture_log() as logs: d =", "sub_tracks release.data['tracklist'][1]['title'] = 'TRACK GROUP TITLE' release.data['tracklist'][1]['sub_tracks'] = [ self._make_track('TITLE", "the API seems to return it. Values: 'track' for regular", "act as disc titles.\"\"\" release = self._make_release_from_positions(['', '1-1', '1-2', '',", "that mimics a discogs_client.Release. 
The list of elements on the", "',' }], 'formats': [{ 'descriptions': ['FORMAT DESC 1', 'FORMAT DESC", "\"\"\"Test standard Discogs position 12.2.9#2: \"with sides\".\"\"\" release = self._make_release_from_positions(['A1',", "self.assertEqual(d.tracks[1].disctitle, 'MEDIUM TITLE CD1') self.assertEqual(d.tracks[2].disctitle, 'MEDIUM TITLE CD2') self.assertEqual(len(d.tracks), 3)", "'2', 'Video 1']) d = DiscogsPlugin().get_album_info(release) self.assertEqual(d.mediums, 2) self.assertEqual(len(d.tracks), 3)", "d.media) self.assertEqual(t[1].media, d.media) def test_parse_medium_numbers_single_medium(self): release = self._make_release_from_positions(['1', '2']) d", "self._make_release_from_positions(['A1', 'A2', 'B1', 'B2']) d = DiscogsPlugin().get_album_info(release) self.assertEqual(d.mediums, 1) #", "3) self.assertEqual(d.tracks[1].title, 'TRACK GROUP TITLE') def test_parse_tracklist_subtracks_nested_physical(self): \"\"\"Test parsing of", "\"\"\"Test standard Discogs position 12.2.9#5: \"sub tracks, dots\".\"\"\" release =", "'ARTIST ID', 'join': ',' }], 'formats': [{ 'descriptions': ['FORMAT DESC", "information.\"\"\" data = {'id': 123, 'tracklist': [self._make_track('A', '1', '01:01')], 'artists':", "titles.\"\"\" release = self._make_release_from_positions(['', '1-1', '1-2', '', '2-1']) # Track", "3) def test_parse_tracklist_subtracks_extra_material(self): \"\"\"Test standard Discogs position 12.2.9#6: \"extra material\".\"\"\"", "\"\"\"Return a Bag that mimics a discogs_client.Release with a tracklist", "CD1' # Track 4: Index track with medium title (Cd2)", "test_parse_tracklist_subtracks_indices(self): \"\"\"Test parsing of subtracks that include index tracks.\"\"\" release", "self.assertEqual(t[1].medium_total, 2) self.assertEqual(t[1].medium_index, 2) self.assertEqual(t[2].medium, 2) self.assertEqual(t[2].medium_total, 1) self.assertEqual(t[2].medium_index, 1)", "DiscogsPlugin().get_album_info(release) self.assertEqual(d.mediums, 2) # 3 sides = 1 LP +", "List of tuples (discogs_position, (medium, medium_index, subindex) positions = [('1',", "subtracks (ie. should be grouped together into a single track).", "(None, '1', 'AB')), # Non-standard ('IV', ('IV', None, None)), ]", "and associated documentation files (the # \"Software\"), to deal in", "include index tracks.\"\"\" release = self._make_release_from_positions(['', '', '1.1', '1.2']) #", "test_parse_media_for_tracks(self): tracks = [self._make_track('TITLE ONE', '1', '01:01'), self._make_track('TITLE TWO', '2',", "= DiscogsPlugin() for position, expected in positions: self.assertEqual(d.get_track_index(position), expected) def", "__future__ import division, absolute_import, print_function import unittest from test import", "DiscogsPlugin().get_album_info(release) t = d.tracks self.assertEqual(t[0].medium_index, 1) self.assertEqual(t[0].index, 1) self.assertEqual(t[0].medium_total, 2)", "# 'heading' for descriptive texts (ie. 
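
    # For orientation, a sketch of the fixture shape the helpers above
    # produce (mirroring the Discogs API JSON these tests mimic; 'TITLE1'
    # comes from _make_release_from_positions, the rest from _make_track):
    #
    #   _make_track('TITLE1', 'A1')
    #       == {'title': 'TITLE1', 'position': 'A1', 'duration': ''}
    #
    # _make_release_from_positions(['A1', 'A2']) then wraps two such dicts
    # into Bag.data['tracklist'], which is what get_album_info() consumes.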

    def test_parse_media_for_tracks(self):
        tracks = [self._make_track('TITLE ONE', '1', '01:01'),
                  self._make_track('TITLE TWO', '2', '02:02')]
        release = self._make_release(tracks=tracks)

        d = DiscogsPlugin().get_album_info(release)
        t = d.tracks
        self.assertEqual(d.media, 'FORMAT')
        self.assertEqual(t[0].media, d.media)
        self.assertEqual(t[1].media, d.media)

    def test_parse_medium_numbers_single_medium(self):
        release = self._make_release_from_positions(['1', '2'])
        d = DiscogsPlugin().get_album_info(release)
        t = d.tracks

        self.assertEqual(d.mediums, 1)
        self.assertEqual(t[0].medium, 1)
        self.assertEqual(t[0].medium_total, 2)
        self.assertEqual(t[1].medium, 1)
        self.assertEqual(t[1].medium_total, 2)

    def test_parse_medium_numbers_two_mediums(self):
        release = self._make_release_from_positions(['1-1', '2-1'])
        d = DiscogsPlugin().get_album_info(release)
        t = d.tracks

        self.assertEqual(d.mediums, 2)
        self.assertEqual(t[0].medium, 1)
        self.assertEqual(t[0].medium_total, 1)
        self.assertEqual(t[1].medium, 2)
        self.assertEqual(t[1].medium_total, 1)

    def test_parse_medium_numbers_two_mediums_two_sided(self):
        release = self._make_release_from_positions(['A1', 'B1', 'C1'])
        d = DiscogsPlugin().get_album_info(release)
        t = d.tracks

        self.assertEqual(d.mediums, 2)
        self.assertEqual(t[0].medium, 1)
        self.assertEqual(t[0].medium_total, 2)
        self.assertEqual(t[0].medium_index, 1)
        self.assertEqual(t[1].medium, 1)
        self.assertEqual(t[1].medium_total, 2)
        self.assertEqual(t[1].medium_index, 2)
        self.assertEqual(t[2].medium, 2)
        self.assertEqual(t[2].medium_total, 1)
        self.assertEqual(t[2].medium_index, 1)

    def test_parse_track_indices(self):
        release = self._make_release_from_positions(['1', '2'])
        d = DiscogsPlugin().get_album_info(release)
        t = d.tracks

        self.assertEqual(t[0].medium_index, 1)
        self.assertEqual(t[0].index, 1)
        self.assertEqual(t[0].medium_total, 2)
        self.assertEqual(t[1].medium_index, 2)
        self.assertEqual(t[1].index, 2)
        self.assertEqual(t[1].medium_total, 2)

    def test_parse_track_indices_several_media(self):
        release = self._make_release_from_positions(['1-1', '1-2', '2-1',
                                                     '3-1'])
        d = DiscogsPlugin().get_album_info(release)
        t = d.tracks

        self.assertEqual(d.mediums, 3)
        self.assertEqual(t[0].medium_index, 1)
        self.assertEqual(t[0].index, 1)
        self.assertEqual(t[0].medium_total, 2)
        self.assertEqual(t[1].medium_index, 2)
        self.assertEqual(t[1].index, 2)
        self.assertEqual(t[1].medium_total, 2)
        self.assertEqual(t[2].medium_index, 1)
        self.assertEqual(t[2].index, 3)
        self.assertEqual(t[2].medium_total, 1)
        self.assertEqual(t[3].medium_index, 1)
        self.assertEqual(t[3].index, 4)
        self.assertEqual(t[3].medium_total, 1)

    def test_parse_position(self):
        """Test the conversion of discogs `position` to medium,
        medium_index and subtrack_index."""
        # List of tuples (discogs_position, (medium, medium_index, subindex))
        positions = [('1', (None, '1', None)),
                     ('A12', ('A', '12', None)),
                     ('12-34', ('12-', '34', None)),
                     ('CD1-1', ('CD1-', '1', None)),
                     ('1.12', (None, '1', '12')),
                     ('12.a', (None, '12', 'A')),
                     ('12.34', (None, '12', '34')),
                     ('1ab', (None, '1', 'AB')),
                     # Non-standard
                     ('IV', ('IV', None, None)),
                     ]

        d = DiscogsPlugin()
        for position, expected in positions:
            self.assertEqual(d.get_track_index(position), expected)
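
    # Reading the table above, one worked example: 'CD1-1' is expected to
    # split into ('CD1-', '1', None), i.e. 'CD1-' names the medium, '1' is
    # the track index on that medium, and there is no subtrack. Subindexes
    # only appear for dotted or suffixed positions such as '1.12' or '1ab',
    # while a bare non-standard token like 'IV' is kept whole as the medium.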

    def test_parse_tracklist_without_sides(self):
        """Test standard Discogs position 12.2.9#1: "without sides"."""
        release = self._make_release_from_positions(['1', '2', '3'])
        d = DiscogsPlugin().get_album_info(release)

        self.assertEqual(d.mediums, 1)
        self.assertEqual(len(d.tracks), 3)

    def test_parse_tracklist_with_sides(self):
        """Test standard Discogs position 12.2.9#2: "with sides"."""
        release = self._make_release_from_positions(['A1', 'A2', 'B1', 'B2'])
        d = DiscogsPlugin().get_album_info(release)

        self.assertEqual(d.mediums, 1)  # 2 sides = 1 LP
        self.assertEqual(len(d.tracks), 4)

    def test_parse_tracklist_multiple_lp(self):
        """Test standard Discogs position 12.2.9#3: "multiple LP"."""
        release = self._make_release_from_positions(['A1', 'A2', 'B1', 'C1'])
        d = DiscogsPlugin().get_album_info(release)

        self.assertEqual(d.mediums, 2)  # 3 sides = 1 LP + 1 LP
        self.assertEqual(len(d.tracks), 4)

    def test_parse_tracklist_multiple_cd(self):
        """Test standard Discogs position 12.2.9#4: "multiple CDs"."""
        release = self._make_release_from_positions(['1-1', '1-2', '2-1',
                                                     '3-1'])
        d = DiscogsPlugin().get_album_info(release)

        self.assertEqual(d.mediums, 3)
        self.assertEqual(len(d.tracks), 4)

    def test_parse_tracklist_non_standard(self):
        """Test non-standard Discogs position."""
        release = self._make_release_from_positions(['I', 'II', 'III', 'IV'])
        d = DiscogsPlugin().get_album_info(release)

        self.assertEqual(d.mediums, 1)
        self.assertEqual(len(d.tracks), 4)

    def test_parse_tracklist_subtracks_dot(self):
        """Test standard Discogs position 12.2.9#5: "sub tracks, dots"."""
        release = self._make_release_from_positions(['1', '2.1', '2.2', '3'])
        d = DiscogsPlugin().get_album_info(release)

        self.assertEqual(d.mediums, 1)
        self.assertEqual(len(d.tracks), 3)

        release = self._make_release_from_positions(['A1', 'A2.1', 'A2.2',
                                                     'A3'])
        d = DiscogsPlugin().get_album_info(release)

        self.assertEqual(d.mediums, 1)
        self.assertEqual(len(d.tracks), 3)

    def test_parse_tracklist_subtracks_letter(self):
        """Test standard Discogs position 12.2.9#5: "sub tracks, letter"."""
        release = self._make_release_from_positions(['A1', 'A2a', 'A2b',
                                                     'A3'])
        d = DiscogsPlugin().get_album_info(release)

        self.assertEqual(d.mediums, 1)
        self.assertEqual(len(d.tracks), 3)

        release = self._make_release_from_positions(['A1', 'A2.a', 'A2.b',
                                                     'A3'])
        d = DiscogsPlugin().get_album_info(release)

        self.assertEqual(d.mediums, 1)
        self.assertEqual(len(d.tracks), 3)

    def test_parse_tracklist_subtracks_extra_material(self):
        """Test standard Discogs position 12.2.9#6: "extra material"."""
        release = self._make_release_from_positions(['1', '2', 'Video 1'])
        d = DiscogsPlugin().get_album_info(release)

        self.assertEqual(d.mediums, 2)
        self.assertEqual(len(d.tracks), 3)
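
    # The next four tests exercise "index tracks": tracklist entries whose
    # 'position' is empty. In the API shape mimicked here, such an entry
    # carries a medium or track-group title and may hold nested entries
    # under a 'sub_tracks' key, as set up explicitly below.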

    def test_parse_tracklist_subtracks_indices(self):
        """Test parsing of subtracks that include index tracks."""
        release = self._make_release_from_positions(['', '', '1.1', '1.2'])
        # Track 1: Index track with medium title
        release.data['tracklist'][0]['title'] = 'MEDIUM TITLE'
        # Track 2: Index track with track group title
        release.data['tracklist'][1]['title'] = 'TRACK GROUP TITLE'

        d = DiscogsPlugin().get_album_info(release)
        self.assertEqual(d.mediums, 1)
        self.assertEqual(d.tracks[0].disctitle, 'MEDIUM TITLE')
        self.assertEqual(len(d.tracks), 1)
        self.assertEqual(d.tracks[0].title, 'TRACK GROUP TITLE')

    def test_parse_tracklist_subtracks_nested_logical(self):
        """Test parsing of subtracks defined inside an index track that are
        logical subtracks (ie. should be grouped together into a single
        track).
        """
        release = self._make_release_from_positions(['1', '', '3'])
        # Track 2: Index track with track group title, and sub_tracks
        release.data['tracklist'][1]['title'] = 'TRACK GROUP TITLE'
        release.data['tracklist'][1]['sub_tracks'] = [
            self._make_track('TITLE ONE', '2.1', '01:01'),
            self._make_track('TITLE TWO', '2.2', '02:02')
        ]

        d = DiscogsPlugin().get_album_info(release)
        self.assertEqual(d.mediums, 1)
        self.assertEqual(len(d.tracks), 3)
        self.assertEqual(d.tracks[1].title, 'TRACK GROUP TITLE')

    def test_parse_tracklist_subtracks_nested_physical(self):
        """Test parsing of subtracks defined inside an index track that are
        physical subtracks (ie. should not be grouped together).
        """
        release = self._make_release_from_positions(['1', '', '4'])
        # Track 2: Index track with track group title, and sub_tracks
        release.data['tracklist'][1]['title'] = 'TRACK GROUP TITLE'
        release.data['tracklist'][1]['sub_tracks'] = [
            self._make_track('TITLE ONE', '2', '01:01'),
            self._make_track('TITLE TWO', '3', '02:02')
        ]

        d = DiscogsPlugin().get_album_info(release)
        self.assertEqual(d.mediums, 1)
        self.assertEqual(len(d.tracks), 4)
        self.assertEqual(d.tracks[1].title, 'TITLE ONE')
        self.assertEqual(d.tracks[2].title, 'TITLE TWO')

    def test_parse_tracklist_disctitles(self):
        """Test parsing of index tracks that act as disc titles."""
        release = self._make_release_from_positions(['', '1-1', '1-2', '',
                                                     '2-1'])
        # Track 1: Index track with medium title (Cd1)
        release.data['tracklist'][0]['title'] = 'MEDIUM TITLE CD1'
        # Track 4: Index track with medium title (Cd2)
        release.data['tracklist'][3]['title'] = 'MEDIUM TITLE CD2'

        d = DiscogsPlugin().get_album_info(release)
        self.assertEqual(d.mediums, 2)
        self.assertEqual(d.tracks[0].disctitle, 'MEDIUM TITLE CD1')
        self.assertEqual(d.tracks[1].disctitle, 'MEDIUM TITLE CD1')
        self.assertEqual(d.tracks[2].disctitle, 'MEDIUM TITLE CD2')
        self.assertEqual(len(d.tracks), 3)

    def test_parse_minimal_release(self):
        """Test parsing of a release with the minimal amount of
        information."""
        data = {'id': 123,
                'tracklist': [self._make_track('A', '1', '01:01')],
                'artists': [{'name': 'ARTIST NAME', 'id': 321, 'join': ''}],
                'title': 'TITLE'}
        release = Bag(data=data,
                      title=data['title'],
                      artists=[Bag(data=d) for d in data['artists']])
        d = DiscogsPlugin().get_album_info(release)
        self.assertEqual(d.artist, 'ARTIST NAME')
        self.assertEqual(d.album, 'TITLE')
        self.assertEqual(len(d.tracks), 1)

    def test_parse_release_without_required_fields(self):
        """Test parsing of a release that does not have the required
        fields."""
        release = Bag(data={}, refresh=lambda *args: None)
        with capture_log() as logs:
            d = DiscogsPlugin().get_album_info(release)

        self.assertEqual(d, None)
        self.assertIn('Release does not contain the required fields', logs[0])
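

# A minimal usage sketch, illustrative rather than a test: it builds a
# release fixture the same way test_parse_minimal_release does and hands it
# to the plugin. The helper name is hypothetical, and it assumes
# DiscogsPlugin can be instantiated outside the test harness.
def _example_album_info():  # pragma: no cover
    data = {'id': 123,
            'tracklist': [{'title': 'A', 'position': '1',
                           'duration': '01:01'}],
            'artists': [{'name': 'ARTIST NAME', 'id': 321, 'join': ''}],
            'title': 'TITLE'}
    release = Bag(data=data,
                  title=data['title'],
                  artists=[Bag(data=d) for d in data['artists']])
    # For this fixture the tests above assert: artist == 'ARTIST NAME',
    # album == 'TITLE', and a single track.
    return DiscogsPlugin().get_album_info(release)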
\"\"\"Tests for discogs", "_common from test._common import Bag from test.helper import capture_log from", "= self._make_release_from_positions(['1-1', '1-2', '2-1', '3-1']) d = DiscogsPlugin().get_album_info(release) t =", "self.assertEqual(t[2].medium_index, 1) self.assertEqual(t[2].index, 3) self.assertEqual(t[2].medium_total, 1) self.assertEqual(t[3].medium_index, 1) self.assertEqual(t[3].index, 4)", "type_=None): track = { 'title': title, 'position': position, 'duration': duration", "d = DiscogsPlugin().get_album_info(release) self.assertEqual(d, None) self.assertIn('Release does not contain the", "release = self._make_release_from_positions(['', '', '1.1', '1.2']) # Track 1: Index", "'styles': [ 'STYLE1', 'STYLE2' ], 'labels': [{ 'name': 'LABEL NAME',", "data = {'id': 123, 'tracklist': [self._make_track('A', '1', '01:01')], 'artists': [{'name':", "'B1', 'C1']) d = DiscogsPlugin().get_album_info(release) self.assertEqual(d.mediums, 2) # 3 sides", "1) self.assertEqual(len(d.tracks), 4) self.assertEqual(d.tracks[1].title, 'TITLE ONE') self.assertEqual(d.tracks[2].title, 'TITLE TWO') def", "to deal in the Software without restriction, including # without", "(medium, medium_index, subindex) positions = [('1', (None, '1', None)), ('A12',", "material\".\"\"\" release = self._make_release_from_positions(['1', '2', 'Video 1']) d = DiscogsPlugin().get_album_info(release)", "1) self.assertEqual(len(d.tracks), 3) def test_parse_tracklist_subtracks_extra_material(self): \"\"\"Test standard Discogs position 12.2.9#6:", "d in data['artists']]) d = DiscogsPlugin().get_album_info(release) self.assertEqual(d.artist, 'ARTIST NAME') self.assertEqual(d.album,", "2) self.assertEqual(t[0].medium, 1) self.assertEqual(t[0].medium_total, 1) self.assertEqual(t[1].medium, 2) self.assertEqual(t[1].medium_total, 1) def", "TITLE' d = DiscogsPlugin().get_album_info(release) self.assertEqual(d.mediums, 1) self.assertEqual(d.tracks[0].disctitle, 'MEDIUM TITLE') self.assertEqual(len(d.tracks),", "= d.tracks self.assertEqual(t[0].medium_index, 1) self.assertEqual(t[0].index, 1) self.assertEqual(t[0].medium_total, 2) self.assertEqual(t[1].medium_index, 2)", "'title': title, 'position': position, 'duration': duration } if type_ is", "Track 4: Index track with medium title (Cd2) release.data['tracklist'][3]['title'] =", "'descriptions': ['FORMAT DESC 1', 'FORMAT DESC 2'], 'name': 'FORMAT', 'qty':", "parsing of subtracks defined inside a index track that are", "is incomplete, including just those required for the tests on", "Discogs position 12.2.9#1: \"without sides\".\"\"\" release = self._make_release_from_positions(['1', '2', '3'])", "d.tracks self.assertEqual(d.mediums, 3) self.assertEqual(t[0].medium_index, 1) self.assertEqual(t[0].index, 1) self.assertEqual(t[0].medium_total, 2) self.assertEqual(t[1].medium_index,", "track['type_'] = type_ return track def _make_release_from_positions(self, positions): \"\"\"Return a", "'title': 'TITLE'} release = Bag(data=data, title=data['title'], artists=[Bag(data=d) for d in", "where tracks have the specified `positions`.\"\"\" tracks = [self._make_track('TITLE%s' %", "= DiscogsPlugin().get_album_info(release) self.assertEqual(d.mediums, 1) self.assertEqual(len(d.tracks), 4) self.assertEqual(d.tracks[1].title, 'TITLE ONE') self.assertEqual(d.tracks[2].title,", "in positions: self.assertEqual(d.get_track_index(position), expected) def test_parse_tracklist_without_sides(self): \"\"\"Test standard Discogs position", "t = d.tracks self.assertEqual(d.mediums, 2) 
self.assertEqual(t[0].medium, 1) self.assertEqual(t[0].medium_total, 2) self.assertEqual(t[0].medium_index,", "1) self.assertEqual(len(d.tracks), 4) def test_parse_tracklist_subtracks_dot(self): \"\"\"Test standard Discogs position 12.2.9#5:", "# Track 1: Index track with medium title (Cd1) release.data['tracklist'][0]['title']", "self.assertEqual(d.album, 'TITLE') self.assertEqual(len(d.tracks), 1) def test_parse_release_without_required_fields(self): \"\"\"Test parsing of a", "def test_parse_medium_numbers_two_mediums(self): release = self._make_release_from_positions(['1-1', '2-1']) d = DiscogsPlugin().get_album_info(release) t", "'IV']) d = DiscogsPlugin().get_album_info(release) self.assertEqual(d.mediums, 1) self.assertEqual(len(d.tracks), 4) def test_parse_tracklist_subtracks_dot(self):", "'', '1.1', '1.2']) # Track 1: Index track with medium", "i, position) for (i, position) in enumerate(positions, start=1)] return self._make_release(tracks)", "2) self.assertEqual(t[2].medium_total, 1) self.assertEqual(t[2].medium_index, 1) def test_parse_track_indices(self): release = self._make_release_from_positions(['1',", "'01:01')], 'artists': [{'name': 'ARTIST NAME', 'id': 321, 'join': ''}], 'title':", "Software. \"\"\"Tests for discogs plugin. \"\"\" from __future__ import division,", "# Copyright 2016, <NAME>. # # Permission is hereby granted,", "'1-2', '', '2-1']) # Track 1: Index track with medium", "] d = DiscogsPlugin() for position, expected in positions: self.assertEqual(d.get_track_index(position),", "'3', '02:02') ] d = DiscogsPlugin().get_album_info(release) self.assertEqual(d.mediums, 1) self.assertEqual(len(d.tracks), 4)", "have the required fields.\"\"\" release = Bag(data={}, refresh=lambda *args: None)", "'34')), ('1ab', (None, '1', 'AB')), # Non-standard ('IV', ('IV', None,", "discogs `position` to medium, medium_index and subtrack_index.\"\"\" # List of", "positions): \"\"\"Return a Bag that mimics a discogs_client.Release with a", "tracks that act as disc titles.\"\"\" release = self._make_release_from_positions(['', '1-1',", "4) def test_parse_tracklist_multiple_lp(self): \"\"\"Test standard Discogs position 12.2.9#3: \"multiple LP\".\"\"\"", "test_parse_tracklist_without_sides(self): \"\"\"Test standard Discogs position 12.2.9#1: \"without sides\".\"\"\" release =", "'join': ''}], 'title': 'TITLE'} release = Bag(data=data, title=data['title'], artists=[Bag(data=d) for", "test_parse_medium_numbers_two_mediums_two_sided(self): release = self._make_release_from_positions(['A1', 'B1', 'C1']) d = DiscogsPlugin().get_album_info(release) t", "does not have the required fields.\"\"\" release = Bag(data={}, refresh=lambda", "self.assertEqual(d.mediums, 1) self.assertEqual(t[0].medium, 1) self.assertEqual(t[0].medium_total, 2) self.assertEqual(t[1].medium, 1) self.assertEqual(t[0].medium_total, 2)", "1 LP self.assertEqual(len(d.tracks), 4) def test_parse_tracklist_multiple_lp(self): \"\"\"Test standard Discogs position", "self.assertEqual(t[3].medium_index, 1) self.assertEqual(t[3].index, 4) self.assertEqual(t[3].medium_total, 1) def test_parse_position(self): \"\"\"Test the", "'12')), ('12.a', (None, '12', 'A')), ('12.34', (None, '12', '34')), ('1ab',", "self.assertEqual(t[0].medium_total, 2) self.assertEqual(t[1].medium_index, 2) self.assertEqual(t[1].index, 2) self.assertEqual(t[1].medium_total, 2) self.assertEqual(t[2].medium_index, 1)", "restriction, including # without limitation the rights to use, copy,", "'TRACK GROUP TITLE' release.data['tracklist'][1]['sub_tracks'] = [ 
self._make_track('TITLE ONE', '2.1', '01:01'),", "= self._make_release_from_positions(['1', '2']) d = DiscogsPlugin().get_album_info(release) t = d.tracks self.assertEqual(d.mediums,", "'1', None)), ('1.12', (None, '1', '12')), ('12.a', (None, '12', 'A')),", "hereby granted, free of charge, to any person obtaining #", "self.assertEqual(len(d.tracks), 1) self.assertEqual(d.tracks[0].title, 'TRACK GROUP TITLE') def test_parse_tracklist_subtracks_nested_logical(self): \"\"\"Test parsing", "TITLE CD1' # Track 4: Index track with medium title", "self.assertEqual(d.mediums, 1) self.assertEqual(d.tracks[0].disctitle, 'MEDIUM TITLE') self.assertEqual(len(d.tracks), 1) self.assertEqual(d.tracks[0].title, 'TRACK GROUP", "'3']) d = DiscogsPlugin().get_album_info(release) self.assertEqual(d.mediums, 1) self.assertEqual(len(d.tracks), 3) def test_parse_tracklist_with_sides(self):", "= self._make_release_from_positions(['1', '', '3']) # Track 2: Index track with", "self._make_release(tracks=tracks) d = DiscogsPlugin().get_album_info(release) t = d.tracks self.assertEqual(d.media, 'FORMAT') self.assertEqual(t[0].media,", "the Software without restriction, including # without limitation the rights", "seems to return it. Values: 'track' for regular tracks, #", "'2.1', '2.2', '3']) d = DiscogsPlugin().get_album_info(release) self.assertEqual(d.mediums, 1) self.assertEqual(len(d.tracks), 3)", "not real tracks - 12.13.2). track['type_'] = type_ return track", "2) self.assertEqual(t[1].index, 2) self.assertEqual(t[1].medium_total, 2) def test_parse_track_indices_several_media(self): release = self._make_release_from_positions(['1-1',", "test._common import Bag from test.helper import capture_log from beetsplug.discogs import", "def test_parse_medium_numbers_two_mediums_two_sided(self): release = self._make_release_from_positions(['A1', 'B1', 'C1']) d = DiscogsPlugin().get_album_info(release)", "standard Discogs position 12.2.9#4: \"multiple CDs\".\"\"\" release = self._make_release_from_positions(['1-1', '1-2',", "4) self.assertEqual(t[3].medium_total, 1) def test_parse_position(self): \"\"\"Test the conversion of discogs", "position 12.2.9#6: \"extra material\".\"\"\" release = self._make_release_from_positions(['1', '2', 'Video 1'])", "t = d.tracks self.assertEqual(d.mediums, 2) self.assertEqual(t[0].medium, 1) self.assertEqual(t[0].medium_total, 1) self.assertEqual(t[1].medium,", "release.data['tracklist'][0]['title'] = 'MEDIUM TITLE' # Track 2: Index track with", "furnished to do so, subject to # the following conditions:", "beets. # Copyright 2016, <NAME>. # # Permission is hereby", "'FORMAT') self.assertEqual(t[0].media, d.media) self.assertEqual(t[1].media, d.media) def test_parse_medium_numbers_single_medium(self): release = self._make_release_from_positions(['1',", "DiscogsPlugin().get_album_info(release) self.assertEqual(d.mediums, 1) # 2 sides = 1 LP self.assertEqual(len(d.tracks),", "medium title release.data['tracklist'][0]['title'] = 'MEDIUM TITLE' # Track 2: Index", "self.assertEqual(d.mediums, 1) self.assertEqual(len(d.tracks), 3) release = self._make_release_from_positions(['A1', 'A2.1', 'A2.2', 'A3'])", "index track that are logical subtracks (ie. 
should be grouped", "with track group title, and sub_tracks release.data['tracklist'][1]['title'] = 'TRACK GROUP", "'ARTIST NAME') self.assertEqual(d.album, 'TITLE') self.assertEqual(len(d.tracks), 1) def test_parse_release_without_required_fields(self): \"\"\"Test parsing", "12.2.9#6: \"extra material\".\"\"\" release = self._make_release_from_positions(['1', '2', 'Video 1']) d", "'id': 'ALBUM ID', 'uri': 'ALBUM URI', 'title': 'ALBUM TITLE', 'year':", "'2']) d = DiscogsPlugin().get_album_info(release) t = d.tracks self.assertEqual(d.mediums, 1) self.assertEqual(t[0].medium,", "self._make_release_from_positions(['A1', 'A2.1', 'A2.2', 'A3']) d = DiscogsPlugin().get_album_info(release) self.assertEqual(d.mediums, 1) self.assertEqual(len(d.tracks),", "the tests on this class.\"\"\" data = { 'id': 'ALBUM", "standard Discogs position 12.2.9#5: \"sub tracks, letter\".\"\"\" release = self._make_release_from_positions(['A1',", "title release.data['tracklist'][0]['title'] = 'MEDIUM TITLE' # Track 2: Index track", "into a single track). \"\"\" release = self._make_release_from_positions(['1', '', '3'])", "= DiscogsPlugin().get_album_info(release) self.assertEqual(d, None) self.assertIn('Release does not contain the required", "inside a index track that are physical subtracks (ie. should", "type_ is not None: # Test samples on discogs_client do", "'A2.2', 'A3']) d = DiscogsPlugin().get_album_info(release) self.assertEqual(d.mediums, 1) self.assertEqual(len(d.tracks), 3) def", "\"\"\"Test standard Discogs position 12.2.9#5: \"sub tracks, letter\".\"\"\" release =", "2) # 3 sides = 1 LP + 1 LP", "track def _make_release_from_positions(self, positions): \"\"\"Return a Bag that mimics a", "self.assertEqual(d.mediums, 1) # 2 sides = 1 LP self.assertEqual(len(d.tracks), 4)", "any person obtaining # a copy of this software and", "(None, '12', 'A')), ('12.34', (None, '12', '34')), ('1ab', (None, '1',", "4) def test_parse_tracklist_multiple_cd(self): \"\"\"Test standard Discogs position 12.2.9#4: \"multiple CDs\".\"\"\"", "test_parse_tracklist_non_standard(self): \"\"\"Test non standard Discogs position.\"\"\" release = self._make_release_from_positions(['I', 'II',", "not contain the required fields', logs[0]) def suite(): return unittest.TestLoader().loadTestsFromName(__name__)", "track group title release.data['tracklist'][1]['title'] = 'TRACK GROUP TITLE' d =", "release with the minimal amount of information.\"\"\" data = {'id':", "\"\"\"Tests for discogs plugin. 
\"\"\" from __future__ import division, absolute_import,", "self.assertEqual(t[0].medium_total, 2) self.assertEqual(t[1].medium, 1) self.assertEqual(t[0].medium_total, 2) def test_parse_medium_numbers_two_mediums(self): release =", "subtracks that include index tracks.\"\"\" release = self._make_release_from_positions(['', '', '1.1',", "class.\"\"\" data = { 'id': 'ALBUM ID', 'uri': 'ALBUM URI',", "have a 'type_' field, but # the API seems to", "\"\"\"Test the conversion of discogs `position` to medium, medium_index and", "LP self.assertEqual(len(d.tracks), 4) def test_parse_tracklist_multiple_cd(self): \"\"\"Test standard Discogs position 12.2.9#4:", "TITLE CD2' d = DiscogsPlugin().get_album_info(release) self.assertEqual(d.mediums, 2) self.assertEqual(d.tracks[0].disctitle, 'MEDIUM TITLE", "= Bag(data=data, title=data['title'], artists=[Bag(data=d) for d in data['artists']]) d =", "required fields', logs[0]) def suite(): return unittest.TestLoader().loadTestsFromName(__name__) if __name__ ==", "('12-34', ('12-', '34', None)), ('CD1-1', ('CD1-', '1', None)), ('1.12', (None,", "self.assertIn('Release does not contain the required fields', logs[0]) def suite():", "granted, free of charge, to any person obtaining # a", "def _make_track(self, title, position='', duration='', type_=None): track = { 'title':", "4) def test_parse_tracklist_subtracks_dot(self): \"\"\"Test standard Discogs position 12.2.9#5: \"sub tracks,", "'2.2', '3']) d = DiscogsPlugin().get_album_info(release) self.assertEqual(d.mediums, 1) self.assertEqual(len(d.tracks), 3) release", "self._make_release_from_positions(['1', '2.1', '2.2', '3']) d = DiscogsPlugin().get_album_info(release) self.assertEqual(d.mediums, 1) self.assertEqual(len(d.tracks),", "Bag(data=data, # Make some fields available as properties, as they", "= self._make_release_from_positions(['', '', '1.1', '1.2']) # Track 1: Index track", "test_parse_tracklist_subtracks_nested_logical(self): \"\"\"Test parsing of subtracks defined inside a index track", "'2', '3']) d = DiscogsPlugin().get_album_info(release) self.assertEqual(d.mediums, 1) self.assertEqual(len(d.tracks), 3) def", "self.assertEqual(t[1].medium, 2) self.assertEqual(t[1].medium_total, 1) def test_parse_medium_numbers_two_mediums_two_sided(self): release = self._make_release_from_positions(['A1', 'B1',", "self._make_release_from_positions(['', '1-1', '1-2', '', '2-1']) # Track 1: Index track", "is not None: # Test samples on discogs_client do not", "\"Software\"), to deal in the Software without restriction, including #", "not be grouped together). \"\"\" release = self._make_release_from_positions(['1', '', '4'])", "'3001', 'artists': [{ 'name': 'ARTIST NAME', 'id': 'ARTIST ID', 'join':", "'ARTIST NAME', 'id': 321, 'join': ''}], 'title': 'TITLE'} release =", "software and associated documentation files (the # \"Software\"), to deal", "# Non-standard ('IV', ('IV', None, None)), ] d = DiscogsPlugin()", "subtracks (ie. should not be grouped together). 
\"\"\" release =", "] d = DiscogsPlugin().get_album_info(release) self.assertEqual(d.mediums, 1) self.assertEqual(len(d.tracks), 4) self.assertEqual(d.tracks[1].title, 'TITLE", "DiscogsPlugin().get_album_info(release) self.assertEqual(d, None) self.assertIn('Release does not contain the required fields',", "if type_ is not None: # Test samples on discogs_client", "= d.tracks self.assertEqual(d.media, 'FORMAT') self.assertEqual(t[0].media, d.media) self.assertEqual(t[1].media, d.media) def test_parse_medium_numbers_single_medium(self):", "= type_ return track def _make_release_from_positions(self, positions): \"\"\"Return a Bag", "of subtracks defined inside a index track that are logical", "not None: # Test samples on discogs_client do not have", "'ALBUM TITLE', 'year': '3001', 'artists': [{ 'name': 'ARTIST NAME', 'id':", "2016, <NAME>. # # Permission is hereby granted, free of", "This file is part of beets. # Copyright 2016, <NAME>.", "1) def test_parse_release_without_required_fields(self): \"\"\"Test parsing of a release that does", "[ self._make_track('TITLE ONE', '2.1', '01:01'), self._make_track('TITLE TWO', '2.2', '02:02') ]", "standard Discogs position 12.2.9#2: \"with sides\".\"\"\" release = self._make_release_from_positions(['A1', 'A2',", "disc titles.\"\"\" release = self._make_release_from_positions(['', '1-1', '1-2', '', '2-1']) #", "that are physical subtracks (ie. should not be grouped together).", "= [self._make_track('TITLE%s' % i, position) for (i, position) in enumerate(positions,", "2: Index track with track group title release.data['tracklist'][1]['title'] = 'TRACK", "= d.tracks self.assertEqual(d.mediums, 2) self.assertEqual(t[0].medium, 1) self.assertEqual(t[0].medium_total, 1) self.assertEqual(t[1].medium, 2)", "of tuples (discogs_position, (medium, medium_index, subindex) positions = [('1', (None,", "'1-2', '2-1', '3-1']) d = DiscogsPlugin().get_album_info(release) self.assertEqual(d.mediums, 3) self.assertEqual(len(d.tracks), 4)", "'A3']) d = DiscogsPlugin().get_album_info(release) self.assertEqual(d.mediums, 1) self.assertEqual(len(d.tracks), 3) release =", "release = self._make_release_from_positions(['I', 'II', 'III', 'IV']) d = DiscogsPlugin().get_album_info(release) self.assertEqual(d.mediums,", "1) self.assertEqual(t[0].medium_total, 2) self.assertEqual(t[1].medium_index, 2) self.assertEqual(t[1].index, 2) self.assertEqual(t[1].medium_total, 2) self.assertEqual(t[2].medium_index,", "'ALBUM URI', 'title': 'ALBUM TITLE', 'year': '3001', 'artists': [{ 'name':", "'A2', 'B1', 'C1']) d = DiscogsPlugin().get_album_info(release) self.assertEqual(d.mediums, 2) # 3", "TITLE') self.assertEqual(len(d.tracks), 1) self.assertEqual(d.tracks[0].title, 'TRACK GROUP TITLE') def test_parse_tracklist_subtracks_nested_logical(self): \"\"\"Test", "is part of beets. # Copyright 2016, <NAME>. # #", "of the Software. \"\"\"Tests for discogs plugin. 
\"\"\" from __future__", "= d.tracks self.assertEqual(d.mediums, 3) self.assertEqual(t[0].medium_index, 1) self.assertEqual(t[0].index, 1) self.assertEqual(t[0].medium_total, 2)", "= 'MEDIUM TITLE CD2' d = DiscogsPlugin().get_album_info(release) self.assertEqual(d.mediums, 2) self.assertEqual(d.tracks[0].disctitle,", "ONE') self.assertEqual(d.tracks[2].title, 'TITLE TWO') def test_parse_tracklist_disctitles(self): \"\"\"Test parsing of index", "deal in the Software without restriction, including # without limitation", "'', '2-1']) # Track 1: Index track with medium title", "'duration': duration } if type_ is not None: # Test", "2) self.assertEqual(t[1].medium_index, 2) self.assertEqual(t[2].medium, 2) self.assertEqual(t[2].medium_total, 1) self.assertEqual(t[2].medium_index, 1) def", "test_parse_position(self): \"\"\"Test the conversion of discogs `position` to medium, medium_index", "DiscogsPlugin().get_album_info(release) self.assertEqual(d.mediums, 1) self.assertEqual(len(d.tracks), 3) def test_parse_tracklist_with_sides(self): \"\"\"Test standard Discogs", "2'], 'name': 'FORMAT', 'qty': 1 }], 'styles': [ 'STYLE1', 'STYLE2'", "Discogs position 12.2.9#5: \"sub tracks, dots\".\"\"\" release = self._make_release_from_positions(['1', '2.1',", "('A12', ('A', '12', None)), ('12-34', ('12-', '34', None)), ('CD1-1', ('CD1-',", "NAME', 'id': 'ARTIST ID', 'join': ',' }], 'formats': [{ 'descriptions':", "logs[0]) def suite(): return unittest.TestLoader().loadTestsFromName(__name__) if __name__ == '__main__': unittest.main(defaultTest='suite')", "elements on the returned Bag is incomplete, including just those", "'', '3']) # Track 2: Index track with track group", "release = Bag(data={}, refresh=lambda *args: None) with capture_log() as logs:", "-*- coding: utf-8 -*- # This file is part of", "self.assertEqual(len(d.tracks), 3) def test_parse_tracklist_with_sides(self): \"\"\"Test standard Discogs position 12.2.9#2: \"with", "self.assertEqual(d.mediums, 1) self.assertEqual(len(d.tracks), 3) def test_parse_tracklist_subtracks_extra_material(self): \"\"\"Test standard Discogs position", "TITLE') def test_parse_tracklist_subtracks_nested_physical(self): \"\"\"Test parsing of subtracks defined inside a", "Permission is hereby granted, free of charge, to any person", "def test_parse_media_for_tracks(self): tracks = [self._make_track('TITLE ONE', '1', '01:01'), self._make_track('TITLE TWO',", "self.assertEqual(d, None) self.assertIn('Release does not contain the required fields', logs[0])", "title=data['title'], artists=[Bag(data=d) for d in data['artists']]) def _make_track(self, title, position='',", "a release that does not have the required fields.\"\"\" release", "import Bag from test.helper import capture_log from beetsplug.discogs import DiscogsPlugin", "single track). \"\"\" release = self._make_release_from_positions(['1', '', '3']) # Track", "mimics a discogs_client.Release. The list of elements on the returned", "= self._make_release_from_positions(['1', '2', 'Video 1']) d = DiscogsPlugin().get_album_info(release) self.assertEqual(d.mediums, 2)", "= 'MEDIUM TITLE CD1' # Track 4: Index track with", "def _make_release(self, tracks=None): \"\"\"Returns a Bag that mimics a discogs_client.Release.", "track that are physical subtracks (ie. 
should not be grouped", "TITLE' release.data['tracklist'][1]['sub_tracks'] = [ self._make_track('TITLE ONE', '2', '01:01'), self._make_track('TITLE TWO',", "'TITLE TWO') def test_parse_tracklist_disctitles(self): \"\"\"Test parsing of index tracks that", "d = DiscogsPlugin().get_album_info(release) self.assertEqual(d.mediums, 1) self.assertEqual(len(d.tracks), 3) release = self._make_release_from_positions(['A1',", "TITLE CD2') self.assertEqual(len(d.tracks), 3) def test_parse_minimal_release(self): \"\"\"Test parsing of a", "The above copyright notice and this permission notice shall be", "'2', '01:01'), self._make_track('TITLE TWO', '3', '02:02') ] d = DiscogsPlugin().get_album_info(release)", "track = { 'title': title, 'position': position, 'duration': duration }", "texts (ie. not real tracks - 12.13.2). track['type_'] = type_", "part of beets. # Copyright 2016, <NAME>. # # Permission", "release.data['tracklist'][1]['sub_tracks'] = [ self._make_track('TITLE ONE', '2', '01:01'), self._make_track('TITLE TWO', '3',", "data = { 'id': 'ALBUM ID', 'uri': 'ALBUM URI', 'title':", "self._make_release_from_positions(['1', '2']) d = DiscogsPlugin().get_album_info(release) t = d.tracks self.assertEqual(t[0].medium_index, 1)", "standard Discogs position.\"\"\" release = self._make_release_from_positions(['I', 'II', 'III', 'IV']) d", "'artists': [{'name': 'ARTIST NAME', 'id': 321, 'join': ''}], 'title': 'TITLE'}", "'1', None)), ('A12', ('A', '12', None)), ('12-34', ('12-', '34', None)),", "as they are # accessed by DiscogsPlugin methods. title=data['title'], artists=[Bag(data=d)", "None)), ] d = DiscogsPlugin() for position, expected in positions:", "self.assertEqual(t[1].medium, 1) self.assertEqual(t[0].medium_total, 2) def test_parse_medium_numbers_two_mediums(self): release = self._make_release_from_positions(['1-1', '2-1'])", "d = DiscogsPlugin().get_album_info(release) t = d.tracks self.assertEqual(t[0].medium_index, 1) self.assertEqual(t[0].index, 1)", "including just those required for the tests on this class.\"\"\"", "= DiscogsPlugin().get_album_info(release) self.assertEqual(d.mediums, 1) # 2 sides = 1 LP", "DiscogsPlugin().get_album_info(release) self.assertEqual(d.mediums, 1) self.assertEqual(len(d.tracks), 3) release = self._make_release_from_positions(['A1', 'A2.1', 'A2.2',", "'tracklist': [self._make_track('A', '1', '01:01')], 'artists': [{'name': 'ARTIST NAME', 'id': 321,", "# distribute, sublicense, and/or sell copies of the Software, and", "standard Discogs position 12.2.9#6: \"extra material\".\"\"\" release = self._make_release_from_positions(['1', '2',", "self.assertEqual(t[2].index, 3) self.assertEqual(t[2].medium_total, 1) self.assertEqual(t[3].medium_index, 1) self.assertEqual(t[3].index, 4) self.assertEqual(t[3].medium_total, 1)", "= DiscogsPlugin().get_album_info(release) self.assertEqual(d.mediums, 2) self.assertEqual(d.tracks[0].disctitle, 'MEDIUM TITLE CD1') self.assertEqual(d.tracks[1].disctitle, 'MEDIUM", "a tracklist where tracks have the specified `positions`.\"\"\" tracks =", "Non-standard ('IV', ('IV', None, None)), ] d = DiscogsPlugin() for", "if tracks: for recording in tracks: data['tracklist'].append(recording) return Bag(data=data, #", "(ie. should be grouped together into a single track). 
\"\"\"", "1) # 2 sides = 1 LP self.assertEqual(len(d.tracks), 4) def", "DiscogsPlugin().get_album_info(release) t = d.tracks self.assertEqual(d.mediums, 3) self.assertEqual(t[0].medium_index, 1) self.assertEqual(t[0].index, 1)", "'02:02') ] d = DiscogsPlugin().get_album_info(release) self.assertEqual(d.mediums, 1) self.assertEqual(len(d.tracks), 4) self.assertEqual(d.tracks[1].title,", "ID', 'join': ',' }], 'formats': [{ 'descriptions': ['FORMAT DESC 1',", "d.media) def test_parse_medium_numbers_single_medium(self): release = self._make_release_from_positions(['1', '2']) d = DiscogsPlugin().get_album_info(release)", "2) self.assertEqual(t[1].medium_total, 2) self.assertEqual(t[2].medium_index, 1) self.assertEqual(t[2].index, 3) self.assertEqual(t[2].medium_total, 1) self.assertEqual(t[3].medium_index,", "1']) d = DiscogsPlugin().get_album_info(release) self.assertEqual(d.mediums, 2) self.assertEqual(len(d.tracks), 3) def test_parse_tracklist_subtracks_indices(self):" ]
[ "self @return item at front of self.stack\"\"\" def front(self): front", "@return dequeued item that was dequeued\"\"\" def get(self): self.rotate(1) dequeued", "front of self @return item at front of self.stack\"\"\" def", "dequeued \"\"\"Rotates the queue {@code rotation} times @param rotation number", "to rotate queue\"\"\" def rotate(self, rotation): for i in range(rotation):", "rotate(self, rotation): for i in range(rotation): temp = self.stack[0] self.stack", "number of times to rotate queue\"\"\" def rotate(self, rotation): for", "- 1) return front \"\"\"Returns the length of this.stack\"\"\" def", "<filename>data_structures/queue/queue_on_pseudo_stack.py \"\"\"Queue represented by a pseudo stack (represented by a", "that was dequeued\"\"\" def get(self): self.rotate(1) dequeued = self.stack[self.length -", "times to rotate queue\"\"\" def rotate(self, rotation): for i in", "0 @return dequeued item that was dequeued\"\"\" def get(self): self.rotate(1)", "in range(rotation): temp = self.stack[0] self.stack = self.stack[1:] self.put(temp) self.length", "enqueue\"\"\" def put(self, item): self.stack.append(item) self.length = self.length + 1", "0 def __str__(self): printed = \"<\" + str(self.stack)[1:-1] + \">\"", "def __str__(self): printed = \"<\" + str(self.stack)[1:-1] + \">\" return", "= self.length + 1 \"\"\"Dequeues {@code item} @requirement: |self.length| >", "get(self): self.rotate(1) dequeued = self.stack[self.length - 1] self.stack = self.stack[:-1]", "def put(self, item): self.stack.append(item) self.length = self.length + 1 \"\"\"Dequeues", "by a pseudo stack (represented by a list with pop", "= self.stack[0] self.stack = self.stack[1:] self.put(temp) self.length = self.length -", "{@code item} @requirement: |self.length| > 0 @return dequeued item that", "rotation number of times to rotate queue\"\"\" def rotate(self, rotation):", "self.length - 1 return dequeued \"\"\"Rotates the queue {@code rotation}", "queue\"\"\" def rotate(self, rotation): for i in range(rotation): temp =", "@requirement: |self.length| > 0 @return dequeued item that was dequeued\"\"\"", "Queue: def __init__(self): self.stack = [] self.length = 0 def", "item): self.stack.append(item) self.length = self.length + 1 \"\"\"Dequeues {@code item}", "self.rotate(self.length - 1) return front \"\"\"Returns the length of this.stack\"\"\"", "rotation} times @param rotation number of times to rotate queue\"\"\"", "with pop and append)\"\"\" class Queue: def __init__(self): self.stack =", "dequeued\"\"\" def get(self): self.rotate(1) dequeued = self.stack[self.length - 1] self.stack", "1 \"\"\"Dequeues {@code item} @requirement: |self.length| > 0 @return dequeued", "of times to rotate queue\"\"\" def rotate(self, rotation): for i", "append)\"\"\" class Queue: def __init__(self): self.stack = [] self.length =", "\"\"\"Enqueues {@code item} @param item item to enqueue\"\"\" def put(self,", "times @param rotation number of times to rotate queue\"\"\" def", "stack (represented by a list with pop and append)\"\"\" class", "the queue {@code rotation} times @param rotation number of times", "for i in range(rotation): temp = self.stack[0] self.stack = self.stack[1:]", "temp = self.stack[0] self.stack = self.stack[1:] self.put(temp) self.length = self.length", "__str__(self): printed = \"<\" + str(self.stack)[1:-1] + \">\" return printed", "self.put(front) self.rotate(self.length - 1) return front \"\"\"Returns the length of", "self.stack.append(item) self.length = self.length + 1 \"\"\"Dequeues {@code item} 
@requirement:", "self.length = 0 def __str__(self): printed = \"<\" + str(self.stack)[1:-1]", "rotate queue\"\"\" def rotate(self, rotation): for i in range(rotation): temp", "+ str(self.stack)[1:-1] + \">\" return printed \"\"\"Enqueues {@code item} @param", "pop and append)\"\"\" class Queue: def __init__(self): self.stack = []", "item to enqueue\"\"\" def put(self, item): self.stack.append(item) self.length = self.length", "+ 1 \"\"\"Dequeues {@code item} @requirement: |self.length| > 0 @return", "self.stack[:-1] self.rotate(self.length - 1) self.length = self.length - 1 return", "- 1 \"\"\"Reports item at the front of self @return", "represented by a pseudo stack (represented by a list with", "return dequeued \"\"\"Rotates the queue {@code rotation} times @param rotation", "(represented by a list with pop and append)\"\"\" class Queue:", "self.stack = self.stack[1:] self.put(temp) self.length = self.length - 1 \"\"\"Reports", "= self.stack[self.length - 1] self.stack = self.stack[:-1] self.rotate(self.length - 1)", "item item to enqueue\"\"\" def put(self, item): self.stack.append(item) self.length =", "\"\"\"Reports item at the front of self @return item at", "item} @param item item to enqueue\"\"\" def put(self, item): self.stack.append(item)", "item at the front of self @return item at front", "self.stack[0] self.stack = self.stack[1:] self.put(temp) self.length = self.length - 1", "self.length = self.length - 1 \"\"\"Reports item at the front", "item that was dequeued\"\"\" def get(self): self.rotate(1) dequeued = self.stack[self.length", "rotation): for i in range(rotation): temp = self.stack[0] self.stack =", "= self.get() self.put(front) self.rotate(self.length - 1) return front \"\"\"Returns the", "{@code item} @param item item to enqueue\"\"\" def put(self, item):", "@param item item to enqueue\"\"\" def put(self, item): self.stack.append(item) self.length", "printed \"\"\"Enqueues {@code item} @param item item to enqueue\"\"\" def", "- 1 return dequeued \"\"\"Rotates the queue {@code rotation} times", "\"<\" + str(self.stack)[1:-1] + \">\" return printed \"\"\"Enqueues {@code item}", "\">\" return printed \"\"\"Enqueues {@code item} @param item item to", "queue {@code rotation} times @param rotation number of times to", "def __init__(self): self.stack = [] self.length = 0 def __str__(self):", "\"\"\"Queue represented by a pseudo stack (represented by a list", "range(rotation): temp = self.stack[0] self.stack = self.stack[1:] self.put(temp) self.length =", "item} @requirement: |self.length| > 0 @return dequeued item that was", "- 1] self.stack = self.stack[:-1] self.rotate(self.length - 1) self.length =", "front = self.get() self.put(front) self.rotate(self.length - 1) return front \"\"\"Returns", "= [] self.length = 0 def __str__(self): printed = \"<\"", "= self.stack[1:] self.put(temp) self.length = self.length - 1 \"\"\"Reports item", "and append)\"\"\" class Queue: def __init__(self): self.stack = [] self.length", "a pseudo stack (represented by a list with pop and", "self.stack[1:] self.put(temp) self.length = self.length - 1 \"\"\"Reports item at", "self.get() self.put(front) self.rotate(self.length - 1) return front \"\"\"Returns the length", "self.rotate(1) dequeued = self.stack[self.length - 1] self.stack = self.stack[:-1] self.rotate(self.length", "list with pop and append)\"\"\" class Queue: def __init__(self): self.stack", "self.length = self.length - 1 return dequeued \"\"\"Rotates the queue", "def rotate(self, rotation): for i in range(rotation): temp = 
self.stack[0]", "1 return dequeued \"\"\"Rotates the queue {@code rotation} times @param", "def front(self): front = self.get() self.put(front) self.rotate(self.length - 1) return", "self.stack[self.length - 1] self.stack = self.stack[:-1] self.rotate(self.length - 1) self.length", "1) self.length = self.length - 1 return dequeued \"\"\"Rotates the", "was dequeued\"\"\" def get(self): self.rotate(1) dequeued = self.stack[self.length - 1]", "def get(self): self.rotate(1) dequeued = self.stack[self.length - 1] self.stack =", "self.stack\"\"\" def front(self): front = self.get() self.put(front) self.rotate(self.length - 1)", "1) return front \"\"\"Returns the length of this.stack\"\"\" def size(self):", "item at front of self.stack\"\"\" def front(self): front = self.get()", "+ \">\" return printed \"\"\"Enqueues {@code item} @param item item", "printed = \"<\" + str(self.stack)[1:-1] + \">\" return printed \"\"\"Enqueues", "= self.length - 1 return dequeued \"\"\"Rotates the queue {@code", "|self.length| > 0 @return dequeued item that was dequeued\"\"\" def", "self.stack = [] self.length = 0 def __str__(self): printed =", "self.put(temp) self.length = self.length - 1 \"\"\"Reports item at the", "self.length = self.length + 1 \"\"\"Dequeues {@code item} @requirement: |self.length|", "at the front of self @return item at front of", "\"\"\"Rotates the queue {@code rotation} times @param rotation number of", "class Queue: def __init__(self): self.stack = [] self.length = 0", "front of self.stack\"\"\" def front(self): front = self.get() self.put(front) self.rotate(self.length", "1] self.stack = self.stack[:-1] self.rotate(self.length - 1) self.length = self.length", "self.rotate(self.length - 1) self.length = self.length - 1 return dequeued", "of self @return item at front of self.stack\"\"\" def front(self):", "{@code rotation} times @param rotation number of times to rotate", "pseudo stack (represented by a list with pop and append)\"\"\"", "return printed \"\"\"Enqueues {@code item} @param item item to enqueue\"\"\"", "front \"\"\"Returns the length of this.stack\"\"\" def size(self): return self.length", "front(self): front = self.get() self.put(front) self.rotate(self.length - 1) return front", "self.stack = self.stack[:-1] self.rotate(self.length - 1) self.length = self.length -", "dequeued = self.stack[self.length - 1] self.stack = self.stack[:-1] self.rotate(self.length -", "dequeued item that was dequeued\"\"\" def get(self): self.rotate(1) dequeued =", "i in range(rotation): temp = self.stack[0] self.stack = self.stack[1:] self.put(temp)", "= 0 def __str__(self): printed = \"<\" + str(self.stack)[1:-1] +", "of self.stack\"\"\" def front(self): front = self.get() self.put(front) self.rotate(self.length -", "= self.length - 1 \"\"\"Reports item at the front of", "= \"<\" + str(self.stack)[1:-1] + \">\" return printed \"\"\"Enqueues {@code", "= self.stack[:-1] self.rotate(self.length - 1) self.length = self.length - 1", "the front of self @return item at front of self.stack\"\"\"", "1 \"\"\"Reports item at the front of self @return item", "\"\"\"Dequeues {@code item} @requirement: |self.length| > 0 @return dequeued item", "> 0 @return dequeued item that was dequeued\"\"\" def get(self):", "@param rotation number of times to rotate queue\"\"\" def rotate(self,", "a list with pop and append)\"\"\" class Queue: def __init__(self):", "at front of self.stack\"\"\" def front(self): front = self.get() self.put(front)", "__init__(self): self.stack = [] self.length = 0 def __str__(self): printed", 
"self.length - 1 \"\"\"Reports item at the front of self", "return front \"\"\"Returns the length of this.stack\"\"\" def size(self): return", "self.length + 1 \"\"\"Dequeues {@code item} @requirement: |self.length| > 0", "to enqueue\"\"\" def put(self, item): self.stack.append(item) self.length = self.length +", "by a list with pop and append)\"\"\" class Queue: def", "- 1) self.length = self.length - 1 return dequeued \"\"\"Rotates", "[] self.length = 0 def __str__(self): printed = \"<\" +", "put(self, item): self.stack.append(item) self.length = self.length + 1 \"\"\"Dequeues {@code", "str(self.stack)[1:-1] + \">\" return printed \"\"\"Enqueues {@code item} @param item", "@return item at front of self.stack\"\"\" def front(self): front =" ]
[ "width, in_dim, out_dim) # TODO: Add check for Theano dim", "in_dim, out_dim) # TODO: Add check for Theano dim ordering.", "#print('mylist is ', mylist, 'the from_ is ', from_, 'node", "= node # start here prev_output = node.name prev_layer_filters =", "fallback = 1) op = 'DepthwiseConv2dNative' size = sec.getint('size', fallback", "node.variance = bn_weights[2] mydict[node.name] = node prev_output = node.name #", "node.input # node.attr = [] node.bias = bias_data mydict[node.name] =", "= node.name #print('pooling ', vars(node)) mylist.append(section) elif section.startswith('route'): ids =", "mylist[i]) for i in ids]) mydict[node.name] = node prev_output =", "(weights_size * 4), section+'-weights')) count += weights_size # DarkNet conv_weights", "else: raise ValueError( 'Unsupported section header type: {}'.format(section)) print(' out", "node.stride) node.padding = sec.getint('padding', fallback = (node.size-1)//2) if section.startswith('pooling'): node.mode", "int(cfg_parser['net_0']['width']) node.height = int(cfg_parser['net_0']['height']) node.channels = int(cfg_parser['net_0']['channels']) node.filters = node.channels", "batch_normalize = sec.getint('batch_normalize', 0) # padding='same' is equivalent to Darknet", "sec.getint('output', 2) bias_data = np.ndarray( shape=[filters], dtype=np.float32, buffer=readfile(weights_file, (filters *", "len)) return f.read(len) def buildGraph(config_path, weights_path): unique_config_file = unique_config_sections(config_path) cfg_parser", "definition. else: raise ValueError( 'Unsupported section header type: {}'.format(section)) print('", "= fc_data mydict[node.name] = node prev_output = node.name prev_layer_filters =", "shape is \", conv_weights.shape) conv_weights = [conv_weights] if batch_normalize else", "{}'.format( activation, section)) if section.startswith('activation'): mylist.append(section) elif re.match(r'^(convolutional|depthwise|groupwise)_\\d+$', section): if", "= [] mydict[node.name] = node prev_output = node.name mylist.append(section) elif", "prev_layer_filters no change else: raise ValueError( 'Unknown activation function `{}`", "conv_bias ] # Create nodes #conv_layer = np.zeros([1, 1, filters],", "section has relative reference mylist.append(name) elif section.startswith('connected'): activation = sec.get('activation',", "import MyGraph from collections import OrderedDict def unique_config_sections(config_file): \"\"\"Convert all", "prev_layer_filters) print('loaded {} bytes in weights file'.format(count*4)) mygraph = MyGraph(mydict)", "prev_layer_filters = node.filters elif re.match(r'^(shortcut)_\\d+$', section): activation = sec.get('activation', fallback", "section + '_activation' tmp_parser.add_section(name) tmp_parser.set(name, 'activation', activation) sec_q.put(tmp_parser[name]) # NOTE:", "int(cfg_parser[section]['softmax']) node.anchors = [float(i) for i in re.split(r',', cfg_parser[section]['anchors'])] #print(vars(node))", "= node.filters elif re.match(r'^(shortcut)_\\d+$', section): activation = sec.get('activation', fallback =", "with configparser. 
\"\"\" from collections import defaultdict import io section_counters", "no change else: raise ValueError( 'Unknown activation function `{}` in", "import numpy as np import re,sys,os from graph import MyGraph", "+= 3 * filters # TODO: Keras BatchNormalization mistakenly refers", "# start here prev_output = node.name prev_layer_filters = node.channels mylist.append(section)", "conv_weights, conv_bias ] # Create nodes #conv_layer = np.zeros([1, 1,", "= 0) activation = sec.get('activation', fallback = 'logistic') batch_normalize =", "node.global_pooling = 0 elif section.startswith('avgpool'): node.mode = 'avg' node.global_pooling =", "= node.input mydict[node.name] = node prev_output = node.name if activation", "node.softmax = int(cfg_parser[section]['softmax']) node.anchors = [float(i) for i in re.split(r',',", "node.op = 'FusedBatchNorm' node.input = [prev_output] node.input_norm = node.input #node.attr", "node.name mylist.append(section) elif re.match(r'^(pooling|maxpool|avgpool)_\\d+$', section): node = MyGraph.MyNode() node.stride =", "pass elif section.startswith('cost'): pass # Configs not currently handled during", "filters node = MyGraph.MyNode() node.name = section + '_bias' node.op", "%d bytes\" % (msg, len)) return f.read(len) def buildGraph(config_path, weights_path):", "stride = sec.getint('stride', fallback = 1) pad = sec.getint('pad', fallback", "% (msg, len)) return f.read(len) def buildGraph(config_path, weights_path): unique_config_file =", "activation == 'linear' or activation == 'leaky' or activation ==", "node = MyGraph.MyNode() node.name = section node.op = 'DarknetNet' node.input", "+ '_' + str(section_counters[section]) section_counters[section] += 1 line = line.replace(section,", "'rb') # read out major, minor, revision, net.seen readfile(weights_file, (4*4),", "bn_weights = np.ndarray( shape=(3, filters), dtype=np.float32, buffer=readfile(weights_file, (filters * 12),", "'valid' if pad: padding = size//2 # Setting weights. #", "section + '_bias' node.op = 'BiasAdd' node.input = [prev_output] node.input_norm", "(out_dim, in_dim, height, width) # We would like to set", "mylist.append(section) prev_layer_filters = node.filters elif re.match(r'^(shortcut)_\\d+$', section): activation = sec.get('activation',", "0 elif section.startswith('maxpool'): node.mode = 'max' node.global_pooling = 0 elif", "= 'FusedBatchNorm' node.input = [prev_output] node.input_norm = node.input #node.attr =", "= bn_weights[0] node.beta = conv_bias node.mean = bn_weights[1] node.variance =", "change else: node = MyGraph.MyNode() node.name = section + '_bias'", "node.name = section node.op = 'DarknetRegion' node.input = [prev_output] node.input_norm", "= OrderedDict() # record the output of the original layer", "section + '_' + str(section_counters[section]) section_counters[section] += 1 line =", "section.startswith('connected'): activation = sec.get('activation', fallback='linear') filters = sec.getint('output', 2) bias_data", "input is ', node.input) node.input_norm = node.input mydict[node.name] = node", "node = MyGraph.MyNode() node.name = section + '_bias' node.op =", "cfg_parser = configparser.ConfigParser() cfg_parser.read_file(unique_config_file) weights_file = open(weights_path, 'rb') # read", "= MyGraph.MyNode() node.name = section node.op = 'Leaky' if activation", "names. 
Adds unique suffixes to config sections for compability with", "fallback = 1) assert multiplier == 1 groups = filters", "for _section in cfg_parser.sections(): sec_q = queue.Queue(0) sec_q.put(cfg_parser[_section]) while not", "= [] mydict[node.name] = node prev_output = node.name # prev_layer_filters", "nodes #conv_layer = np.zeros([1, 1, filters], dtype = np.float32) node", "else 'valid' if pad: padding = size//2 # Setting weights.", "re,sys,os from graph import MyGraph from collections import OrderedDict def", "* filters * 4), section+'-weight')) node = MyGraph.MyNode() node.name =", "if activation == 'linear': pass elif activation == 'linear' or", "', activation, weights_shape) conv_bias = np.ndarray( shape=(filters, ), dtype=np.float32, buffer=readfile(weights_file,", "# 定义子图所需要的输出节点,输入节点,终止节点 outputNodes = ['region_0', 'softmax_0'] stopNodes = [] inputNodes", "node.name = section node.op = 'NCNNConcat' node.input = [mylist[i] for", "to var # as std. bn_weight_list = [ bn_weights[0], #", "1 op = 'Conv2D' elif section.startswith('depthwise'): conv = 'dconv' filters", "[mylist[i] for i in ids] #print('mylist is ', mylist, 'the", "node.input_norm = node.input # node.attr = [] node.bias = bias_data", "node.name # prev_layer_filters no change else: node = MyGraph.MyNode() node.name", "tmp_parser.add_section(name) tmp_parser.set(name, 'activation', activation) sec_q.put(tmp_parser[name]) # NOTE: this section has", "unique_config_sections(config_path) cfg_parser = configparser.ConfigParser() cfg_parser.read_file(unique_config_file) weights_file = open(weights_path, 'rb') #", "__name__ == '__main__': config_path = sys.argv[1] weights_path = sys.argv[2] mygraph", "section node.op = 'DarknetReorg' node.input = [prev_output] node.stride = sec.getint('stride',", "OrderedDict def unique_config_sections(config_file): \"\"\"Convert all config sections to have unique", "' ', activation, weights_shape) conv_bias = np.ndarray( shape=(filters, ), dtype=np.float32,", "= sec.getint('stride', fallback = 1) pad = sec.getint('pad', fallback =", "node.name = section node.op = 'Softmax' node.input = [prev_output] node.input_norm", "int(cfg_parser[section]['classes']) node.num = int(cfg_parser[section]['num']) node.softmax = int(cfg_parser[section]['softmax']) node.anchors = [float(i)", "import queue for _section in cfg_parser.sections(): sec_q = queue.Queue(0) sec_q.put(cfg_parser[_section])", "op = 'DepthwiseConv2dNative' size = sec.getint('size', fallback = 1) stride", "= sys.argv[1] weights_path = sys.argv[2] mygraph = buildGraph(config_path, weights_path) #", "is \", conv_weights.shape) conv_weights = np.transpose(conv_weights, idx_dartnet2tf) #print(\"the tf shape", "node.op = 'NCNNConcat' node.input = [mylist[i] for i in ids]", "buffer=readfile(weights_file, (weights_size * 4), section+'-weights')) count += weights_size # DarkNet", "elif re.match(r'^(pooling|maxpool|avgpool)_\\d+$', section): node = MyGraph.MyNode() node.stride = sec.getint('stride', fallback", "1) groups = 1 op = 'Conv2D' elif section.startswith('depthwise'): conv", "[0, 1, 2] elif conv == 'gconv': weights_shape = (size,", "sec_q.put(tmp_parser[name]) # NOTE: this section has relative reference mylist.append(name) elif", "pass # Configs not currently handled during model definition. else:", "mydict[name].filters else: assert len(mydict[name].input) >= 1 return getFilters(mydict, mydict[name].input[0]) def", "currently handled during model definition. 
else: raise ValueError( 'Unsupported section", "in cfg_parser.sections(): sec_q = queue.Queue(0) sec_q.put(cfg_parser[_section]) while not sec_q.empty(): sec", "conv == 'dconv': weights_shape = (size, size, filters) idx_tf2darknet =", "filters if batch_normalize: bn_weights = np.ndarray( shape=(3, filters), dtype=np.float32, buffer=readfile(weights_file,", "is ', from_, 'node input is ', node.input) node.input_norm =", "defaultdict(int) output_stream = io.StringIO() with open(config_file) as fin: for line", "bn_weight_list = [ bn_weights[0], # scale gamma conv_bias, # shift", "', prev_layer_filters) print('loaded {} bytes in weights file'.format(count*4)) mygraph =", "fallback='linear') filters = sec.getint('output', 2) bias_data = np.ndarray( shape=[filters], dtype=np.float32,", "activation function `{}` in section {}'.format( activation, section)) if section.startswith('activation'):", "= 0) padding = sec.getint('padding', fallback = 0) activation =", "= [prev_output] node.input_norm = node.input #node.attr = [] node.gamma =", "i in range(len(idxmap))] weights_size = np.product(weights_shape) print(' ' + conv,", "input is ', node.input) node.input_norm = node.input node.axis = 0", "1 groups = filters op = 'DepthwiseConv2dNative' elif section.startswith('groupwise'): conv", "node = MyGraph.MyNode() node.name = section node.op = 'MatMul' node.input", "elif conv == 'gconv': weights_shape = (size, size, prev_layer_filters//groups, filters//groups,", "prev_layer_filters, filters) idx_tf2darknet = [0, 1, 2, 3] elif conv", "node.input_norm = node.input node.groups = int(cfg_parser[section]['groups']) mydict[node.name] = node prev_output", "sec_q = queue.Queue(0) sec_q.put(cfg_parser[_section]) while not sec_q.empty(): sec = sec_q.get()", "prev_output = node.name if activation == 'linear': mylist.append(prev_output) else: tmp_parser", "elif section.startswith('reorg'): node = MyGraph.MyNode() node.name = section node.op =", "{}'.format(section)) # this section will can be a subsection if", "section.startswith('softmax'): node = MyGraph.MyNode() node.name = section node.op = 'Softmax'", "= open(weights_path, 'rb') # read out major, minor, revision, net.seen", "section node.op = 'Softmax' node.input = [prev_output] node.input_norm = node.input", "the output of the original layer mylist = [] count", "node.filters elif re.match(r'^(shortcut)_\\d+$', section): activation = sec.get('activation', fallback = 'logistic')", "# (out_dim, in_dim, height, width) # We would like to", "= [prev_output] node.input_norm = node.input #node.attr = [] mydict[node.name] =", "= 'Pooling' node.input = [prev_output] node.input_norm = node.input mydict[node.name] =", "4), section+'-bias')) fc_data = np.ndarray( shape=[prev_layer_filters, filters], dtype=np.float32, buffer=readfile(weights_file, (prev_layer_filters", "node.name = section node.op = 'Shuffle' node.input = [prev_output] node.input_norm", "= 1) pad = sec.getint('pad', fallback = 0) padding =", "= section node.op = 'Leaky' if activation == 'linear': node.slope", "set these to Tensorflow order: # (height, width, in_dim, out_dim)", "as np import re,sys,os from graph import MyGraph from collections", "shape is \", conv_weights.shape) conv_weights = np.transpose(conv_weights, idx_dartnet2tf) #print(\"the tf", "import io section_counters = defaultdict(int) output_stream = io.StringIO() with open(config_file)", "in ids]) mydict[node.name] = node prev_output = node.name mylist.append(section) prev_layer_filters", "section.startswith('route'): ids = [int(i) for i in 
cfg_parser[section]['layers'].split(',')] node =", "= [int(i) for i in cfg_parser[section]['layers'].split(',')] node = MyGraph.MyNode() node.name", "getFilters(mydict, mydict[name].input[0]) def readfile(f, len, msg): print(\" %s read %d", "(height, width, in_dim, out_dim) # TODO: Add check for Theano", "idx_dartnet2tf = [idxmap[i] for i in range(len(idxmap))] weights_size = np.product(weights_shape)", "12), section+'-batchnorm')) count += 3 * filters # TODO: Keras", "else: node = MyGraph.MyNode() node.name = section + '_bias' node.op", "node.name = section node.op = 'MatMul' node.input = [prev_output] node.input_norm", "(msg, len)) return f.read(len) def buildGraph(config_path, weights_path): unique_config_file = unique_config_sections(config_path)", "mydict = OrderedDict() # record the output of the original", "= node.input node.filters = getFilters(mydict, node.input[0]) * node.stride * node.stride", "'activation', activation) sec_q.put(tmp_parser[name]) # NOTE: this section has relative reference", "print('Parsing section {}'.format(section)) # this section will can be a", "node.input = [prev_output] node.input_norm = node.input #node.attr = [] mydict[node.name]", "= sys.argv[2] mygraph = buildGraph(config_path, weights_path) # 定义子图所需要的输出节点,输入节点,终止节点 outputNodes =", "node.filters elif section.startswith('reorg'): node = MyGraph.MyNode() node.name = section node.op", "print(vars(node)) # node.attr = [] mydict[node.name] = node # start", "'Conv2D' elif section.startswith('depthwise'): conv = 'dconv' filters = prev_layer_filters multiplier", "to config sections for compability with configparser. \"\"\" from collections", "= op node.input = [prev_output] node.input_norm = node.input node.kernel =", "gamma conv_bias, # shift beta bn_weights[1], # running mean bn_weights[2]", "mylist.append(section) elif section.startswith('softmax'): node = MyGraph.MyNode() node.name = section node.op", "node.num = int(cfg_parser[section]['num']) node.softmax = int(cfg_parser[section]['softmax']) node.anchors = [float(i) for", "out major, minor, revision, net.seen readfile(weights_file, (4*4), 'head') mydict =", "'activation', activation) sec_q.put(tmp_parser[name]) mylist.append(name) elif section.startswith('net'): node = MyGraph.MyNode() node.name", "node = MyGraph.MyNode() node.name = section node.op = 'DarknetReorg' node.input", "np.ndarray( shape=[weights_shape[i] for i in idx_tf2darknet], dtype=np.float32, buffer=readfile(weights_file, (weights_size *", "elif re.match(r'^(shortcut)_\\d+$', section): activation = sec.get('activation', fallback = 'logistic') from_", "section.startswith('reorg'): node = MyGraph.MyNode() node.name = section node.op = 'DarknetReorg'", "sys.argv[2] mygraph = buildGraph(config_path, weights_path) # 定义子图所需要的输出节点,输入节点,终止节点 outputNodes = ['region_0',", "compability with configparser. 
\"\"\" from collections import defaultdict import io", "= node.input #node.attr = [] node.gamma = bn_weights[0] node.beta =", "= [prev_output] node.input_norm = node.input node.classes = int(cfg_parser[section]['classes']) node.num =", "in fin: if line.startswith('['): section = line.strip().strip('[]') _section = section", "node.stride * node.stride mydict[node.name] = node prev_output = node.name mylist.append(section)", "node.op = 'Leaky' if activation == 'linear': node.slope = 1", "node.input node.classes = int(cfg_parser[section]['classes']) node.num = int(cfg_parser[section]['num']) node.softmax = int(cfg_parser[section]['softmax'])", "mean bn_weights[2] # running var ] conv_weights = np.ndarray( shape=[weights_shape[i]", "= 'DepthwiseConv2dNative' size = sec.getint('size', fallback = 1) stride =", "section {}'.format( activation, section)) if section.startswith('activation'): mylist.append(section) elif re.match(r'^(convolutional|depthwise|groupwise)_\\d+$', section):", "0.1 elif activation == 'relu': node.slope = 0 node.input =", "weights_size = np.product(weights_shape) print(' ' + conv, 'bn' if batch_normalize", "else [ conv_weights, conv_bias ] # Create nodes #conv_layer =", "* node.stride mydict[node.name] = node prev_output = node.name mylist.append(section) prev_layer_filters", "'node input is ', node.input) node.input_norm = node.input node.axis =", "mydict[node.name] = node prev_output = node.name #print('pooling ', vars(node)) mylist.append(section)", "= node.name mylist.append(section) pass elif section.startswith('cost'): pass # Configs not", "# record the output of the original layer mylist =", "= node prev_output = node.name mylist.append(section) elif section.startswith('softmax'): node =", "outputNodes = ['region_0', 'softmax_0'] stopNodes = [] inputNodes = ['darknet_0']", "1 else 'valid' if pad: padding = size//2 # Setting", "', name) if hasattr(mydict[name], 'filters'): return mydict[name].filters else: assert len(mydict[name].input)", "[conv_weights] if batch_normalize else [ conv_weights, conv_bias ] # Create", "'_bias' node.op = 'BiasAdd' node.input = [prev_output] node.input_norm = node.input", "sec.getint('size', node.stride) node.padding = sec.getint('padding', fallback = (node.size-1)//2) if section.startswith('pooling'):", "= [] count = 4 import queue for _section in", "# TODO: Add check for Theano dim ordering. 
#print(\"the darknet", "node.stride = sec.getint('stride', fallback = 1) node.size = sec.getint('size', node.stride)", "node = MyGraph.MyNode() node.name = section node.op = 'BinaryOp' node.op_type", "# padding='same' is equivalent to Darknet pad=1 # padding =", "section+'-weights')) count += weights_size # DarkNet conv_weights are serialized Caffe-style:", "= [ bn_weights[0], # scale gamma conv_bias, # shift beta", "node.input node.filters = getFilters(mydict, node.input[0]) * node.stride * node.stride mydict[node.name]", "mydict[node.name] = node prev_output = node.name # prev_layer_filters no change", "activation, section)) if section.startswith('activation'): mylist.append(section) elif re.match(r'^(convolutional|depthwise|groupwise)_\\d+$', section): if section.startswith('convolutional'):", "section+'-bias')) count += filters if batch_normalize: bn_weights = np.ndarray( shape=(3,", "elif section.startswith('cost'): pass # Configs not currently handled during model", "= [] node.bias = bias_data mydict[node.name] = node prev_output =", "= node.input #node.attr = [] mydict[node.name] = node prev_output =", "hasattr(mydict[name], 'filters'): return mydict[name].filters else: assert len(mydict[name].input) >= 1 return", "idx_tf2darknet = [0, 1, 2, 3, 4] idxmap = {x:", "= prev_layer.shape # TODO: This assumes channel last dim_ordering. if", "4), section+'-weights')) count += weights_size # DarkNet conv_weights are serialized", "'activation', activation) sec_q.put(tmp_parser[name]) mylist.append(name) elif section.startswith('shuffle'): node = MyGraph.MyNode() node.name", "conv_weights are serialized Caffe-style: # (out_dim, in_dim, height, width) #", "* filters # TODO: Keras BatchNormalization mistakenly refers to var", "padding = size//2 # Setting weights. # Darknet serializes convolutional", "This assumes channel last dim_ordering. if conv == 'conv': weights_shape", "= [] node.bias = conv_bias mydict[node.name] = node prev_output =", "'logistic') if activation == 'linear': pass elif activation == 'linear'", "# scale gamma conv_bias, # shift beta bn_weights[1], # running", "filters) idx_tf2darknet = [0, 1, 2, 3] elif conv ==", "section.startswith('region'): node = MyGraph.MyNode() node.name = section node.op = 'DarknetRegion'", "for i in ids] #print('mylist is ', mylist, 'the ids", "convolutional weights as: # [bias/beta, [gamma, mean, variance], conv_weights] #prev_layer_shape", "'filters'): return mydict[name].filters else: assert len(mydict[name].input) >= 1 return getFilters(mydict,", "Adds unique suffixes to config sections for compability with configparser.", "TODO: Keras BatchNormalization mistakenly refers to var # as std.", "* 4), section+'-weight')) node = MyGraph.MyNode() node.name = section node.op", "activation = sec.get('activation', fallback='linear') filters = sec.getint('output', 2) bias_data =", "(filters * 12), section+'-batchnorm')) count += 3 * filters #", "running var ] conv_weights = np.ndarray( shape=[weights_shape[i] for i in", "groups = filters op = 'DepthwiseConv2dNative' elif section.startswith('groupwise'): conv =", "Configs not currently handled during model definition. 
else: raise ValueError(", "idx_tf2darknet = [0, 1, 2] elif conv == 'gconv': weights_shape", "#prev_layer_shape = prev_layer.shape # TODO: This assumes channel last dim_ordering.", "= np.transpose(conv_weights, idx_dartnet2tf) #print(\"the tf shape is \", conv_weights.shape) conv_weights", "reference mylist.append(name) elif section.startswith('connected'): activation = sec.get('activation', fallback='linear') filters =", "1 line = line.replace(section, _section) output_stream.write(line) output_stream.seek(0) return output_stream def", "= [mylist[i] for i in ids] #print('mylist is ', mylist,", "assert multiplier == 1 groups = filters op = 'DepthwiseConv2dNative'", "fc_data = np.ndarray( shape=[prev_layer_filters, filters], dtype=np.float32, buffer=readfile(weights_file, (prev_layer_filters * filters", "in range(len(idxmap))] weights_size = np.product(weights_shape) print(' ' + conv, 'bn'", "node.name = section node.op = 'BinaryOp' node.op_type = 0 node.input", "from_ = sec.getint('from') node = MyGraph.MyNode() node.name = section node.op", "weights_size # DarkNet conv_weights are serialized Caffe-style: # (out_dim, in_dim,", "= [] inputNodes = ['darknet_0'] mygraph.extractSubGraph(inputNodes, outputNodes, stopNodes) mygraph.generateDot('YoloV2.dot') #", "graph import MyGraph from collections import OrderedDict def unique_config_sections(config_file): \"\"\"Convert", "= int(cfg_parser[section]['softmax']) node.anchors = [float(i) for i in re.split(r',', cfg_parser[section]['anchors'])]", "print(\" %s read %d bytes\" % (msg, len)) return f.read(len)", "node = MyGraph.MyNode() node.name = section node.op = 'Shuffle' node.input", "[] node.width = int(cfg_parser['net_0']['width']) node.height = int(cfg_parser['net_0']['height']) node.channels = int(cfg_parser['net_0']['channels'])", "if batch_normalize else [ conv_weights, conv_bias ] # Create nodes", "size, prev_layer_filters, filters) idx_tf2darknet = [0, 1, 2, 3] elif", "= [prev_output] node.input_norm = node.input node.groups = int(cfg_parser[section]['groups']) mydict[node.name] =", "shape=[prev_layer_filters, filters], dtype=np.float32, buffer=readfile(weights_file, (prev_layer_filters * filters * 4), section+'-weight'))", "tmp_parser.add_section(name) tmp_parser.set(name, 'activation', activation) sec_q.put(tmp_parser[name]) mylist.append(name) elif section.startswith('net'): node =", "section + '_activation' tmp_parser.add_section(name) tmp_parser.set(name, 'activation', activation) sec_q.put(tmp_parser[name]) mylist.append(name) elif", "not currently handled during model definition. else: raise ValueError( 'Unsupported", "2) bias_data = np.ndarray( shape=[filters], dtype=np.float32, buffer=readfile(weights_file, (filters * 4),", "if conv == 'conv': weights_shape = (size, size, prev_layer_filters, filters)", "elif section.startswith('depthwise'): conv = 'dconv' filters = prev_layer_filters multiplier =", "= size//2 # Setting weights. 
# Darknet serializes convolutional weights", "'DepthwiseConv2dNative' elif section.startswith('groupwise'): conv = 'gconv' filters = sec.getint('filters', fallback=1)", "this section has relative reference mylist.append(name) elif section.startswith('connected'): activation =", "node.input mydict[node.name] = node prev_output = node.name mylist.append(section) pass elif", "node.bias = conv_bias mydict[node.name] = node prev_output = node.name if", "def buildGraph(config_path, weights_path): unique_config_file = unique_config_sections(config_path) cfg_parser = configparser.ConfigParser() cfg_parser.read_file(unique_config_file)", "section.startswith('groupwise'): conv = 'gconv' filters = sec.getint('filters', fallback=1) groups =", "activation) sec_q.put(tmp_parser[name]) # NOTE: this section has relative reference mylist.append(name)", "= node.name mylist.append(section) elif section.startswith('softmax'): node = MyGraph.MyNode() node.name =", "ValueError( 'Unknown activation function `{}` in section {}'.format( activation, section))", "enumerate(idx_tf2darknet)} idx_dartnet2tf = [idxmap[i] for i in range(len(idxmap))] weights_size =", "'same' if pad == 1 else 'valid' if pad: padding", "int(cfg_parser[section]['num']) node.softmax = int(cfg_parser[section]['softmax']) node.anchors = [float(i) for i in", "shape=[filters], dtype=np.float32, buffer=readfile(weights_file, (filters * 4), section+'-bias')) fc_data = np.ndarray(", "= sec.getint('from') node = MyGraph.MyNode() node.name = section node.op =", "cfg_parser.read_file(unique_config_file) weights_file = open(weights_path, 'rb') # read out major, minor,", "1) stride = sec.getint('stride', fallback = 1) pad = sec.getint('pad',", "sec_q.get() section = sec.name print('Parsing section {}'.format(section)) # this section", "= 'gconv' filters = sec.getint('filters', fallback=1) groups = sec.getint('groups', fallback", "== '__main__': config_path = sys.argv[1] weights_path = sys.argv[2] mygraph =", "1, 2, 3, 4] idxmap = {x: i for i,", "= node.input node.classes = int(cfg_parser[section]['classes']) node.num = int(cfg_parser[section]['num']) node.softmax =", "count += 3 * filters # TODO: Keras BatchNormalization mistakenly", "fallback = 1) pad = sec.getint('pad', fallback = 0) padding", "0) padding = sec.getint('padding', fallback = 0) activation = sec.get('activation',", "ids] #print('mylist is ', mylist, 'the ids is ', ids,", "= 0 node.filters = sum([getFilters(mydict, mylist[i]) for i in ids])", "= unique_config_sections(config_path) cfg_parser = configparser.ConfigParser() cfg_parser.read_file(unique_config_file) weights_file = open(weights_path, 'rb')", "\", conv_weights.shape) conv_weights = [conv_weights] if batch_normalize else [ conv_weights,", "[] node.bias = conv_bias mydict[node.name] = node prev_output = node.name", "as std. 
bn_weight_list = [ bn_weights[0], # scale gamma conv_bias,", "import re,sys,os from graph import MyGraph from collections import OrderedDict", "buffer=readfile(weights_file, (filters * 4), section+'-bias')) count += filters if batch_normalize:", "mylist.append(section) prev_layer_filters = node.filters elif section.startswith('reorg'): node = MyGraph.MyNode() node.name", "name = section + '_activation' tmp_parser.add_section(name) tmp_parser.set(name, 'activation', activation) sec_q.put(tmp_parser[name])", "be a subsection if section.startswith('activation') or section.endswith('activation'): activation = sec.get('activation',", "node.anchors = [float(i) for i in re.split(r',', cfg_parser[section]['anchors'])] #print(vars(node)) #node.attr", "= 1) node.input_norm = node.input node.filters = getFilters(mydict, node.input[0]) *", "# [bias/beta, [gamma, mean, variance], conv_weights] #prev_layer_shape = prev_layer.shape #", "#conv_layer = np.zeros([1, 1, filters], dtype = np.float32) node =", "'dconv' filters = prev_layer_filters multiplier = sec.getint('multiplier', fallback = 1)", "'gconv' filters = sec.getint('filters', fallback=1) groups = sec.getint('groups', fallback =", "mylist, 'the from_ is ', from_, 'node input is ',", "section+'-batchnorm')) count += 3 * filters # TODO: Keras BatchNormalization", "= sec.get('activation', fallback='linear') filters = sec.getint('output', 2) bias_data = np.ndarray(", "buildGraph(config_path, weights_path) # 定义子图所需要的输出节点,输入节点,终止节点 outputNodes = ['region_0', 'softmax_0'] stopNodes =", "= defaultdict(int) output_stream = io.StringIO() with open(config_file) as fin: for", "* node.stride * node.stride mydict[node.name] = node prev_output = node.name", "output_stream.seek(0) return output_stream def getFilters(mydict, name): #print('find filters for ',", "section.startswith('depthwise'): conv = 'dconv' filters = prev_layer_filters multiplier = sec.getint('multiplier',", "node.mode = 'max' node.global_pooling = 0 elif section.startswith('avgpool'): node.mode =", "0 node.input = [prev_output, mylist[from_]] #print('mylist is ', mylist, 'the", "check for Theano dim ordering. 
#print(\"the darknet shape is \",", "sec.getint('batch_normalize', 0) # padding='same' is equivalent to Darknet pad=1 #", "# node.attr = [] mydict[node.name] = node # start here", "node.attr = [] node.bias = bias_data mydict[node.name] = node prev_output", "mylist[from_]] #print('mylist is ', mylist, 'the from_ is ', from_,", "node.input mydict[node.name] = node prev_output = node.name if activation ==", "node.input_norm = node.input node.classes = int(cfg_parser[section]['classes']) node.num = int(cfg_parser[section]['num']) node.softmax", "= [] node.width = int(cfg_parser['net_0']['width']) node.height = int(cfg_parser['net_0']['height']) node.channels =", "= int(cfg_parser[section]['groups']) mydict[node.name] = node prev_output = node.name mylist.append(section) elif", "node.name = section + '_bias' node.op = 'BiasAdd' node.input =", "in_dim, height, width) # We would like to set these", "cfg_parser.sections(): sec_q = queue.Queue(0) sec_q.put(cfg_parser[_section]) while not sec_q.empty(): sec =", "2, 3, 4] idxmap = {x: i for i, x", "= (size, size, prev_layer_filters//groups, filters//groups, groups) idx_tf2darknet = [0, 1,", "batch_normalize: node = MyGraph.MyNode() node.name = section + '_batch_normalize' node.op", "# NOTE: this section has relative reference mylist.append(name) elif section.startswith('connected'):", "section header type: {}'.format(section)) print(' out filters ', prev_layer_filters) print('loaded", "1) op = 'DepthwiseConv2dNative' size = sec.getint('size', fallback = 1)", "serializes convolutional weights as: # [bias/beta, [gamma, mean, variance], conv_weights]", "is ', mylist, 'the ids is ', ids, 'node input", "1 elif activation == 'leaky': node.slope = 0.1 elif activation", "= node.name # prev_layer_filters no change else: raise ValueError( 'Unknown", "weights_path) # 定义子图所需要的输出节点,输入节点,终止节点 outputNodes = ['region_0', 'softmax_0'] stopNodes = []", "if __name__ == '__main__': config_path = sys.argv[1] weights_path = sys.argv[2]", "node.kernel = conv_weights[0] node.padding = padding node.strides = [1,stride,stride,1] node.groups", "= 1 op = 'Conv2D' elif section.startswith('depthwise'): conv = 'dconv'", "filters mydict[node.name] = node prev_output = node.name prev_layer_filters = filters", "weights_shape = (size, size, prev_layer_filters, filters) idx_tf2darknet = [0, 1,", "* 4), section+'-bias')) count += filters if batch_normalize: bn_weights =", "for Theano dim ordering. #print(\"the darknet shape is \", conv_weights.shape)", "collections import defaultdict import io section_counters = defaultdict(int) output_stream =", "= 'same' if pad == 1 else 'valid' if pad:", "= node prev_output = node.name prev_layer_filters = filters node =", "MyGraph.MyNode() node.stride = sec.getint('stride', fallback = 1) node.size = sec.getint('size',", "prev_output = node.name #print('pooling ', vars(node)) mylist.append(section) elif section.startswith('route'): ids", "conv_weights.shape) conv_weights = np.transpose(conv_weights, idx_dartnet2tf) #print(\"the tf shape is \",", "node prev_output = node.name prev_layer_filters = filters if batch_normalize: node", "re.match(r'^(pooling|maxpool|avgpool)_\\d+$', section): node = MyGraph.MyNode() node.stride = sec.getint('stride', fallback =", "assumes channel last dim_ordering. 
if conv == 'conv': weights_shape =", "queue.Queue(0) sec_q.put(cfg_parser[_section]) while not sec_q.empty(): sec = sec_q.get() section =", "section.startswith('maxpool'): node.mode = 'max' node.global_pooling = 0 elif section.startswith('avgpool'): node.mode", "section_counters[section] += 1 line = line.replace(section, _section) output_stream.write(line) output_stream.seek(0) return", "[prev_output, mylist[from_]] #print('mylist is ', mylist, 'the from_ is ',", "'leaky': node.slope = 0.1 elif activation == 'relu': node.slope =", "1, 2] elif conv == 'gconv': weights_shape = (size, size,", "'logistic') from_ = sec.getint('from') node = MyGraph.MyNode() node.name = section", "node.padding = sec.getint('padding', fallback = (node.size-1)//2) if section.startswith('pooling'): node.mode =", "order: # (height, width, in_dim, out_dim) # TODO: Add check", "idx_dartnet2tf) #print(\"the tf shape is \", conv_weights.shape) conv_weights = [conv_weights]", "elif section.startswith('net'): node = MyGraph.MyNode() node.name = section node.op =", "node.input node.kernel = conv_weights[0] node.padding = padding node.strides = [1,stride,stride,1]", "== 'relu': node = MyGraph.MyNode() node.name = section node.op =", "= node.input mydict[node.name] = node prev_output = node.name #print('pooling ',", "node.input = [mylist[i] for i in ids] #print('mylist is ',", "#node.attr = [] node.gamma = bn_weights[0] node.beta = conv_bias node.mean", "= node.name mylist.append(section) prev_layer_filters = node.filters elif re.match(r'^(shortcut)_\\d+$', section): activation", "= filters op = 'DepthwiseConv2dNative' elif section.startswith('groupwise'): conv = 'gconv'", "channel last dim_ordering. if conv == 'conv': weights_shape = (size,", "'leaky' or activation == 'relu': node = MyGraph.MyNode() node.name =", "= MyGraph.MyNode() node.name = section node.op = 'MatMul' node.input =", "during model definition. else: raise ValueError( 'Unsupported section header type:", "= section node.op = 'Shuffle' node.input = [prev_output] node.input_norm =", "# Darknet serializes convolutional weights as: # [bias/beta, [gamma, mean,", "to have unique names. Adds unique suffixes to config sections", "re.match(r'^(convolutional|depthwise|groupwise)_\\d+$', section): if section.startswith('convolutional'): conv = 'conv' filters = sec.getint('filters',", "node.strides = [1,stride,stride,1] node.groups = groups node.filters = filters mydict[node.name]", "'FusedBatchNorm' node.input = [prev_output] node.input_norm = node.input #node.attr = []", "Add check for Theano dim ordering. #print(\"the darknet shape is", "{}'.format(section)) print(' out filters ', prev_layer_filters) print('loaded {} bytes in", "section node.op = 'Pooling' node.input = [prev_output] node.input_norm = node.input", "return getFilters(mydict, mydict[name].input[0]) def readfile(f, len, msg): print(\" %s read", "TODO: Add check for Theano dim ordering. #print(\"the darknet shape", "mydict[node.name] = node # start here prev_output = node.name prev_layer_filters", "= section node.op = op node.input = [prev_output] node.input_norm =", "[prev_output] node.input_norm = node.input node.classes = int(cfg_parser[section]['classes']) node.num = int(cfg_parser[section]['num'])", "[prev_output] node.input_norm = node.input mydict[node.name] = node prev_output = node.name", "config sections to have unique names. 
Adds unique suffixes to", "sec.getint('stride', fallback = 1) node.size = sec.getint('size', node.stride) node.padding =", "= node prev_output = node.name if activation == 'linear': mylist.append(prev_output)", "(filters * 4), section+'-bias')) fc_data = np.ndarray( shape=[prev_layer_filters, filters], dtype=np.float32,", "#node.attr = [] node.bias = conv_bias mydict[node.name] = node prev_output", "'_batch_normalize' node.op = 'FusedBatchNorm' node.input = [prev_output] node.input_norm = node.input", "conv_weights = np.ndarray( shape=[weights_shape[i] for i in idx_tf2darknet], dtype=np.float32, buffer=readfile(weights_file,", "[bias/beta, [gamma, mean, variance], conv_weights] #prev_layer_shape = prev_layer.shape # TODO:", "node.input_norm = [] node.width = int(cfg_parser['net_0']['width']) node.height = int(cfg_parser['net_0']['height']) node.channels", "Keras BatchNormalization mistakenly refers to var # as std. bn_weight_list", "mylist.append(name) elif section.startswith('connected'): activation = sec.get('activation', fallback='linear') filters = sec.getint('output',", "import defaultdict import io section_counters = defaultdict(int) output_stream = io.StringIO()", ">= 1 return getFilters(mydict, mydict[name].input[0]) def readfile(f, len, msg): print(\"", "beta bn_weights[1], # running mean bn_weights[2] # running var ]", "= node.input node.multiplier = fc_data mydict[node.name] = node prev_output =", "node.stride mydict[node.name] = node prev_output = node.name mylist.append(section) prev_layer_filters =", "queue for _section in cfg_parser.sections(): sec_q = queue.Queue(0) sec_q.put(cfg_parser[_section]) while", "conv_weights] #prev_layer_shape = prev_layer.shape # TODO: This assumes channel last", "node.attr = [] mydict[node.name] = node # start here prev_output", "mygraph.type = 'darknet' return mygraph if __name__ == '__main__': config_path", "node = MyGraph.MyNode() node.stride = sec.getint('stride', fallback = 1) node.size", "configparser import numpy as np import re,sys,os from graph import", "#print('find filters for ', name) if hasattr(mydict[name], 'filters'): return mydict[name].filters", "= 0 elif section.startswith('avgpool'): node.mode = 'avg' node.global_pooling = 1", "TODO: This assumes channel last dim_ordering. if conv == 'conv':", "readfile(weights_file, (4*4), 'head') mydict = OrderedDict() # record the output", "= int(cfg_parser['net_0']['width']) node.height = int(cfg_parser['net_0']['height']) node.channels = int(cfg_parser['net_0']['channels']) node.filters =", "= [1,stride,stride,1] node.groups = groups node.filters = filters mydict[node.name] =", "= sec.getint('padding', fallback = (node.size-1)//2) if section.startswith('pooling'): node.mode = str(cfg_parser[section]['mode'])", "assert len(mydict[name].input) >= 1 return getFilters(mydict, mydict[name].input[0]) def readfile(f, len,", "multiplier == 1 groups = filters op = 'DepthwiseConv2dNative' elif", "'linear' or activation == 'leaky' or activation == 'relu': node", "dim ordering. #print(\"the darknet shape is \", conv_weights.shape) conv_weights =", "定义子图所需要的输出节点,输入节点,终止节点 outputNodes = ['region_0', 'softmax_0'] stopNodes = [] inputNodes =", "[gamma, mean, variance], conv_weights] #prev_layer_shape = prev_layer.shape # TODO: This", "sec_q.empty(): sec = sec_q.get() section = sec.name print('Parsing section {}'.format(section))", "= line.strip().strip('[]') _section = section + '_' + str(section_counters[section]) section_counters[section]", "Theano dim ordering. 
#print(\"the darknet shape is \", conv_weights.shape) conv_weights", "', node.input) node.input_norm = node.input mydict[node.name] = node prev_output =", "weights_shape = (size, size, prev_layer_filters//groups, filters//groups, groups) idx_tf2darknet = [0,", "weights_path): unique_config_file = unique_config_sections(config_path) cfg_parser = configparser.ConfigParser() cfg_parser.read_file(unique_config_file) weights_file =", "= sec.get('activation', fallback = 'logistic') if activation == 'linear': pass", "activation) sec_q.put(tmp_parser[name]) mylist.append(name) elif section.startswith('shuffle'): node = MyGraph.MyNode() node.name =", "shape=[weights_shape[i] for i in idx_tf2darknet], dtype=np.float32, buffer=readfile(weights_file, (weights_size * 4),", "= int(cfg_parser['net_0']['height']) node.channels = int(cfg_parser['net_0']['channels']) node.filters = node.channels # print(vars(node))", "= MyGraph.MyNode() node.name = section node.op = 'NCNNConcat' node.input =", "ids]) mydict[node.name] = node prev_output = node.name mylist.append(section) prev_layer_filters =", "activation == 'linear': node.slope = 1 elif activation == 'leaky':", "# DarkNet conv_weights are serialized Caffe-style: # (out_dim, in_dim, height,", "original layer mylist = [] count = 4 import queue", "node prev_output = node.name mylist.append(section) elif section.startswith('softmax'): node = MyGraph.MyNode()", "= int(cfg_parser['net_0']['channels']) node.filters = node.channels # print(vars(node)) # node.attr =", "tmp_parser.set(name, 'activation', activation) sec_q.put(tmp_parser[name]) mylist.append(name) elif section.startswith('shuffle'): node = MyGraph.MyNode()", "node prev_output = node.name #print('pooling ', vars(node)) mylist.append(section) elif section.startswith('route'):", "out_dim) # TODO: Add check for Theano dim ordering. 
#print(\"the", "* 4), section+'-bias')) fc_data = np.ndarray( shape=[prev_layer_filters, filters], dtype=np.float32, buffer=readfile(weights_file,", "= np.ndarray( shape=(filters, ), dtype=np.float32, buffer=readfile(weights_file, (filters * 4), section+'-bias'))", "width) # We would like to set these to Tensorflow", "_section) output_stream.write(line) output_stream.seek(0) return output_stream def getFilters(mydict, name): #print('find filters", "return f.read(len) def buildGraph(config_path, weights_path): unique_config_file = unique_config_sections(config_path) cfg_parser =", "== 1 groups = filters op = 'DepthwiseConv2dNative' elif section.startswith('groupwise'):", "count = 4 import queue for _section in cfg_parser.sections(): sec_q", "= sec.getint('filters', fallback = 1) groups = 1 op =", "[] node.gamma = bn_weights[0] node.beta = conv_bias node.mean = bn_weights[1]", "configparser.ConfigParser() name = section + '_activation' tmp_parser.add_section(name) tmp_parser.set(name, 'activation', activation)", "= sec.getint('filters', fallback=1) groups = sec.getint('groups', fallback = 1) op", "net.seen readfile(weights_file, (4*4), 'head') mydict = OrderedDict() # record the", "= MyGraph.MyNode() node.name = section node.op = 'BinaryOp' node.op_type =", "count += weights_size # DarkNet conv_weights are serialized Caffe-style: #", "section node.op = op node.input = [prev_output] node.input_norm = node.input", "pad=1 # padding = 'same' if pad == 1 else", "= MyGraph.MyNode() node.name = section node.op = 'Shuffle' node.input =", "output of the original layer mylist = [] count =", "+= weights_size # DarkNet conv_weights are serialized Caffe-style: # (out_dim,", "node.input = [prev_output] node.input_norm = node.input # node.attr = []", "sec_q.put(cfg_parser[_section]) while not sec_q.empty(): sec = sec_q.get() section = sec.name", "= sec.get('activation', fallback = 'logistic') batch_normalize = sec.getint('batch_normalize', 0) #", "node.name = section node.op = 'Leaky' if activation == 'linear':", "'_' + str(section_counters[section]) section_counters[section] += 1 line = line.replace(section, _section)", "= node prev_output = node.name mylist.append(section) pass elif section.startswith('cost'): pass", "== 'dconv': weights_shape = (size, size, filters) idx_tf2darknet = [0,", "i in ids] #print('mylist is ', mylist, 'the ids is", "[0, 1, 2, 3, 4] idxmap = {x: i for", "activation == 'linear': mylist.append(prev_output) else: tmp_parser = configparser.ConfigParser() name =", "prev_layer_filters no change else: node = MyGraph.MyNode() node.name = section", "is ', node.input) node.input_norm = node.input mydict[node.name] = node prev_output", "\"\"\"Convert all config sections to have unique names. 
Adds unique", "'DepthwiseConv2dNative' size = sec.getint('size', fallback = 1) stride = sec.getint('stride',", "from graph import MyGraph from collections import OrderedDict def unique_config_sections(config_file):", "else: tmp_parser = configparser.ConfigParser() name = section + '_activation' tmp_parser.add_section(name)", "= [0, 1, 2, 3] elif conv == 'dconv': weights_shape", "= conv_weights[0] node.padding = padding node.strides = [1,stride,stride,1] node.groups =", "as fin: for line in fin: if line.startswith('['): section =", "weights_path = sys.argv[2] mygraph = buildGraph(config_path, weights_path) # 定义子图所需要的输出节点,输入节点,终止节点 outputNodes", "prev_output = node.name # prev_layer_filters no change else: raise ValueError(", "bn_weights[2] mydict[node.name] = node prev_output = node.name # prev_layer_filters no", "weights_file = open(weights_path, 'rb') # read out major, minor, revision,", "2] elif conv == 'gconv': weights_shape = (size, size, prev_layer_filters//groups,", "[] node.bias = bias_data mydict[node.name] = node prev_output = node.name", "node.name mylist.append(section) prev_layer_filters = node.filters elif re.match(r'^(shortcut)_\\d+$', section): activation =", "node.stride = sec.getint('stride', fallback = 1) node.input_norm = node.input node.filters", "scale gamma conv_bias, # shift beta bn_weights[1], # running mean", "name): #print('find filters for ', name) if hasattr(mydict[name], 'filters'): return", "node # start here prev_output = node.name prev_layer_filters = node.channels", "%s read %d bytes\" % (msg, len)) return f.read(len) def", "here prev_output = node.name prev_layer_filters = node.channels mylist.append(section) elif section.startswith('region'):", "stopNodes = [] inputNodes = ['darknet_0'] mygraph.extractSubGraph(inputNodes, outputNodes, stopNodes) mygraph.generateDot('YoloV2.dot')", "= [float(i) for i in re.split(r',', cfg_parser[section]['anchors'])] #print(vars(node)) #node.attr =", "prev_output = node.name # prev_layer_filters no change else: node =", "sections to have unique names. Adds unique suffixes to config", "sec.get('activation', fallback = 'logistic') batch_normalize = sec.getint('batch_normalize', 0) # padding='same'", "# padding = 'same' if pad == 1 else 'valid'", "= line.replace(section, _section) output_stream.write(line) output_stream.seek(0) return output_stream def getFilters(mydict, name):", "= 'avg' node.global_pooling = 1 node.name = section node.op =", "conv, 'bn' if batch_normalize else ' ', activation, weights_shape) conv_bias", "if line.startswith('['): section = line.strip().strip('[]') _section = section + '_'", "mydict[node.name] = node prev_output = node.name prev_layer_filters = filters if", "', mylist, 'the ids is ', ids, 'node input is", "node.size = sec.getint('size', node.stride) node.padding = sec.getint('padding', fallback = (node.size-1)//2)", "config sections for compability with configparser. 
\"\"\" from collections import", "= [prev_output] node.input_norm = node.input mydict[node.name] = node prev_output =", "sec.getint('stride', fallback = 1) pad = sec.getint('pad', fallback = 0)", "line.startswith('['): section = line.strip().strip('[]') _section = section + '_' +", "prev_layer_filters = node.filters elif section.startswith('reorg'): node = MyGraph.MyNode() node.name =", "batch_normalize else ' ', activation, weights_shape) conv_bias = np.ndarray( shape=(filters,", "= 'conv' filters = sec.getint('filters', fallback = 1) groups =", "prev_output = node.name prev_layer_filters = node.channels mylist.append(section) elif section.startswith('region'): node", "the original layer mylist = [] count = 4 import", "activation == 'relu': node.slope = 0 node.input = [prev_output] node.input_norm", "= node.name prev_layer_filters = node.channels mylist.append(section) elif section.startswith('region'): node =", "prev_output = node.name mylist.append(section) pass elif section.startswith('cost'): pass # Configs", "= section + '_batch_normalize' node.op = 'FusedBatchNorm' node.input = [prev_output]", "mydict[name].input[0]) def readfile(f, len, msg): print(\" %s read %d bytes\"", "= ['darknet_0'] mygraph.extractSubGraph(inputNodes, outputNodes, stopNodes) mygraph.generateDot('YoloV2.dot') # 生成子图对应的代码 mygraph.generateSource('YoloV2', os.path.split(config_path)[1]+'.ncnn',", "= np.product(weights_shape) print(' ' + conv, 'bn' if batch_normalize else", "= sec_q.get() section = sec.name print('Parsing section {}'.format(section)) # this", "node.groups = groups node.filters = filters mydict[node.name] = node prev_output", "[prev_output] node.input_norm = node.input node.groups = int(cfg_parser[section]['groups']) mydict[node.name] = node", "node.input_norm = node.input #node.attr = [] mydict[node.name] = node prev_output", "3 * filters # TODO: Keras BatchNormalization mistakenly refers to", "len(mydict[name].input) >= 1 return getFilters(mydict, mydict[name].input[0]) def readfile(f, len, msg):", "running mean bn_weights[2] # running var ] conv_weights = np.ndarray(", "= 'Shuffle' node.input = [prev_output] node.input_norm = node.input node.groups =", "= [] node.input_norm = [] node.width = int(cfg_parser['net_0']['width']) node.height =", "re.split(r',', cfg_parser[section]['anchors'])] #print(vars(node)) #node.attr = [] mydict[node.name] = node prev_output", "section): activation = sec.get('activation', fallback = 'logistic') from_ = sec.getint('from')", "def unique_config_sections(config_file): \"\"\"Convert all config sections to have unique names.", "= 'BiasAdd' node.input = [prev_output] node.input_norm = node.input #node.attr =", "conv = 'dconv' filters = prev_layer_filters multiplier = sec.getint('multiplier', fallback", "= sec.getint('stride', fallback = 1) node.size = sec.getint('size', node.stride) node.padding", "+ '_bias' node.op = 'BiasAdd' node.input = [prev_output] node.input_norm =", "section node.op = 'Leaky' if activation == 'linear': node.slope =", "mydict[node.name] = node prev_output = node.name mylist.append(section) elif re.match(r'^(pooling|maxpool|avgpool)_\\d+$', section):", "= [] mydict[node.name] = node # start here prev_output =", "# print(vars(node)) # node.attr = [] mydict[node.name] = node #", "'_activation' tmp_parser.add_section(name) tmp_parser.set(name, 'activation', activation) sec_q.put(tmp_parser[name]) mylist.append(name) elif section.startswith('net'): node", "+ str(section_counters[section]) section_counters[section] += 1 line = line.replace(section, 
_section) output_stream.write(line)", "mylist.append(section) pass elif section.startswith('cost'): pass # Configs not currently handled", "'relu': node = MyGraph.MyNode() node.name = section node.op = 'Leaky'", "node.slope = 1 elif activation == 'leaky': node.slope = 0.1", "MyGraph.MyNode() node.name = section node.op = 'Shuffle' node.input = [prev_output]", "padding = sec.getint('padding', fallback = 0) activation = sec.get('activation', fallback", "We would like to set these to Tensorflow order: #", "(filters * 4), section+'-bias')) count += filters if batch_normalize: bn_weights", "buffer=readfile(weights_file, (filters * 12), section+'-batchnorm')) count += 3 * filters", "section node.op = 'BinaryOp' node.op_type = 0 node.input = [prev_output,", "bytes\" % (msg, len)) return f.read(len) def buildGraph(config_path, weights_path): unique_config_file", "sum([getFilters(mydict, mylist[i]) for i in ids]) mydict[node.name] = node prev_output", "node.input_norm = node.input #node.attr = [] node.bias = conv_bias mydict[node.name]", "= int(cfg_parser[section]['num']) node.softmax = int(cfg_parser[section]['softmax']) node.anchors = [float(i) for i", "node = MyGraph.MyNode() node.name = section + '_batch_normalize' node.op =", "1, 2, 3] elif conv == 'dconv': weights_shape = (size,", "configparser.ConfigParser() cfg_parser.read_file(unique_config_file) weights_file = open(weights_path, 'rb') # read out major,", "Caffe-style: # (out_dim, in_dim, height, width) # We would like", "else: assert len(mydict[name].input) >= 1 return getFilters(mydict, mydict[name].input[0]) def readfile(f,", "or activation == 'leaky' or activation == 'relu': node =", "1) pad = sec.getint('pad', fallback = 0) padding = sec.getint('padding',", "node = MyGraph.MyNode() node.name = section node.op = 'DarknetRegion' node.input", "= node.name prev_layer_filters = filters node = MyGraph.MyNode() node.name =", "weights. # Darknet serializes convolutional weights as: # [bias/beta, [gamma,", "MyGraph.MyNode() node.name = section node.op = 'Softmax' node.input = [prev_output]", "node.mode = str(cfg_parser[section]['mode']) node.global_pooling = 0 elif section.startswith('maxpool'): node.mode =", "buffer=readfile(weights_file, (prev_layer_filters * filters * 4), section+'-weight')) node = MyGraph.MyNode()", "or activation == 'relu': node = MyGraph.MyNode() node.name = section", "'BiasAdd' node.input = [prev_output] node.input_norm = node.input #node.attr = []", "idxmap = {x: i for i, x in enumerate(idx_tf2darknet)} idx_dartnet2tf", "suffixes to config sections for compability with configparser. 
\"\"\" from", "bn_weights[1], # running mean bn_weights[2] # running var ] conv_weights", "= np.ndarray( shape=(3, filters), dtype=np.float32, buffer=readfile(weights_file, (filters * 12), section+'-batchnorm'))", "node.classes = int(cfg_parser[section]['classes']) node.num = int(cfg_parser[section]['num']) node.softmax = int(cfg_parser[section]['softmax']) node.anchors", "out filters ', prev_layer_filters) print('loaded {} bytes in weights file'.format(count*4))", "= section + '_activation' tmp_parser.add_section(name) tmp_parser.set(name, 'activation', activation) sec_q.put(tmp_parser[name]) mylist.append(name)", "fin: for line in fin: if line.startswith('['): section = line.strip().strip('[]')", "= section + '_' + str(section_counters[section]) section_counters[section] += 1 line", "node.input #node.attr = [] node.bias = conv_bias mydict[node.name] = node", "dtype=np.float32, buffer=readfile(weights_file, (filters * 4), section+'-bias')) count += filters if", "node.input = [prev_output] node.input_norm = node.input node.groups = int(cfg_parser[section]['groups']) mydict[node.name]", "io.StringIO() with open(config_file) as fin: for line in fin: if", "= 'Leaky' if activation == 'linear': node.slope = 1 elif", "= section node.op = 'NCNNConcat' node.input = [mylist[i] for i", "activation == 'leaky': node.slope = 0.1 elif activation == 'relu':", "defaultdict import io section_counters = defaultdict(int) output_stream = io.StringIO() with", "this section will can be a subsection if section.startswith('activation') or", "[prev_output] node.input_norm = node.input # node.attr = [] node.bias =", "def getFilters(mydict, name): #print('find filters for ', name) if hasattr(mydict[name],", "node.input node.multiplier = fc_data mydict[node.name] = node prev_output = node.name", "node.channels mylist.append(section) elif section.startswith('region'): node = MyGraph.MyNode() node.name = section", "x in enumerate(idx_tf2darknet)} idx_dartnet2tf = [idxmap[i] for i in range(len(idxmap))]", "node.input_norm = node.input mydict[node.name] = node prev_output = node.name if", "= [0, 1, 2] elif conv == 'gconv': weights_shape =", "section + '_batch_normalize' node.op = 'FusedBatchNorm' node.input = [prev_output] node.input_norm", "elif section.startswith('groupwise'): conv = 'gconv' filters = sec.getint('filters', fallback=1) groups", "weights_shape = (size, size, filters) idx_tf2darknet = [0, 1, 2]", "print(' ' + conv, 'bn' if batch_normalize else ' ',", "<reponame>nihui/gen-ncnn-models #! 
/usr/bin/env python # coding: utf-8 import configparser import", "i in cfg_parser[section]['layers'].split(',')] node = MyGraph.MyNode() node.name = section node.op", "#node.attr = [] mydict[node.name] = node prev_output = node.name mylist.append(section)", "= [prev_output] node.input_norm = node.input #node.attr = [] node.bias =", "[] count = 4 import queue for _section in cfg_parser.sections():", "prev_output = node.name mylist.append(section) elif re.match(r'^(pooling|maxpool|avgpool)_\\d+$', section): node = MyGraph.MyNode()", "1) node.input_norm = node.input node.filters = getFilters(mydict, node.input[0]) * node.stride", "= 'DarknetReorg' node.input = [prev_output] node.stride = sec.getint('stride', fallback =", "if activation == 'linear': mylist.append(prev_output) else: tmp_parser = configparser.ConfigParser() name", "Create nodes #conv_layer = np.zeros([1, 1, filters], dtype = np.float32)", "# We would like to set these to Tensorflow order:", "'Unsupported section header type: {}'.format(section)) print(' out filters ', prev_layer_filters)", "mistakenly refers to var # as std. bn_weight_list = [", "node.input_norm = node.input mydict[node.name] = node prev_output = node.name #print('pooling", "node.name mylist.append(section) pass elif section.startswith('cost'): pass # Configs not currently", "multiplier = sec.getint('multiplier', fallback = 1) assert multiplier == 1", "= (node.size-1)//2) if section.startswith('pooling'): node.mode = str(cfg_parser[section]['mode']) node.global_pooling = 0", "fallback=1) groups = sec.getint('groups', fallback = 1) op = 'DepthwiseConv2dNative'", "= [prev_output] node.input_norm = node.input node.multiplier = fc_data mydict[node.name] =", "= filters node = MyGraph.MyNode() node.name = section + '_bias'", "+ '_activation' tmp_parser.add_section(name) tmp_parser.set(name, 'activation', activation) sec_q.put(tmp_parser[name]) mylist.append(name) elif section.startswith('shuffle'):", "0) # padding='same' is equivalent to Darknet pad=1 # padding", "size, filters) idx_tf2darknet = [0, 1, 2] elif conv ==", "# prev_layer_filters no change else: raise ValueError( 'Unknown activation function", "sec.name print('Parsing section {}'.format(section)) # this section will can be", "= 'BiasAdd' node.input = [prev_output] node.input_norm = node.input # node.attr", "if hasattr(mydict[name], 'filters'): return mydict[name].filters else: assert len(mydict[name].input) >= 1", "section+'-weight')) node = MyGraph.MyNode() node.name = section node.op = 'MatMul'", "node.input = [prev_output] node.input_norm = node.input node.multiplier = fc_data mydict[node.name]", "= MyGraph.MyNode() node.name = section node.op = 'DarknetRegion' node.input =", "conv = 'gconv' filters = sec.getint('filters', fallback=1) groups = sec.getint('groups',", "node.input = [] node.input_norm = [] node.width = int(cfg_parser['net_0']['width']) node.height", "in weights file'.format(count*4)) mygraph = MyGraph(mydict) mygraph.type = 'darknet' return", "node.input = [prev_output] node.input_norm = node.input #node.attr = [] node.gamma", "header type: {}'.format(section)) print(' out filters ', prev_layer_filters) print('loaded {}", "activation) sec_q.put(tmp_parser[name]) mylist.append(name) elif section.startswith('net'): node = MyGraph.MyNode() node.name =", "unique_config_sections(config_file): \"\"\"Convert all config sections to have unique names. Adds", "= MyGraph(mydict) mygraph.type = 'darknet' return mygraph if __name__ ==", "unique names. 
Adds unique suffixes to config sections for compability", "node.input) node.input_norm = node.input mydict[node.name] = node prev_output = node.name", "change else: raise ValueError( 'Unknown activation function `{}` in section", "== 'linear': pass elif activation == 'linear' or activation ==", "idx_tf2darknet = [0, 1, 2, 3] elif conv == 'dconv':", "= [conv_weights] if batch_normalize else [ conv_weights, conv_bias ] #", "'conv': weights_shape = (size, size, prev_layer_filters, filters) idx_tf2darknet = [0,", "tmp_parser.set(name, 'activation', activation) sec_q.put(tmp_parser[name]) mylist.append(name) elif section.startswith('net'): node = MyGraph.MyNode()", "section): if section.startswith('convolutional'): conv = 'conv' filters = sec.getint('filters', fallback", "= 0 node.input = [prev_output] node.input_norm = node.input #node.attr =", "to set these to Tensorflow order: # (height, width, in_dim,", "= node.input node.groups = int(cfg_parser[section]['groups']) mydict[node.name] = node prev_output =", "elif section.startswith('maxpool'): node.mode = 'max' node.global_pooling = 0 elif section.startswith('avgpool'):", "these to Tensorflow order: # (height, width, in_dim, out_dim) #", "'gconv': weights_shape = (size, size, prev_layer_filters//groups, filters//groups, groups) idx_tf2darknet =", "# prev_layer_filters no change else: node = MyGraph.MyNode() node.name =", "4] idxmap = {x: i for i, x in enumerate(idx_tf2darknet)}", "mylist.append(name) elif section.startswith('net'): node = MyGraph.MyNode() node.name = section node.op", "node.filters = getFilters(mydict, node.input[0]) * node.stride * node.stride mydict[node.name] =", "coding: utf-8 import configparser import numpy as np import re,sys,os", "= (size, size, filters) idx_tf2darknet = [0, 1, 2] elif", "= node prev_output = node.name mylist.append(section) elif re.match(r'^(pooling|maxpool|avgpool)_\\d+$', section): node", "would like to set these to Tensorflow order: # (height,", "MyGraph.MyNode() node.name = section node.op = 'DarknetNet' node.input = []", "= prev_layer_filters multiplier = sec.getint('multiplier', fallback = 1) assert multiplier", "= node.input mydict[node.name] = node prev_output = node.name mylist.append(section) pass", "np.ndarray( shape=(3, filters), dtype=np.float32, buffer=readfile(weights_file, (filters * 12), section+'-batchnorm')) count", "batch_normalize: bn_weights = np.ndarray( shape=(3, filters), dtype=np.float32, buffer=readfile(weights_file, (filters *", "bn_weights[2] # running var ] conv_weights = np.ndarray( shape=[weights_shape[i] for", "node = MyGraph.MyNode() node.name = section node.op = op node.input", "= 1) stride = sec.getint('stride', fallback = 1) pad =", "node.padding = padding node.strides = [1,stride,stride,1] node.groups = groups node.filters", "(4*4), 'head') mydict = OrderedDict() # record the output of", "np.ndarray( shape=(filters, ), dtype=np.float32, buffer=readfile(weights_file, (filters * 4), section+'-bias')) count", "section node.op = 'MatMul' node.input = [prev_output] node.input_norm = node.input", "= 'NCNNConcat' node.input = [mylist[i] for i in ids] #print('mylist", "= node.name mylist.append(section) prev_layer_filters = node.filters elif section.startswith('reorg'): node =", "section.startswith('activation'): mylist.append(section) elif re.match(r'^(convolutional|depthwise|groupwise)_\\d+$', section): if section.startswith('convolutional'): conv = 'conv'", "= queue.Queue(0) sec_q.put(cfg_parser[_section]) while not sec_q.empty(): sec = sec_q.get() section", 
"'__main__': config_path = sys.argv[1] weights_path = sys.argv[2] mygraph = buildGraph(config_path,", "if batch_normalize: bn_weights = np.ndarray( shape=(3, filters), dtype=np.float32, buffer=readfile(weights_file, (filters", "weights as: # [bias/beta, [gamma, mean, variance], conv_weights] #prev_layer_shape =", "node prev_output = node.name # prev_layer_filters no change else: raise", "ids = [int(i) for i in cfg_parser[section]['layers'].split(',')] node = MyGraph.MyNode()", "in ids] #print('mylist is ', mylist, 'the ids is ',", "sec_q.put(tmp_parser[name]) mylist.append(name) elif section.startswith('shuffle'): node = MyGraph.MyNode() node.name = section", "mydict[node.name] = node prev_output = node.name if activation == 'linear':", "= section + '_bias' node.op = 'BiasAdd' node.input = [prev_output]", "prev_output = node.name prev_layer_filters = filters node = MyGraph.MyNode() node.name", "section.startswith('convolutional'): conv = 'conv' filters = sec.getint('filters', fallback = 1)", "i in re.split(r',', cfg_parser[section]['anchors'])] #print(vars(node)) #node.attr = [] mydict[node.name] =", "fallback = 1) stride = sec.getint('stride', fallback = 1) pad", "for i in range(len(idxmap))] weights_size = np.product(weights_shape) print(' ' +", "= sec.name print('Parsing section {}'.format(section)) # this section will can", "last dim_ordering. if conv == 'conv': weights_shape = (size, size,", "ids, 'node input is ', node.input) node.input_norm = node.input node.axis", "prev_layer_filters = filters node = MyGraph.MyNode() node.name = section +", "activation == 'linear': pass elif activation == 'linear' or activation", "', vars(node)) mylist.append(section) elif section.startswith('route'): ids = [int(i) for i", "mylist = [] count = 4 import queue for _section", "np.product(weights_shape) print(' ' + conv, 'bn' if batch_normalize else '", "print('loaded {} bytes in weights file'.format(count*4)) mygraph = MyGraph(mydict) mygraph.type", "/usr/bin/env python # coding: utf-8 import configparser import numpy as", "= [prev_output] node.input_norm = node.input # node.attr = [] node.bias", "sec = sec_q.get() section = sec.name print('Parsing section {}'.format(section)) #", "[int(i) for i in cfg_parser[section]['layers'].split(',')] node = MyGraph.MyNode() node.name =", "# running var ] conv_weights = np.ndarray( shape=[weights_shape[i] for i", "is \", conv_weights.shape) conv_weights = [conv_weights] if batch_normalize else [", "'logistic') batch_normalize = sec.getint('batch_normalize', 0) # padding='same' is equivalent to", "are serialized Caffe-style: # (out_dim, in_dim, height, width) # We", "from_, 'node input is ', node.input) node.input_norm = node.input mydict[node.name]", "#node.attr = [] mydict[node.name] = node prev_output = node.name #", "node.filters = sum([getFilters(mydict, mylist[i]) for i in ids]) mydict[node.name] =", "elif conv == 'dconv': weights_shape = (size, size, filters) idx_tf2darknet", "major, minor, revision, net.seen readfile(weights_file, (4*4), 'head') mydict = OrderedDict()", "equivalent to Darknet pad=1 # padding = 'same' if pad", "\"\"\" from collections import defaultdict import io section_counters = defaultdict(int)", "is ', ids, 'node input is ', node.input) node.input_norm =", "(size, size, prev_layer_filters//groups, filters//groups, groups) idx_tf2darknet = [0, 1, 2,", "io section_counters = defaultdict(int) output_stream = io.StringIO() with open(config_file) as", "ids is ', ids, 'node input is ', node.input) node.input_norm", "if 
section.startswith('activation') or section.endswith('activation'): activation = sec.get('activation', fallback = 'logistic')", "'MatMul' node.input = [prev_output] node.input_norm = node.input node.multiplier = fc_data", "= section node.op = 'BinaryOp' node.op_type = 0 node.input =", "sec_q.put(tmp_parser[name]) mylist.append(name) elif section.startswith('net'): node = MyGraph.MyNode() node.name = section", "= MyGraph.MyNode() node.name = section node.op = op node.input =", "is ', node.input) node.input_norm = node.input node.axis = 0 node.filters", "MyGraph.MyNode() node.name = section node.op = 'MatMul' node.input = [prev_output]", "'DarknetNet' node.input = [] node.input_norm = [] node.width = int(cfg_parser['net_0']['width'])", "'linear': pass elif activation == 'linear' or activation == 'leaky'", "[prev_output] node.input_norm = node.input node.kernel = conv_weights[0] node.padding = padding", "sec.getint('filters', fallback = 1) groups = 1 op = 'Conv2D'", "node prev_output = node.name mylist.append(section) prev_layer_filters = node.filters elif re.match(r'^(shortcut)_\\d+$',", "node.channels = int(cfg_parser['net_0']['channels']) node.filters = node.channels # print(vars(node)) # node.attr", "of the original layer mylist = [] count = 4", "filters = sec.getint('filters', fallback = 1) groups = 1 op", "node.beta = conv_bias node.mean = bn_weights[1] node.variance = bn_weights[2] mydict[node.name]", "node.name # prev_layer_filters no change else: raise ValueError( 'Unknown activation", "= np.ndarray( shape=[filters], dtype=np.float32, buffer=readfile(weights_file, (filters * 4), section+'-bias')) fc_data", "node.input_norm = node.input node.filters = getFilters(mydict, node.input[0]) * node.stride *", "# node.attr = [] node.bias = bias_data mydict[node.name] = node", "[ bn_weights[0], # scale gamma conv_bias, # shift beta bn_weights[1],", "import configparser import numpy as np import re,sys,os from graph", "= 'Conv2D' elif section.startswith('depthwise'): conv = 'dconv' filters = prev_layer_filters", "1) assert multiplier == 1 groups = filters op =", "), dtype=np.float32, buffer=readfile(weights_file, (filters * 4), section+'-bias')) count += filters", "getFilters(mydict, name): #print('find filters for ', name) if hasattr(mydict[name], 'filters'):", "== 'linear': node.slope = 1 elif activation == 'leaky': node.slope", "== 'conv': weights_shape = (size, size, prev_layer_filters, filters) idx_tf2darknet =", "= sec.getint('pad', fallback = 0) padding = sec.getint('padding', fallback =", "op = 'Conv2D' elif section.startswith('depthwise'): conv = 'dconv' filters =", "node = MyGraph.MyNode() node.name = section node.op = 'Leaky' if", "1, filters], dtype = np.float32) node = MyGraph.MyNode() node.name =", "outputNodes, stopNodes) mygraph.generateDot('YoloV2.dot') # 生成子图对应的代码 mygraph.generateSource('YoloV2', os.path.split(config_path)[1]+'.ncnn', os.path.split(weights_path)[1] + '.ncnn')", "int(cfg_parser['net_0']['channels']) node.filters = node.channels # print(vars(node)) # node.attr = []", "var # as std. 
bn_weight_list = [ bn_weights[0], # scale", "filters//groups, groups) idx_tf2darknet = [0, 1, 2, 3, 4] idxmap", "node.name = section node.op = 'Pooling' node.input = [prev_output] node.input_norm", "[] mydict[node.name] = node prev_output = node.name # prev_layer_filters no", "'BinaryOp' node.op_type = 0 node.input = [prev_output, mylist[from_]] #print('mylist is", "idx_tf2darknet], dtype=np.float32, buffer=readfile(weights_file, (weights_size * 4), section+'-weights')) count += weights_size", "node.op = 'DarknetNet' node.input = [] node.input_norm = [] node.width", "node.input = [prev_output] node.stride = sec.getint('stride', fallback = 1) node.input_norm", "is ', mylist, 'the from_ is ', from_, 'node input", "in idx_tf2darknet], dtype=np.float32, buffer=readfile(weights_file, (weights_size * 4), section+'-weights')) count +=", "'Leaky' if activation == 'linear': node.slope = 1 elif activation", "section+'-bias')) fc_data = np.ndarray( shape=[prev_layer_filters, filters], dtype=np.float32, buffer=readfile(weights_file, (prev_layer_filters *", "# read out major, minor, revision, net.seen readfile(weights_file, (4*4), 'head')", "= 1 elif activation == 'leaky': node.slope = 0.1 elif", "filters for ', name) if hasattr(mydict[name], 'filters'): return mydict[name].filters else:", "python # coding: utf-8 import configparser import numpy as np", "conv = 'conv' filters = sec.getint('filters', fallback = 1) groups", "mylist, 'the ids is ', ids, 'node input is ',", "op node.input = [prev_output] node.input_norm = node.input node.kernel = conv_weights[0]", "padding node.strides = [1,stride,stride,1] node.groups = groups node.filters = filters", "'Shuffle' node.input = [prev_output] node.input_norm = node.input node.groups = int(cfg_parser[section]['groups'])", "section)) if section.startswith('activation'): mylist.append(section) elif re.match(r'^(convolutional|depthwise|groupwise)_\\d+$', section): if section.startswith('convolutional'): conv", "= sec.get('activation', fallback = 'logistic') from_ = sec.getint('from') node =", "section.startswith('shuffle'): node = MyGraph.MyNode() node.name = section node.op = 'Shuffle'", "activation == 'leaky' or activation == 'relu': node = MyGraph.MyNode()", "node.height = int(cfg_parser['net_0']['height']) node.channels = int(cfg_parser['net_0']['channels']) node.filters = node.channels #", "conv_bias, # shift beta bn_weights[1], # running mean bn_weights[2] #", "[0, 1, 2, 3] elif conv == 'dconv': weights_shape =", "filters = sec.getint('filters', fallback=1) groups = sec.getint('groups', fallback = 1)", "= bn_weights[1] node.variance = bn_weights[2] mydict[node.name] = node prev_output =", "tmp_parser.add_section(name) tmp_parser.set(name, 'activation', activation) sec_q.put(tmp_parser[name]) mylist.append(name) elif section.startswith('shuffle'): node =", "np.float32) node = MyGraph.MyNode() node.name = section node.op = op", "MyGraph.MyNode() node.name = section node.op = op node.input = [prev_output]", "section_counters = defaultdict(int) output_stream = io.StringIO() with open(config_file) as fin:", "bn_weights[1] node.variance = bn_weights[2] mydict[node.name] = node prev_output = node.name", "node = MyGraph.MyNode() node.name = section node.op = 'Softmax' node.input", "= section node.op = 'DarknetNet' node.input = [] node.input_norm =", "if activation == 'linear': node.slope = 1 elif activation ==", "with open(config_file) as fin: for line in fin: if line.startswith('['):", "padding='same' is equivalent to Darknet pad=1 # padding = 'same'", "line in 
#! /usr/bin/env python
# coding: utf-8

import configparser
import numpy as np
import re, sys, os
from graph import MyGraph
from collections import OrderedDict


def unique_config_sections(config_file):
    """Convert all config sections to have unique names.
    Adds unique suffixes to config sections for compatibility with configparser.
    """
    from collections import defaultdict
    import io
    section_counters = defaultdict(int)
    output_stream = io.StringIO()
    with open(config_file) as fin:
        for line in fin:
            if line.startswith('['):
                section = line.strip().strip('[]')
                _section = section + '_' + str(section_counters[section])
                section_counters[section] += 1
                line = line.replace(section, _section)
            output_stream.write(line)
    output_stream.seek(0)
    return output_stream


def getFilters(mydict, name):
    #print('find filters for ', name)
    if hasattr(mydict[name], 'filters'):
        return mydict[name].filters
    else:
        assert len(mydict[name].input) >= 1
        return getFilters(mydict, mydict[name].input[0])


def readfile(f, len, msg):
    print(" %s read %d bytes" % (msg, len))
    return f.read(len)


def buildGraph(config_path, weights_path):
    unique_config_file = unique_config_sections(config_path)
    cfg_parser = configparser.ConfigParser()
    cfg_parser.read_file(unique_config_file)

    weights_file = open(weights_path, 'rb')
    # read out major, minor, revision, net.seen
    readfile(weights_file, (4*4), 'head')

    mydict = OrderedDict()
    # record the output of the original layer
    mylist = []
    count = 4
    import queue
    for _section in cfg_parser.sections():
        sec_q = queue.Queue(0)
        sec_q.put(cfg_parser[_section])

        while not sec_q.empty():
            sec = sec_q.get()
            section = sec.name
            print('Parsing section {}'.format(section))

            # this section can be a subsection
            if section.startswith('activation') or section.endswith('activation'):
                activation = sec.get('activation', fallback='logistic')
                if activation == 'linear':
                    pass
                elif activation == 'linear' or activation == 'leaky' or activation == 'relu':
                    node = MyGraph.MyNode()
                    node.name = section
                    node.op = 'Leaky'
                    if activation == 'linear':
                        node.slope = 1
                    elif activation == 'leaky':
                        node.slope = 0.1
                    elif activation == 'relu':
                        node.slope = 0
                    node.input = [prev_output]
                    node.input_norm = node.input
                    #node.attr = []
                    mydict[node.name] = node
                    prev_output = node.name
                else:
                    raise ValueError(
                        'Unknown activation function `{}` in section {}'.format(
                            activation, section))
                if section.startswith('activation'):
                    mylist.append(section)

            elif re.match(r'^(convolutional|depthwise|groupwise)_\d+$', section):
                if section.startswith('convolutional'):
                    conv = 'conv'
                    filters = sec.getint('filters', fallback=1)
                    groups = 1
                    op = 'Conv2D'
                elif section.startswith('depthwise'):
                    conv = 'dconv'
                    filters = prev_layer_filters
                    multiplier = sec.getint('multiplier', fallback=1)
                    assert multiplier == 1
                    groups = filters
                    op = 'DepthwiseConv2dNative'
                elif section.startswith('groupwise'):
                    conv = 'gconv'
                    filters = sec.getint('filters', fallback=1)
                    groups = sec.getint('groups', fallback=1)
                    op = 'DepthwiseConv2dNative'

                size = sec.getint('size', fallback=1)
                stride = sec.getint('stride', fallback=1)
                pad = sec.getint('pad', fallback=0)
                padding = sec.getint('padding', fallback=0)
                activation = sec.get('activation', fallback='logistic')
                batch_normalize = sec.getint('batch_normalize', 0)

                # padding='same' is equivalent to Darknet pad=1
                # padding = 'same' if pad == 1 else 'valid'
                if pad:
                    padding = size//2

                # Setting weights.
                # Darknet serializes convolutional weights as:
                # [bias/beta, [gamma, mean, variance], conv_weights]
                #prev_layer_shape = prev_layer.shape

                # TODO: This assumes channel last dim_ordering.
                if conv == 'conv':
                    weights_shape = (size, size, prev_layer_filters, filters)
                    idx_tf2darknet = [0, 1, 2, 3]
                elif conv == 'dconv':
                    weights_shape = (size, size, filters)
                    idx_tf2darknet = [0, 1, 2]
                elif conv == 'gconv':
                    weights_shape = (size, size, prev_layer_filters//groups, filters//groups, groups)
                    idx_tf2darknet = [0, 1, 2, 3, 4]

                idxmap = {x: i for i, x in enumerate(idx_tf2darknet)}
                idx_dartnet2tf = [idxmap[i] for i in range(len(idxmap))]
                weights_size = np.product(weights_shape)

                print(' ' + conv, 'bn' if batch_normalize else ' ', activation, weights_shape)

                conv_bias = np.ndarray(
                    shape=(filters, ),
                    dtype=np.float32,
                    buffer=readfile(weights_file, (filters * 4), section+'-bias'))
                count += filters

                if batch_normalize:
                    bn_weights = np.ndarray(
                        shape=(3, filters),
                        dtype=np.float32,
                        buffer=readfile(weights_file, (filters * 12), section+'-batchnorm'))
                    count += 3 * filters

                    # TODO: Keras BatchNormalization mistakenly refers to var
                    # as std.
                    bn_weight_list = [
                        bn_weights[0],  # scale gamma
                        conv_bias,      # shift beta
                        bn_weights[1],  # running mean
                        bn_weights[2]   # running var
                    ]

                conv_weights = np.ndarray(
                    shape=[weights_shape[i] for i in idx_tf2darknet],
                    dtype=np.float32,
                    buffer=readfile(weights_file, (weights_size * 4), section+'-weights'))
                count += weights_size

                # DarkNet conv_weights are serialized Caffe-style:
                # (out_dim, in_dim, height, width)
                # We would like to set these to Tensorflow order:
                # (height, width, in_dim, out_dim)
                # TODO: Add check for Theano dim ordering.
                #print("the darknet shape is ", conv_weights.shape)
                conv_weights = np.transpose(conv_weights, idx_dartnet2tf)
                #print("the tf shape is ", conv_weights.shape)
                conv_weights = [conv_weights] if batch_normalize else [
                    conv_weights, conv_bias
                ]

                # Create nodes
                #conv_layer = np.zeros([1, 1, filters], dtype = np.float32)
                node = MyGraph.MyNode()
                node.name = section
                node.op = op
                node.input = [prev_output]
                node.input_norm = node.input
                node.kernel = conv_weights[0]
                node.padding = padding
                node.strides = [1, stride, stride, 1]
                node.groups = groups
                node.filters = filters
                mydict[node.name] = node
                prev_output = node.name
                prev_layer_filters = filters

                if batch_normalize:
                    node = MyGraph.MyNode()
                    node.name = section + '_batch_normalize'
                    node.op = 'FusedBatchNorm'
                    node.input = [prev_output]
                    node.input_norm = node.input
                    #node.attr = []
                    node.gamma = bn_weights[0]
                    node.beta = conv_bias
                    node.mean = bn_weights[1]
                    node.variance = bn_weights[2]
                    mydict[node.name] = node
                    prev_output = node.name
                    # prev_layer_filters no change
                else:
                    node = MyGraph.MyNode()
                    node.name = section + '_bias'
                    node.op = 'BiasAdd'
                    node.input = [prev_output]
                    node.input_norm = node.input
                    #node.attr = []
                    node.bias = conv_bias
                    mydict[node.name] = node
                    prev_output = node.name

                if activation == 'linear':
                    mylist.append(prev_output)
                else:
                    tmp_parser = configparser.ConfigParser()
                    name = section + '_activation'
                    tmp_parser.add_section(name)
                    tmp_parser.set(name, 'activation', activation)
                    sec_q.put(tmp_parser[name])
                    # NOTE: this section has relative reference
                    mylist.append(name)

            elif section.startswith('connected'):
                activation = sec.get('activation', fallback='linear')
                filters = sec.getint('output', 2)

                bias_data = np.ndarray(
                    shape=[filters],
                    dtype=np.float32,
                    buffer=readfile(weights_file, (filters * 4), section+'-bias'))
                fc_data = np.ndarray(
                    shape=[prev_layer_filters, filters],
                    dtype=np.float32,
                    buffer=readfile(weights_file, (prev_layer_filters * filters * 4), section+'-weight'))

                node = MyGraph.MyNode()
                node.name = section
                node.op = 'MatMul'
                node.input = [prev_output]
                node.input_norm = node.input
                node.multiplier = fc_data
                mydict[node.name] = node
                prev_output = node.name
                prev_layer_filters = filters

                node = MyGraph.MyNode()
                node.name = section + '_bias'
                node.op = 'BiasAdd'
                node.input = [prev_output]
                node.input_norm = node.input
                # node.attr = []
                node.bias = bias_data
                mydict[node.name] = node
                prev_output = node.name

                if activation == 'linear':
                    mylist.append(prev_output)
                else:
                    tmp_parser = configparser.ConfigParser()
                    name = section + '_activation'
                    tmp_parser.add_section(name)
                    tmp_parser.set(name, 'activation', activation)
                    sec_q.put(tmp_parser[name])
                    mylist.append(name)

            elif section.startswith('shuffle'):
                node = MyGraph.MyNode()
                node.name = section
                node.op = 'Shuffle'
                node.input = [prev_output]
                node.input_norm = node.input
                node.groups = int(cfg_parser[section]['groups'])
                mydict[node.name] = node
                prev_output = node.name
                mylist.append(section)

            elif re.match(r'^(pooling|maxpool|avgpool)_\d+$', section):
                node = MyGraph.MyNode()
                node.stride = sec.getint('stride', fallback=1)
                node.size = sec.getint('size', node.stride)
                node.padding = sec.getint('padding', fallback=(node.size-1)//2)
                if section.startswith('pooling'):
                    node.mode = str(cfg_parser[section]['mode'])
                    node.global_pooling = 0
                elif section.startswith('maxpool'):
                    node.mode = 'max'
                    node.global_pooling = 0
                elif section.startswith('avgpool'):
                    node.mode = 'avg'
                    node.global_pooling = 1
                node.name = section
                node.op = 'Pooling'
                node.input = [prev_output]
                node.input_norm = node.input
                mydict[node.name] = node
                prev_output = node.name
                #print('pooling ', vars(node))
                mylist.append(section)

            elif section.startswith('route'):
                ids = [int(i) for i in cfg_parser[section]['layers'].split(',')]
                node = MyGraph.MyNode()
                node.name = section
                node.op = 'NCNNConcat'
                node.input = [mylist[i] for i in ids]
                #print('mylist is ', mylist, 'the ids is ', ids, 'node input is ', node.input)
                node.input_norm = node.input
                node.axis = 0
                node.filters = sum([getFilters(mydict, mylist[i]) for i in ids])
                mydict[node.name] = node
                prev_output = node.name
                mylist.append(section)
                prev_layer_filters = node.filters

            elif section.startswith('reorg'):
                node = MyGraph.MyNode()
                node.name = section
                node.op = 'DarknetReorg'
                node.input = [prev_output]
                node.stride = sec.getint('stride', fallback=1)
                node.input_norm = node.input
                node.filters = getFilters(mydict, node.input[0]) * node.stride * node.stride
                mydict[node.name] = node
                prev_output = node.name
                mylist.append(section)
                prev_layer_filters = node.filters

            elif re.match(r'^(shortcut)_\d+$', section):
                activation = sec.get('activation', fallback='logistic')
                from_ = sec.getint('from')
                node = MyGraph.MyNode()
                node.name = section
                node.op = 'BinaryOp'
                node.op_type = 0
                node.input = [prev_output, mylist[from_]]
                #print('mylist is ', mylist, 'the from_ is ', from_, 'node input is ', node.input)
                node.input_norm = node.input
                mydict[node.name] = node
                prev_output = node.name
                if activation == 'linear':
                    mylist.append(prev_output)
                else:
                    tmp_parser = configparser.ConfigParser()
                    name = section + '_activation'
                    tmp_parser.add_section(name)
                    tmp_parser.set(name, 'activation', activation)
                    sec_q.put(tmp_parser[name])
                    mylist.append(name)

            elif section.startswith('net'):
                node = MyGraph.MyNode()
                node.name = section
                node.op = 'DarknetNet'
                node.input = []
                node.input_norm = []
                node.width = int(cfg_parser['net_0']['width'])
                node.height = int(cfg_parser['net_0']['height'])
                node.channels = int(cfg_parser['net_0']['channels'])
                node.filters = node.channels
                # print(vars(node))
                # node.attr = []
                mydict[node.name] = node
                # start here
                prev_output = node.name
                prev_layer_filters = node.channels
                mylist.append(section)

            elif section.startswith('region'):
                node = MyGraph.MyNode()
                node.name = section
                node.op = 'DarknetRegion'
                node.input = [prev_output]
                node.input_norm = node.input
                node.classes = int(cfg_parser[section]['classes'])
                node.num = int(cfg_parser[section]['num'])
                node.softmax = int(cfg_parser[section]['softmax'])
                node.anchors = [float(i) for i in re.split(r',', cfg_parser[section]['anchors'])]
                #print(vars(node))
                #node.attr = []
                mydict[node.name] = node
                prev_output = node.name
                mylist.append(section)

            elif section.startswith('softmax'):
                node = MyGraph.MyNode()
                node.name = section
                node.op = 'Softmax'
                node.input = [prev_output]
                node.input_norm = node.input
                mydict[node.name] = node
                prev_output = node.name
                mylist.append(section)
                pass

            elif section.startswith('cost'):
                pass  # Configs not currently handled during model definition.

            else:
                raise ValueError(
                    'Unsupported section header type: {}'.format(section))

            print(' out filters ', prev_layer_filters)

    print('loaded {} bytes in weights file'.format(count*4))

    mygraph = MyGraph(mydict)
    mygraph.type = 'darknet'
    return mygraph


if __name__ == '__main__':
    config_path = sys.argv[1]
    weights_path = sys.argv[2]

    mygraph = buildGraph(config_path, weights_path)

    # define the output, input and stop nodes needed for the subgraph
    outputNodes = ['region_0', 'softmax_0']
    stopNodes = []
    inputNodes = ['darknet_0']

    mygraph.extractSubGraph(inputNodes, outputNodes, stopNodes)
    mygraph.generateDot('YoloV2.dot')
    # generate the code for the subgraph
    mygraph.generateSource('YoloV2', os.path.split(config_path)[1] + '.ncnn',
                           os.path.split(weights_path)[1] + ...)  # last argument truncated in the source dump
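A quick way to sanity-check the renaming step: unique_config_sections() gives every repeated Darknet header a numeric suffix so configparser can tell the sections apart, which is also why the main block refers to 'net_0', 'region_0' and 'softmax_0'. The sketch below is not part of the converter; the tiny cfg content is made up for illustration and it assumes the functions above are available in the same module.

import configparser, tempfile, os

demo_cfg = (
    "[net]\nwidth=416\nheight=416\nchannels=3\n"
    "[convolutional]\nfilters=16\n"
    "[convolutional]\nfilters=32\n"
    "[region]\nclasses=20\n"
)
tmp = tempfile.NamedTemporaryFile('w', suffix='.cfg', delete=False)
tmp.write(demo_cfg)
tmp.close()

parser = configparser.ConfigParser()
parser.read_file(unique_config_sections(tmp.name))
print(parser.sections())  # ['net_0', 'convolutional_0', 'convolutional_1', 'region_0']
os.unlink(tmp.name)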
from django.contrib.auth.models import Permission, User
from django.db import models


class Album(models.Model):
    user = models.ForeignKey(User, default=1, on_delete=models.CASCADE)
    artist = models.CharField(max_length=250)
    album_title = models.CharField(max_length=500)
    genre = models.CharField(max_length=100)
    album_logo = models.FileField(default="avatar.jpg")
    album_visibility = models.CharField(max_length=100, default="private")
    is_favorite = models.BooleanField(default=False)

    def __str__(self):
        return self.album_title + '-' + self.artist + '-' + self.genre


class Song(models.Model):
    user = models.ForeignKey(User, default=1, on_delete=models.CASCADE)
    album = models.ForeignKey(Album, on_delete=models.CASCADE, null=True)
    song_title = models.CharField(max_length=250)
    audio_file = models.FileField(default='')
    song_visibility = models.CharField(max_length=100, default="private")
    is_favorite = models.BooleanField(default=False)

    def __str__(self):
        return self.song_title
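For reference, a minimal way to exercise these models from the Django shell. The app label ('music'), the user lookup and the field values are assumptions made up for this example, not part of the models above.

from django.contrib.auth.models import User
from music.models import Album, Song  # assumes the app is named `music`

user = User.objects.first()
album = Album.objects.create(user=user, artist='Queen',
                             album_title='A Night at the Opera', genre='Rock')
song = Song.objects.create(user=user, album=album, song_title='Bohemian Rhapsody')

print(album)  # "A Night at the Opera-Queen-Rock"  (album_title-artist-genre)
print(song)   # "Bohemian Rhapsody"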
import datetime
import calendar
import requests
import pandas as pd
import json
import os.path
import time
import MySQLdb as M
from gdax_history import timestamp_to_utcstr


def connect_to_db():
    config = json.load(open('dbconn.json'))["mysql"]
    db = M.connect(host=config["host"], user=config["user"],
                   passwd=config["password"], db=config["database"])
    return db


def write_to_db(df, db):
    print "Write %d entries to database." % df.shape[0]
    cur = db.cursor()
    try:
        for row in df.itertuples():
            ts = row.Time / 1000
            cur.execute(
                """INSERT INTO finex_history (timestamp, open, close, high, low, volume, utc_datetime)
                   VALUES (%s, %s, %s, %s, %s, %s, %s)""",
                [ts, row.Open, row.Close, row.High, row.Low, row.Volume, timestamp_to_utcstr(ts)])
        db.commit()
        print "Write successfully!\n"
    except (M.Error, M.Warning) as e:
        print e
        db.rollback()


def collect_data(start, end):
    starttime = datetime.datetime.strptime(start, '%m/%d/%Y')
    endtime = datetime.datetime.strptime(end, '%m/%d/%Y')
    start_unixtime = calendar.timegm(starttime.utctimetuple())
    end_unixtime = calendar.timegm(endtime.utctimetuple())
    track_time = time.time()  # because bitstamp only allows 10 requests per minute. Take rest if we are faster than that
    count = 0
    df = pd.DataFrame(data=[], columns=['Time', 'Open', 'Close', 'High', 'Low', 'Volume'])
    while (start_unixtime < end_unixtime):
        cur_end_unixtime = start_unixtime + 60 * 999  # 60*60*24*30 #30 days at a time
        if (cur_end_unixtime > end_unixtime):
            cur_end_unixtime = end_unixtime  # if the time is in future.
        url = 'https://api.bitfinex.com/v2/candles/trade:1m:tBTCUSD/hist?start={}&end={}&limit=1000'.format(
            str(start_unixtime) + "000", str(cur_end_unixtime) + "000")  # 1 hour can be changed to any timeframe
        response = requests.get(url)
        data = response.json()
        df_tmp = pd.DataFrame(data)
        df_tmp.columns = ['Time', 'Open', 'Close', 'High', 'Low', 'Volume']
        #df.set_index('Time')
        df = pd.concat([df, df_tmp])
        start_unixtime = cur_end_unixtime + 60  # to prevent duplicates
        count = count + 1
        if (count == 10):  # if 10 requests are made
            count = 0  # reset it
            diff = time.time() - track_time
            if (diff <= 60):
                print('Sleeping for {} seconds'.format(str(60 - diff)))
                time.sleep(60 - diff)  # sleep
            track_time = time.time()  # bitstamp limits to 10 requests per minute
    df = df.sort_values(by=['Time'])
    return df


def main():
    db = connect_to_db()
    df = collect_data(start='09/24/2018', end='09/26/2018')
    write_to_db(df, db)
    db.close()


if __name__ == '__main__':
    main()
url = 'https://api.bitfinex.com/v2/candles/trade:1m:tBTCUSD/hist?start={}&end={}&limit=1000'.format(str(start_unixtime) +", "'Open', 'Close', 'High', 'Low', 'Volume']) while (start_unixtime < end_unixtime): cur_end_unixtime", "60 #to prevent duplicates count = count + 1 if", "'%m/%d/%Y') endtime = datetime.datetime.strptime(end, '%m/%d/%Y') start_unixtime = calendar.timegm(starttime.utctimetuple()) end_unixtime =", "M.Warning) as e: print e db.rollback() def collect_data(start, end): starttime", "db = connect_to_db() df = collect_data(start = '09/24/2018', end =", "- track_time if (diff <= 60): print('Sleeping for {} seconds'.format(str(60", "are made count = 0 #reset it diff = time.time()", "= ['Time', 'Open', 'Close', 'High', 'Low', 'Volume'] #df.set_index('Time') df =", "db.rollback() def collect_data(start, end): starttime = datetime.datetime.strptime(start, '%m/%d/%Y') endtime =", "main(): db = connect_to_db() df = collect_data(start = '09/24/2018', end", "open, close, high, low, volume, utc_datetime) VALUES (%s, %s, %s,", "0 #reset it diff = time.time() - track_time if (diff", "\"Write successfully!\\n\" except (M.Error, M.Warning) as e: print e db.rollback()", "time.time() - track_time if (diff <= 60): print('Sleeping for {}", "= json.load(open('dbconn.json'))[\"mysql\"] db = M.connect(host = config[\"host\"], user = config[\"user\"],", "% df.shape[0] cur = db.cursor() try: for row in df.itertuples():", "track_time if (diff <= 60): print('Sleeping for {} seconds'.format(str(60 -", "999 #60*60*24*30 #30 days at a time if (cur_end_unixtime >", "(diff <= 60): print('Sleeping for {} seconds'.format(str(60 - diff))) time.sleep(60", "MySQLdb as M from gdax_history import timestamp_to_utcstr def connect_to_db(): config", "= connect_to_db() df = collect_data(start = '09/24/2018', end = '09/26/2018')", "cur_end_unixtime = start_unixtime + 60 * 999 #60*60*24*30 #30 days", "pd.DataFrame(data = [], columns = ['Time', 'Open', 'Close', 'High', 'Low',", "end_unixtime): cur_end_unixtime = end_unixtime #if the time is in future.", "row.Volume, timestamp_to_utcstr(ts)]) db.commit() print \"Write successfully!\\n\" except (M.Error, M.Warning) as", "config[\"database\"]) return db def write_to_db(df, db): print \"Write %d entries", "row.Open, row.Close, row.High, row.Low, row.Volume, timestamp_to_utcstr(ts)]) db.commit() print \"Write successfully!\\n\"", "Take rest if we are faster than that count =", "= pd.DataFrame(data = [], columns = ['Time', 'Open', 'Close', 'High',", "(cur_end_unixtime > end_unixtime): cur_end_unixtime = end_unixtime #if the time is", "%s, %s, %s, %s)\"\"\", [ts, row.Open, row.Close, row.High, row.Low, row.Volume,", "['Time', 'Open', 'Close', 'High', 'Low', 'Volume'] #df.set_index('Time') df = pd.concat([df,", "< end_unixtime): cur_end_unixtime = start_unixtime + 60 * 999 #60*60*24*30", "[], columns = ['Time', 'Open', 'Close', 'High', 'Low', 'Volume']) while", "'%m/%d/%Y') start_unixtime = calendar.timegm(starttime.utctimetuple()) end_unixtime = calendar.timegm(endtime.utctimetuple()) track_time = time.time()", "e db.rollback() def collect_data(start, end): starttime = datetime.datetime.strptime(start, '%m/%d/%Y') endtime", "= 0 #reset it diff = time.time() - track_time if", "time.sleep(60 - diff) #sleep track_time = time.time() #bitstamp limits to", "limits to 10 requests per minute df = df.sort_values(by =", "> end_unixtime): cur_end_unixtime = end_unixtime #if the time is in", "(timestamp, open, close, high, low, volume, utc_datetime) VALUES (%s, %s,", "= 
calendar.timegm(starttime.utctimetuple()) end_unixtime = calendar.timegm(endtime.utctimetuple()) track_time = time.time() #because bitstamp", "10 requests per minute. Take rest if we are faster", "start_unixtime + 60 * 999 #60*60*24*30 #30 days at a", "if (count == 10): #if 10 requests are made count", "time if (cur_end_unixtime > end_unixtime): cur_end_unixtime = end_unixtime #if the", "duplicates count = count + 1 if (count == 10):", "== 10): #if 10 requests are made count = 0", "\"Write %d entries to database.\" % df.shape[0] cur = db.cursor()", "end_unixtime): cur_end_unixtime = start_unixtime + 60 * 999 #60*60*24*30 #30", "'Low', 'Volume'] #df.set_index('Time') df = pd.concat([df, df_tmp]) start_unixtime = cur_end_unixtime", "start_unixtime = cur_end_unixtime + 60 #to prevent duplicates count =", "count = 0 #reset it diff = time.time() - track_time", "60 * 999 #60*60*24*30 #30 days at a time if", "import MySQLdb as M from gdax_history import timestamp_to_utcstr def connect_to_db():", "requests import pandas as pd import json import os.path import", "rest if we are faster than that count = 0", "def connect_to_db(): config = json.load(open('dbconn.json'))[\"mysql\"] db = M.connect(host = config[\"host\"],", "pandas as pd import json import os.path import time import", "response = requests.get(url) data = response.json() df_tmp = pd.DataFrame(data) df_tmp.columns", "return db def write_to_db(df, db): print \"Write %d entries to", "[ts, row.Open, row.Close, row.High, row.Low, row.Volume, timestamp_to_utcstr(ts)]) db.commit() print \"Write", "we are faster than that count = 0 df =", "%s, %s)\"\"\", [ts, row.Open, row.Close, row.High, row.Low, row.Volume, timestamp_to_utcstr(ts)]) db.commit()", "+ 1 if (count == 10): #if 10 requests are", "changed to any timeframe response = requests.get(url) data = response.json()", "/ 1000 cur.execute( \"\"\"INSERT INTO finex_history (timestamp, open, close, high,", "M.connect(host = config[\"host\"], user = config[\"user\"], passwd = config[\"password\"], db", "count = count + 1 if (count == 10): #if", "'Open', 'Close', 'High', 'Low', 'Volume'] #df.set_index('Time') df = pd.concat([df, df_tmp])", "diff) #sleep track_time = time.time() #bitstamp limits to 10 requests", "to any timeframe response = requests.get(url) data = response.json() df_tmp", "prevent duplicates count = count + 1 if (count ==", "pd import json import os.path import time import MySQLdb as", "connect_to_db(): config = json.load(open('dbconn.json'))[\"mysql\"] db = M.connect(host = config[\"host\"], user", "= config[\"user\"], passwd = config[\"password\"], db = config[\"database\"]) return db", "(start_unixtime < end_unixtime): cur_end_unixtime = start_unixtime + 60 * 999", "#sleep track_time = time.time() #bitstamp limits to 10 requests per", "end_unixtime = calendar.timegm(endtime.utctimetuple()) track_time = time.time() #because bitstamp only allows", "= df.sort_values(by = ['Time']) return df def main(): db =", "= time.time() #because bitstamp only allows 10 requests per minute.", "row.Time / 1000 cur.execute( \"\"\"INSERT INTO finex_history (timestamp, open, close,", "gdax_history import timestamp_to_utcstr def connect_to_db(): config = json.load(open('dbconn.json'))[\"mysql\"] db =", "'Volume'] #df.set_index('Time') df = pd.concat([df, df_tmp]) start_unixtime = cur_end_unixtime +", "10): #if 10 requests are made count = 0 #reset", "track_time = time.time() #bitstamp limits to 10 requests per minute", "df = collect_data(start = '09/24/2018', end = '09/26/2018') write_to_db(df, 
db)", "only allows 10 requests per minute. Take rest if we", "df = pd.concat([df, df_tmp]) start_unixtime = cur_end_unixtime + 60 #to", "timestamp_to_utcstr def connect_to_db(): config = json.load(open('dbconn.json'))[\"mysql\"] db = M.connect(host =", "seconds'.format(str(60 - diff))) time.sleep(60 - diff) #sleep track_time = time.time()", "calendar.timegm(starttime.utctimetuple()) end_unixtime = calendar.timegm(endtime.utctimetuple()) track_time = time.time() #because bitstamp only", "= count + 1 if (count == 10): #if 10", "= ['Time', 'Open', 'Close', 'High', 'Low', 'Volume']) while (start_unixtime <", "is in future. url = 'https://api.bitfinex.com/v2/candles/trade:1m:tBTCUSD/hist?start={}&end={}&limit=1000'.format(str(start_unixtime) + \"000\", str(cur_end_unixtime) +", "end = '09/26/2018') write_to_db(df, db) db.close() if __name__ == \"__main__\":", "df.itertuples(): ts = row.Time / 1000 cur.execute( \"\"\"INSERT INTO finex_history", "'Low', 'Volume']) while (start_unixtime < end_unixtime): cur_end_unixtime = start_unixtime +", "config[\"host\"], user = config[\"user\"], passwd = config[\"password\"], db = config[\"database\"])", "%s)\"\"\", [ts, row.Open, row.Close, row.High, row.Low, row.Volume, timestamp_to_utcstr(ts)]) db.commit() print", "cur = db.cursor() try: for row in df.itertuples(): ts =", "time.time() #because bitstamp only allows 10 requests per minute. Take", "it diff = time.time() - track_time if (diff <= 60):", "print e db.rollback() def collect_data(start, end): starttime = datetime.datetime.strptime(start, '%m/%d/%Y')", "= config[\"host\"], user = config[\"user\"], passwd = config[\"password\"], db =", "return df def main(): db = connect_to_db() df = collect_data(start", "#bitstamp limits to 10 requests per minute df = df.sort_values(by", "\"000\") #1 hour can be changed to any timeframe response", "%s, %s, %s, %s, %s)\"\"\", [ts, row.Open, row.Close, row.High, row.Low,", "= [], columns = ['Time', 'Open', 'Close', 'High', 'Low', 'Volume'])", "a time if (cur_end_unixtime > end_unixtime): cur_end_unixtime = end_unixtime #if", "write_to_db(df, db): print \"Write %d entries to database.\" % df.shape[0]", "cur_end_unixtime = end_unixtime #if the time is in future. 
url", "= datetime.datetime.strptime(start, '%m/%d/%Y') endtime = datetime.datetime.strptime(end, '%m/%d/%Y') start_unixtime = calendar.timegm(starttime.utctimetuple())", "pd.concat([df, df_tmp]) start_unixtime = cur_end_unixtime + 60 #to prevent duplicates", "= '09/26/2018') write_to_db(df, db) db.close() if __name__ == \"__main__\": main()", "user = config[\"user\"], passwd = config[\"password\"], db = config[\"database\"]) return", "%s, %s, %s, %s, %s, %s)\"\"\", [ts, row.Open, row.Close, row.High,", "collect_data(start, end): starttime = datetime.datetime.strptime(start, '%m/%d/%Y') endtime = datetime.datetime.strptime(end, '%m/%d/%Y')", "are faster than that count = 0 df = pd.DataFrame(data", "if (cur_end_unixtime > end_unixtime): cur_end_unixtime = end_unixtime #if the time", "config = json.load(open('dbconn.json'))[\"mysql\"] db = M.connect(host = config[\"host\"], user =", "= datetime.datetime.strptime(end, '%m/%d/%Y') start_unixtime = calendar.timegm(starttime.utctimetuple()) end_unixtime = calendar.timegm(endtime.utctimetuple()) track_time", "row.High, row.Low, row.Volume, timestamp_to_utcstr(ts)]) db.commit() print \"Write successfully!\\n\" except (M.Error,", "+ 60 #to prevent duplicates count = count + 1", "df = df.sort_values(by = ['Time']) return df def main(): db", "import requests import pandas as pd import json import os.path", "str(cur_end_unixtime) + \"000\") #1 hour can be changed to any", "db.cursor() try: for row in df.itertuples(): ts = row.Time /", "requests per minute. Take rest if we are faster than", "= end_unixtime #if the time is in future. url =", "time is in future. url = 'https://api.bitfinex.com/v2/candles/trade:1m:tBTCUSD/hist?start={}&end={}&limit=1000'.format(str(start_unixtime) + \"000\", str(cur_end_unixtime)", "count + 1 if (count == 10): #if 10 requests", "row in df.itertuples(): ts = row.Time / 1000 cur.execute( \"\"\"INSERT", "(count == 10): #if 10 requests are made count =", "def main(): db = connect_to_db() df = collect_data(start = '09/24/2018',", "'09/24/2018', end = '09/26/2018') write_to_db(df, db) db.close() if __name__ ==", "high, low, volume, utc_datetime) VALUES (%s, %s, %s, %s, %s,", "= response.json() df_tmp = pd.DataFrame(data) df_tmp.columns = ['Time', 'Open', 'Close',", "+ \"000\") #1 hour can be changed to any timeframe", "= config[\"password\"], db = config[\"database\"]) return db def write_to_db(df, db):", "url = 'https://api.bitfinex.com/v2/candles/trade:1m:tBTCUSD/hist?start={}&end={}&limit=1000'.format(str(start_unixtime) + \"000\", str(cur_end_unixtime) + \"000\") #1 hour", "end): starttime = datetime.datetime.strptime(start, '%m/%d/%Y') endtime = datetime.datetime.strptime(end, '%m/%d/%Y') start_unixtime", "try: for row in df.itertuples(): ts = row.Time / 1000", "<filename>finex_history.py import datetime import calendar import requests import pandas as", "= config[\"database\"]) return db def write_to_db(df, db): print \"Write %d", "def write_to_db(df, db): print \"Write %d entries to database.\" %", "config[\"user\"], passwd = config[\"password\"], db = config[\"database\"]) return db def", "in future. 
url = 'https://api.bitfinex.com/v2/candles/trade:1m:tBTCUSD/hist?start={}&end={}&limit=1000'.format(str(start_unixtime) + \"000\", str(cur_end_unixtime) + \"000\")", "def collect_data(start, end): starttime = datetime.datetime.strptime(start, '%m/%d/%Y') endtime = datetime.datetime.strptime(end,", "data = response.json() df_tmp = pd.DataFrame(data) df_tmp.columns = ['Time', 'Open',", "60): print('Sleeping for {} seconds'.format(str(60 - diff))) time.sleep(60 - diff)", "print('Sleeping for {} seconds'.format(str(60 - diff))) time.sleep(60 - diff) #sleep" ]
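# ---------------------------------------------------------------------------
# Illustrative note (not part of the original script): connect_to_db() above
# reads its credentials from 'dbconn.json' under the "mysql" key. A minimal
# sketch of a file with the expected keys is written below; the host, user,
# password, and database values are placeholders, not the project's real
# settings.
# ---------------------------------------------------------------------------
import json

example_dbconn = {
    "mysql": {
        "host": "localhost",           # placeholder
        "user": "finex_user",          # placeholder
        "password": "change-me",       # placeholder
        "database": "crypto_history",  # placeholder
    }
}
with open('dbconn.json', 'w') as f:
    json.dump(example_dbconn, f, indent=2)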
[ "appropriate topic prefix would be, and how frequently Kafka #", "configures the Kafka Connect connector\"\"\" logging.debug(\"Creating or updating kafka connect", "was given resp.raise_for_status() logging.info(\"-------Connector created successfully-------\") if __name__ == \"__main__\":", "configure_connector(): \"\"\"Starts and configures the Kafka Connect connector\"\"\" logging.debug(\"Creating or", "settings import Settings logger = logging.getLogger(__name__) KAFKA_CONNECT_URL = f\"{Settings.URLs.KAFKA_CONNECT_URL}/connectors\" CONNECTOR_NAME", "\"mode\": \"incrementing\", \"incrementing.column.name\": \"stop_id\", } # TODO: Complete the Kafka", "logging.debug(\"Connector already created skipping recreation\") return config = { \"connector.class\":", "incrementing column name. # Make sure to think about what", "# TODO: Complete the Kafka Connect Config below. # Directions:", "an appropriate topic prefix would be, and how frequently Kafka", "about what an appropriate topic prefix would be, and how", "# Ensure a healthy response was given resp.raise_for_status() logging.info(\"-------Connector created", "KAFKA_CONNECT_URL, headers={\"Content-Type\": \"application/json\"}, data=data, ) # Ensure a healthy response", "Make sure to think about what an appropriate topic prefix", "Kafka Connect connector\"\"\" logging.debug(\"Creating or updating kafka connect connector...\") resp", "as the incrementing column name. # Make sure to think", "already created skipping recreation\") return config = { \"connector.class\": \"io.confluent.connect.jdbc.JdbcSourceConnector\",", "\"5000\", # Poll every 5 seconds \"mode\": \"incrementing\", \"incrementing.column.name\": \"stop_id\",", "KAFKA_CONNECT_URL = f\"{Settings.URLs.KAFKA_CONNECT_URL}/connectors\" CONNECTOR_NAME = \"stations\" def configure_connector(): \"\"\"Starts and", "the JDBC Source Connector to connect to Postgres. Load the", "\"value.converter\": \"org.apache.kafka.connect.json.JsonConverter\", \"value.converter.schemas.enable\": \"false\", \"topic.prefix\": \"com.connect.transportation.\", \"connection.url\": \"jdbc:postgresql://postgres:5432/cta\", \"connection.user\": \"cta_admin\",", "\"connector.class\": \"io.confluent.connect.jdbc.JdbcSourceConnector\", \"key.converter\": \"org.apache.kafka.connect.json.JsonConverter\", \"key.converter.schemas.enable\": \"false\", \"value.converter\": \"org.apache.kafka.connect.json.JsonConverter\", \"value.converter.schemas.enable\": \"false\",", "JDBC Source Connector to connect to Postgres. Load the `stations`", "# Make sure to think about what an appropriate topic", "logger = logging.getLogger(__name__) KAFKA_CONNECT_URL = f\"{Settings.URLs.KAFKA_CONNECT_URL}/connectors\" CONNECTOR_NAME = \"stations\" def", "== 200: logging.debug(\"Connector already created skipping recreation\") return config =", "incrementing mode, with `stop_id` as the incrementing column name. 
#", "Settings logger = logging.getLogger(__name__) KAFKA_CONNECT_URL = f\"{Settings.URLs.KAFKA_CONNECT_URL}/connectors\" CONNECTOR_NAME = \"stations\"", "Poll every 5 seconds \"mode\": \"incrementing\", \"incrementing.column.name\": \"stop_id\", } #", "\"config\": config}) resp = requests.post( KAFKA_CONNECT_URL, headers={\"Content-Type\": \"application/json\"}, data=data, )", "skipping recreation\") return config = { \"connector.class\": \"io.confluent.connect.jdbc.JdbcSourceConnector\", \"key.converter\": \"org.apache.kafka.connect.json.JsonConverter\",", "logging.getLogger(__name__) KAFKA_CONNECT_URL = f\"{Settings.URLs.KAFKA_CONNECT_URL}/connectors\" CONNECTOR_NAME = \"stations\" def configure_connector(): \"\"\"Starts", "Postgres Station data\"\"\" import json import logging import requests from", "= requests.post( KAFKA_CONNECT_URL, headers={\"Content-Type\": \"application/json\"}, data=data, ) # Ensure a", "for Postgres Station data\"\"\" import json import logging import requests", "= { \"connector.class\": \"io.confluent.connect.jdbc.JdbcSourceConnector\", \"key.converter\": \"org.apache.kafka.connect.json.JsonConverter\", \"key.converter.schemas.enable\": \"false\", \"value.converter\": \"org.apache.kafka.connect.json.JsonConverter\",", "data=data, ) # Ensure a healthy response was given resp.raise_for_status()", "\"table.whitelist\": \"stations\", \"poll.interval.ms\": \"5000\", # Poll every 5 seconds \"mode\":", "CONNECTOR_NAME = \"stations\" def configure_connector(): \"\"\"Starts and configures the Kafka", "\"<PASSWORD>\", \"batch.max.rows\": \"500\", \"table.whitelist\": \"stations\", \"poll.interval.ms\": \"5000\", # Poll every", "import requests from settings import Settings logger = logging.getLogger(__name__) KAFKA_CONNECT_URL", "\"jdbc:postgresql://postgres:5432/cta\", \"connection.user\": \"cta_admin\", \"connection.password\": \"<PASSWORD>\", \"batch.max.rows\": \"500\", \"table.whitelist\": \"stations\", \"poll.interval.ms\":", "\"application/json\"}, data=data, ) # Ensure a healthy response was given", "requests.get(f\"{KAFKA_CONNECT_URL}/{CONNECTOR_NAME}\") if resp.status_code == 200: logging.debug(\"Connector already created skipping recreation\")", "\"org.apache.kafka.connect.json.JsonConverter\", \"value.converter.schemas.enable\": \"false\", \"topic.prefix\": \"com.connect.transportation.\", \"connection.url\": \"jdbc:postgresql://postgres:5432/cta\", \"connection.user\": \"cta_admin\", \"connection.password\":", "requests from settings import Settings logger = logging.getLogger(__name__) KAFKA_CONNECT_URL =", "\"io.confluent.connect.jdbc.JdbcSourceConnector\", \"key.converter\": \"org.apache.kafka.connect.json.JsonConverter\", \"key.converter.schemas.enable\": \"false\", \"value.converter\": \"org.apache.kafka.connect.json.JsonConverter\", \"value.converter.schemas.enable\": \"false\", \"topic.prefix\":", "\"topic.prefix\": \"com.connect.transportation.\", \"connection.url\": \"jdbc:postgresql://postgres:5432/cta\", \"connection.user\": \"cta_admin\", \"connection.password\": \"<PASSWORD>\", \"batch.max.rows\": \"500\",", "name. # Make sure to think about what an appropriate", "`stop_id` as the incrementing column name. 
# Make sure to", "from settings import Settings logger = logging.getLogger(__name__) KAFKA_CONNECT_URL = f\"{Settings.URLs.KAFKA_CONNECT_URL}/connectors\"", "resp.status_code == 200: logging.debug(\"Connector already created skipping recreation\") return config", "import Settings logger = logging.getLogger(__name__) KAFKA_CONNECT_URL = f\"{Settings.URLs.KAFKA_CONNECT_URL}/connectors\" CONNECTOR_NAME =", "the Kafka Connect Config below. # Directions: Use the JDBC", "prefix would be, and how frequently Kafka # Connect should", "\"stations\", \"poll.interval.ms\": \"5000\", # Poll every 5 seconds \"mode\": \"incrementing\",", "f\"{Settings.URLs.KAFKA_CONNECT_URL}/connectors\" CONNECTOR_NAME = \"stations\" def configure_connector(): \"\"\"Starts and configures the", "resp = requests.post( KAFKA_CONNECT_URL, headers={\"Content-Type\": \"application/json\"}, data=data, ) # Ensure", "below. # Directions: Use the JDBC Source Connector to connect", "run this connector (hint: not very often!) data = json.dumps({\"name\":", "\"batch.max.rows\": \"500\", \"table.whitelist\": \"stations\", \"poll.interval.ms\": \"5000\", # Poll every 5", "resp = requests.get(f\"{KAFKA_CONNECT_URL}/{CONNECTOR_NAME}\") if resp.status_code == 200: logging.debug(\"Connector already created", "\"connection.url\": \"jdbc:postgresql://postgres:5432/cta\", \"connection.user\": \"cta_admin\", \"connection.password\": \"<PASSWORD>\", \"batch.max.rows\": \"500\", \"table.whitelist\": \"stations\",", "often!) data = json.dumps({\"name\": CONNECTOR_NAME, \"config\": config}) resp = requests.post(", "= f\"{Settings.URLs.KAFKA_CONNECT_URL}/connectors\" CONNECTOR_NAME = \"stations\" def configure_connector(): \"\"\"Starts and configures", "\"500\", \"table.whitelist\": \"stations\", \"poll.interval.ms\": \"5000\", # Poll every 5 seconds", "very often!) data = json.dumps({\"name\": CONNECTOR_NAME, \"config\": config}) resp =", "\"poll.interval.ms\": \"5000\", # Poll every 5 seconds \"mode\": \"incrementing\", \"incrementing.column.name\":", "CONNECTOR_NAME, \"config\": config}) resp = requests.post( KAFKA_CONNECT_URL, headers={\"Content-Type\": \"application/json\"}, data=data,", "and configures the Kafka Connect connector\"\"\" logging.debug(\"Creating or updating kafka", "Use the JDBC Source Connector to connect to Postgres. Load", "# Connect should run this connector (hint: not very often!)", "200: logging.debug(\"Connector already created skipping recreation\") return config = {", "config}) resp = requests.post( KAFKA_CONNECT_URL, headers={\"Content-Type\": \"application/json\"}, data=data, ) #", "the Kafka Connect connector\"\"\" logging.debug(\"Creating or updating kafka connect connector...\")", "logging import requests from settings import Settings logger = logging.getLogger(__name__)", "Connect should run this connector (hint: not very often!) data", "\"\"\"Configures a Kafka Connector for Postgres Station data\"\"\" import json", "`stations` table # using incrementing mode, with `stop_id` as the", "mode, with `stop_id` as the incrementing column name. # Make", "a healthy response was given resp.raise_for_status() logging.info(\"-------Connector created successfully-------\") if", "frequently Kafka # Connect should run this connector (hint: not", "to connect to Postgres. 
Load the `stations` table # using", "config = { \"connector.class\": \"io.confluent.connect.jdbc.JdbcSourceConnector\", \"key.converter\": \"org.apache.kafka.connect.json.JsonConverter\", \"key.converter.schemas.enable\": \"false\", \"value.converter\":", "def configure_connector(): \"\"\"Starts and configures the Kafka Connect connector\"\"\" logging.debug(\"Creating", "data = json.dumps({\"name\": CONNECTOR_NAME, \"config\": config}) resp = requests.post( KAFKA_CONNECT_URL,", "to think about what an appropriate topic prefix would be,", "= \"stations\" def configure_connector(): \"\"\"Starts and configures the Kafka Connect", ") # Ensure a healthy response was given resp.raise_for_status() logging.info(\"-------Connector", "\"org.apache.kafka.connect.json.JsonConverter\", \"key.converter.schemas.enable\": \"false\", \"value.converter\": \"org.apache.kafka.connect.json.JsonConverter\", \"value.converter.schemas.enable\": \"false\", \"topic.prefix\": \"com.connect.transportation.\", \"connection.url\":", "\"connection.user\": \"cta_admin\", \"connection.password\": \"<PASSWORD>\", \"batch.max.rows\": \"500\", \"table.whitelist\": \"stations\", \"poll.interval.ms\": \"5000\",", "# Directions: Use the JDBC Source Connector to connect to", "column name. # Make sure to think about what an", "headers={\"Content-Type\": \"application/json\"}, data=data, ) # Ensure a healthy response was", "response was given resp.raise_for_status() logging.info(\"-------Connector created successfully-------\") if __name__ ==", "json.dumps({\"name\": CONNECTOR_NAME, \"config\": config}) resp = requests.post( KAFKA_CONNECT_URL, headers={\"Content-Type\": \"application/json\"},", "5 seconds \"mode\": \"incrementing\", \"incrementing.column.name\": \"stop_id\", } # TODO: Complete", "json import logging import requests from settings import Settings logger", "how frequently Kafka # Connect should run this connector (hint:", "should run this connector (hint: not very often!) data =", "\"connection.password\": \"<PASSWORD>\", \"batch.max.rows\": \"500\", \"table.whitelist\": \"stations\", \"poll.interval.ms\": \"5000\", # Poll", "topic prefix would be, and how frequently Kafka # Connect", "connector (hint: not very often!) data = json.dumps({\"name\": CONNECTOR_NAME, \"config\":", "Directions: Use the JDBC Source Connector to connect to Postgres.", "using incrementing mode, with `stop_id` as the incrementing column name.", "\"stop_id\", } # TODO: Complete the Kafka Connect Config below.", "what an appropriate topic prefix would be, and how frequently", "\"\"\"Starts and configures the Kafka Connect connector\"\"\" logging.debug(\"Creating or updating", "Connect connector\"\"\" logging.debug(\"Creating or updating kafka connect connector...\") resp =", "or updating kafka connect connector...\") resp = requests.get(f\"{KAFKA_CONNECT_URL}/{CONNECTOR_NAME}\") if resp.status_code", "Kafka Connector for Postgres Station data\"\"\" import json import logging", "kafka connect connector...\") resp = requests.get(f\"{KAFKA_CONNECT_URL}/{CONNECTOR_NAME}\") if resp.status_code == 200:", "Config below. # Directions: Use the JDBC Source Connector to", "Kafka Connect Config below. # Directions: Use the JDBC Source", "Connect Config below. 
# Directions: Use the JDBC Source Connector", "be, and how frequently Kafka # Connect should run this", "updating kafka connect connector...\") resp = requests.get(f\"{KAFKA_CONNECT_URL}/{CONNECTOR_NAME}\") if resp.status_code ==", "every 5 seconds \"mode\": \"incrementing\", \"incrementing.column.name\": \"stop_id\", } # TODO:", "= json.dumps({\"name\": CONNECTOR_NAME, \"config\": config}) resp = requests.post( KAFKA_CONNECT_URL, headers={\"Content-Type\":", "sure to think about what an appropriate topic prefix would", "recreation\") return config = { \"connector.class\": \"io.confluent.connect.jdbc.JdbcSourceConnector\", \"key.converter\": \"org.apache.kafka.connect.json.JsonConverter\", \"key.converter.schemas.enable\":", "\"cta_admin\", \"connection.password\": \"<PASSWORD>\", \"batch.max.rows\": \"500\", \"table.whitelist\": \"stations\", \"poll.interval.ms\": \"5000\", #", "\"com.connect.transportation.\", \"connection.url\": \"jdbc:postgresql://postgres:5432/cta\", \"connection.user\": \"cta_admin\", \"connection.password\": \"<PASSWORD>\", \"batch.max.rows\": \"500\", \"table.whitelist\":", "the incrementing column name. # Make sure to think about", "connector\"\"\" logging.debug(\"Creating or updating kafka connect connector...\") resp = requests.get(f\"{KAFKA_CONNECT_URL}/{CONNECTOR_NAME}\")", "requests.post( KAFKA_CONNECT_URL, headers={\"Content-Type\": \"application/json\"}, data=data, ) # Ensure a healthy", "\"false\", \"topic.prefix\": \"com.connect.transportation.\", \"connection.url\": \"jdbc:postgresql://postgres:5432/cta\", \"connection.user\": \"cta_admin\", \"connection.password\": \"<PASSWORD>\", \"batch.max.rows\":", "Source Connector to connect to Postgres. Load the `stations` table", "given resp.raise_for_status() logging.info(\"-------Connector created successfully-------\") if __name__ == \"__main__\": configure_connector()", "the `stations` table # using incrementing mode, with `stop_id` as", "} # TODO: Complete the Kafka Connect Config below. #", "Load the `stations` table # using incrementing mode, with `stop_id`", "connect connector...\") resp = requests.get(f\"{KAFKA_CONNECT_URL}/{CONNECTOR_NAME}\") if resp.status_code == 200: logging.debug(\"Connector", "connect to Postgres. Load the `stations` table # using incrementing", "not very often!) data = json.dumps({\"name\": CONNECTOR_NAME, \"config\": config}) resp", "and how frequently Kafka # Connect should run this connector", "import json import logging import requests from settings import Settings", "= logging.getLogger(__name__) KAFKA_CONNECT_URL = f\"{Settings.URLs.KAFKA_CONNECT_URL}/connectors\" CONNECTOR_NAME = \"stations\" def configure_connector():", "table # using incrementing mode, with `stop_id` as the incrementing", "with `stop_id` as the incrementing column name. # Make sure", "Postgres. Load the `stations` table # using incrementing mode, with", "= requests.get(f\"{KAFKA_CONNECT_URL}/{CONNECTOR_NAME}\") if resp.status_code == 200: logging.debug(\"Connector already created skipping", "created skipping recreation\") return config = { \"connector.class\": \"io.confluent.connect.jdbc.JdbcSourceConnector\", \"key.converter\":", "Complete the Kafka Connect Config below. 
# Directions: Use the", "Connector for Postgres Station data\"\"\" import json import logging import", "connector...\") resp = requests.get(f\"{KAFKA_CONNECT_URL}/{CONNECTOR_NAME}\") if resp.status_code == 200: logging.debug(\"Connector already", "import logging import requests from settings import Settings logger =", "{ \"connector.class\": \"io.confluent.connect.jdbc.JdbcSourceConnector\", \"key.converter\": \"org.apache.kafka.connect.json.JsonConverter\", \"key.converter.schemas.enable\": \"false\", \"value.converter\": \"org.apache.kafka.connect.json.JsonConverter\", \"value.converter.schemas.enable\":", "\"incrementing.column.name\": \"stop_id\", } # TODO: Complete the Kafka Connect Config", "TODO: Complete the Kafka Connect Config below. # Directions: Use", "to Postgres. Load the `stations` table # using incrementing mode,", "if resp.status_code == 200: logging.debug(\"Connector already created skipping recreation\") return", "data\"\"\" import json import logging import requests from settings import", "\"value.converter.schemas.enable\": \"false\", \"topic.prefix\": \"com.connect.transportation.\", \"connection.url\": \"jdbc:postgresql://postgres:5432/cta\", \"connection.user\": \"cta_admin\", \"connection.password\": \"<PASSWORD>\",", "\"key.converter\": \"org.apache.kafka.connect.json.JsonConverter\", \"key.converter.schemas.enable\": \"false\", \"value.converter\": \"org.apache.kafka.connect.json.JsonConverter\", \"value.converter.schemas.enable\": \"false\", \"topic.prefix\": \"com.connect.transportation.\",", "return config = { \"connector.class\": \"io.confluent.connect.jdbc.JdbcSourceConnector\", \"key.converter\": \"org.apache.kafka.connect.json.JsonConverter\", \"key.converter.schemas.enable\": \"false\",", "Connector to connect to Postgres. Load the `stations` table #", "think about what an appropriate topic prefix would be, and", "would be, and how frequently Kafka # Connect should run", "# Poll every 5 seconds \"mode\": \"incrementing\", \"incrementing.column.name\": \"stop_id\", }", "\"false\", \"value.converter\": \"org.apache.kafka.connect.json.JsonConverter\", \"value.converter.schemas.enable\": \"false\", \"topic.prefix\": \"com.connect.transportation.\", \"connection.url\": \"jdbc:postgresql://postgres:5432/cta\", \"connection.user\":", "healthy response was given resp.raise_for_status() logging.info(\"-------Connector created successfully-------\") if __name__", "seconds \"mode\": \"incrementing\", \"incrementing.column.name\": \"stop_id\", } # TODO: Complete the", "\"incrementing\", \"incrementing.column.name\": \"stop_id\", } # TODO: Complete the Kafka Connect", "(hint: not very often!) data = json.dumps({\"name\": CONNECTOR_NAME, \"config\": config})", "Station data\"\"\" import json import logging import requests from settings", "a Kafka Connector for Postgres Station data\"\"\" import json import", "logging.debug(\"Creating or updating kafka connect connector...\") resp = requests.get(f\"{KAFKA_CONNECT_URL}/{CONNECTOR_NAME}\") if", "Kafka # Connect should run this connector (hint: not very", "\"key.converter.schemas.enable\": \"false\", \"value.converter\": \"org.apache.kafka.connect.json.JsonConverter\", \"value.converter.schemas.enable\": \"false\", \"topic.prefix\": \"com.connect.transportation.\", \"connection.url\": \"jdbc:postgresql://postgres:5432/cta\",", "this connector (hint: not very often!) 
data = json.dumps({\"name\": CONNECTOR_NAME,", "# using incrementing mode, with `stop_id` as the incrementing column", "\"stations\" def configure_connector(): \"\"\"Starts and configures the Kafka Connect connector\"\"\"", "Ensure a healthy response was given resp.raise_for_status() logging.info(\"-------Connector created successfully-------\")" ]
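# ---------------------------------------------------------------------------
# Usage sketch (not part of the original script): once configure_connector()
# has run, the standard Kafka Connect REST API reports connector and task
# state at /connectors/<name>/status. This helper is illustrative only and
# assumes the same KAFKA_CONNECT_URL and CONNECTOR_NAME values defined above.
# ---------------------------------------------------------------------------
import requests


def check_connector_status(connect_url, connector_name):
    """Return the connector state and per-task states reported by Kafka Connect."""
    resp = requests.get(f"{connect_url}/{connector_name}/status")
    resp.raise_for_status()
    status = resp.json()
    # 'connector' holds the overall state; 'tasks' holds one entry per task
    return status["connector"]["state"], [t["state"] for t in status.get("tasks", [])]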
[ "# store KNN word info if knn_dict: sentence.tgt_lang = tgt_lang", "model, iseval): \"\"\"Torchify a single example.\"\"\" words = ['!{}_{}'.format(ex.language, w)", "for w in ex.words] words = [model.word_dict[w] for w in", "f: data = json.load(f) knn_dict = None if knn_file: with", "return vectorize(self.examples[index], self.model, iseval=self.evaluation) def lengths(self): return [len(ex.words) for ex", "* knn_size) sentence.knn_words = knn_words examples.append(sentence) if max_examples != -1", "{ 'ids': ids, 'language': language, 'batch_size': batch_size, 'len_rep': len_rep, 'word_rep':", "batch_size self.shuffle = shuffle def __iter__(self): lengths = np.array( [(-l,", "- ex['obj_start'] < 0: # we swap the start and", "1 sentence.object = [ex['obj_end'], ex['obj_start']] else: sentence.object = [ex['obj_start'], ex['obj_end']]", "= torch.LongTensor([model.ner_dict[n] for n in ex.ner]) deprel = torch.LongTensor([model.deprel_dict[d] for", "= v return embeddings_index # ------------------------------------------------------------------------------ # Data loading #", "--------- max_len = max([ex['word'].size(0) for ex in batch]) # Batch", "ex['token'] sentence.pos = ex['stanford_pos'] sentence.ner = ex['stanford_ner'] sentence.deprel = ex['stanford_deprel']", "knn_words.append(knn_dict[w]) else: knn_words.append([constant.UNK_WORD] * knn_size) sentence.knn_words = knn_words examples.append(sentence) if", "None if use_knn: knn_rep = torch.LongTensor(batch_size, max_len, knn_size).fill_(constant.PAD) for i,", "logger = logging.getLogger(__name__) def load_word_embeddings(file): embeddings_index = {} fin =", "found!'.format( wrong_subj_pos, wrong_obj_pos)) return examples def vectorize(ex, model, iseval): \"\"\"Torchify", "= Sentence(ex['id']) sentence.language = src_lang sentence.words = ex['token'] sentence.pos =", "examples def vectorize(ex, model, iseval): \"\"\"Torchify a single example.\"\"\" words", "index wrong_obj_pos += 1 sentence.object = [ex['obj_end'], ex['obj_start']] else: sentence.object", "> 0 or wrong_obj_pos > 0: logger.info('{} and {} wrong", "= [] object = [] knn_rep = None if use_knn:", "data = json.load(f) knn_dict = None if knn_file: with open(knn_file)", "import json import numpy import torch import numpy as np", "in batch]) # Batch Code Representations len_rep = torch.LongTensor(batch_size).fill_(constant.PAD) word_rep", "= ex['type'] subject.append(ex['subject']) object.append(ex['object']) if use_knn: knn_rep[i, :len_rep[i]] = ex['knn_word']", "v = numpy.array(tokens[1:], dtype=float) embeddings_index[tokens[0]] = v return embeddings_index #", "for x in ex.head]) head = torch.LongTensor(ex.head) subj_position = torch.LongTensor(ex.subj_position)", "end = ex.subject type[start: end + 1] = [ttype] *", "ex['word'].size(0) labels[i] = ex['relation'] word_rep[i, :len_rep[i]] = ex['word'] head_rep[i, :len_rep[i]]", "map(int, fin.readline().split()) for i, line in tqdm(enumerate(fin), total=n): tokens =", "word_rep = torch.LongTensor(batch_size, max_len).fill_(constant.PAD) head_rep = torch.LongTensor(batch_size, max_len).fill_(constant.PAD) subject_pos_rep =", "ex.head]) head = torch.LongTensor(ex.head) subj_position = torch.LongTensor(ex.subj_position) obj_position = torch.LongTensor(ex.obj_position)", "batch_size, shuffle=True): self.lengths = lengths self.batch_size = batch_size self.shuffle =", "= max([ex['word'].size(0) for ex in batch]) # Batch Code Representations", "ex['word'] head_rep[i, :len_rep[i]] = ex['head'] subject_pos_rep[i, :len_rep[i]] = 
ex['subject_pos'] object_pos_rep[i,", "deprel_rep[i, :len_rep[i]] = ex['deprel'] type_rep[i, :len_rep[i]] = ex['type'] subject.append(ex['subject']) object.append(ex['object'])", "type_rep[i, :len_rep[i]] = ex['type'] subject.append(ex['subject']) object.append(ex['object']) if use_knn: knn_rep[i, :len_rep[i]]", "index wrong_subj_pos += 1 sentence.subject = [ex['subj_end'], ex['subj_start']] else: sentence.subject", "order=('l1', 'rand')) batches = [indices[i:i + self.batch_size] for i in", "'pos_rep': pos_rep, 'ner_rep': ner_rep, 'deprel_rep': deprel_rep, 'type_rep': type_rep } class", "w in knn] for knn in ex.knn_words] knn_word = torch.LongTensor(knn_word)", "batch[0]['knn_word'] is not None # NOTE. batch[0]['knn_word'] is a 2d", "ttype = model.type_dict[ex.subj_type] start, end = ex.subject type[start: end +", "sentence.pos = ex['stanford_pos'] sentence.ner = ex['stanford_ner'] sentence.deprel = ex['stanford_deprel'] sentence.head", "= batch_size self.shuffle = shuffle def __iter__(self): lengths = np.array(", "- ex['subj_start'] < 0: # we swap the start and", "use_knn: knn_rep = torch.LongTensor(batch_size, max_len, knn_size).fill_(constant.PAD) for i, ex in", "swap the start and end index wrong_obj_pos += 1 sentence.object", "assert len(knn_dict[w]) == knn_size knn_words.append(knn_dict[w]) else: knn_words.append([constant.UNK_WORD] * knn_size) sentence.knn_words", "class ACE05Dataset(Dataset): def __init__(self, examples, model, evaluation=False): self.model = model", "= torch.LongTensor(batch_size, max_len).fill_(constant.PAD) ner_rep = torch.LongTensor(batch_size, max_len).fill_(constant.PAD) deprel_rep = torch.LongTensor(batch_size,", "[ex['obj_end'], ex['obj_start']] else: sentence.object = [ex['obj_start'], ex['obj_end']] # store KNN", "torch.LongTensor(ex.head) subj_position = torch.LongTensor(ex.subj_position) obj_position = torch.LongTensor(ex.obj_position) type = [0]", "__init__(self, examples, model, evaluation=False): self.model = model self.examples = examples", "batches = [indices[i:i + self.batch_size] for i in range(0, len(indices),", "# ------------------------------------------------------------------------------ def load_data(filename, src_lang, tgt_lang, knn_file, knn_size, max_examples=-1): examples", "SortedBatchSampler(Sampler): def __init__(self, lengths, batch_size, shuffle=True): self.lengths = lengths self.batch_size", "= batch[0]['knn_word'] is not None # NOTE. 
batch[0]['knn_word'] is a", "') v = numpy.array(tokens[1:], dtype=float) embeddings_index[tokens[0]] = v return embeddings_index", "len_rep = torch.LongTensor(batch_size).fill_(constant.PAD) word_rep = torch.LongTensor(batch_size, max_len).fill_(constant.PAD) head_rep = torch.LongTensor(batch_size,", "ner_rep[i, :len_rep[i]] = ex['ner'] deprel_rep[i, :len_rep[i]] = ex['deprel'] type_rep[i, :len_rep[i]]", ":len_rep[i]] = ex['deprel'] type_rep[i, :len_rep[i]] = ex['type'] subject.append(ex['subject']) object.append(ex['object']) if", "= torch.LongTensor(batch_size, max_len).fill_(constant.PAD) pos_rep = torch.LongTensor(batch_size, max_len).fill_(constant.PAD) ner_rep = torch.LongTensor(batch_size,", "tgt_lang knn_words = [] for w in ex['token']: w =", "torch.LongTensor(ex.obj_position) type = [0] * len(ex.words) ttype = model.type_dict[ex.subj_type] start,", ":len_rep[i]] = ex['type'] subject.append(ex['subject']) object.append(ex['object']) if use_knn: knn_rep[i, :len_rep[i]] =", "np.random.shuffle(batches) return iter([i for batch in batches for i in", "idx, ex in enumerate(tqdm(data, total=len(data))): sentence = Sentence(ex['id']) sentence.language =", "subj_position, 'object_pos': obj_position, 'relation': model.label_dict[ex.relation], 'knn_word': knn_word } def batchify(batch):", "knn_word = [[model.word_dict[w] for w in knn] for knn in", "# NOTE. batch[0]['knn_word'] is a 2d list knn_size = len(batch[0]['knn_word'][0])", "def __getitem__(self, index): return vectorize(self.examples[index], self.model, iseval=self.evaluation) def lengths(self): return", ":len_rep[i]] = ex['pos'] ner_rep[i, :len_rep[i]] = ex['ner'] deprel_rep[i, :len_rep[i]] =", "ex['object_pos'] pos_rep[i, :len_rep[i]] = ex['pos'] ner_rep[i, :len_rep[i]] = ex['ner'] deprel_rep[i,", "list of vectorized examples batch_size = len(batch) ids = [ex['id']", "knn_size) sentence.knn_words = knn_words examples.append(sentence) if max_examples != -1 and", "'subject_pos': subj_position, 'object_pos': obj_position, 'relation': model.label_dict[ex.relation], 'knn_word': knn_word } def", "model.label_dict[ex.relation], 'knn_word': knn_word } def batchify(batch): \"\"\"Gather a batch of", "lengths, batch_size, shuffle=True): self.lengths = lengths self.batch_size = batch_size self.shuffle", "ex in batch] language = [ex['language'] for ex in batch]", "max_len).fill_(constant.PAD) object_pos_rep = torch.LongTensor(batch_size, max_len).fill_(constant.PAD) pos_rep = torch.LongTensor(batch_size, max_len).fill_(constant.PAD) ner_rep", "iseval): \"\"\"Torchify a single example.\"\"\" words = ['!{}_{}'.format(ex.language, w) for", "= {} fin = io.open(file, 'r', encoding='utf-8', newline='\\n', errors='ignore') n,", "in knn_dict: assert len(knn_dict[w]) == knn_size knn_words.append(knn_dict[w]) else: knn_words.append([constant.UNK_WORD] *", "as f: data = json.load(f) knn_dict = None if knn_file:", "in ex['token']: w = '!{}_{}'.format(src_lang, w) if w in knn_dict:", "ex['subject_pos'] object_pos_rep[i, :len_rep[i]] = ex['object_pos'] pos_rep[i, :len_rep[i]] = ex['pos'] ner_rep[i,", "self.batch_size = batch_size self.shuffle = shuffle def __iter__(self): lengths =", "= len(batch[0]['knn_word'][0]) if use_knn else 0 # --------- Prepare Code", "tgt_lang, knn_file, knn_size, max_examples=-1): examples = [] wrong_subj_pos, wrong_obj_pos =", "is not None # NOTE. 
batch[0]['knn_word'] is a 2d list", "head_rep = torch.LongTensor(batch_size, max_len).fill_(constant.PAD) subject_pos_rep = torch.LongTensor(batch_size, max_len).fill_(constant.PAD) object_pos_rep =", "return { 'id': ex.id, 'language': ex.language, 'word': word, 'pos': pos,", "len(knn_dict[w]) == knn_size knn_words.append(knn_dict[w]) else: knn_words.append([constant.UNK_WORD] * knn_size) sentence.knn_words =", "def vectorize(ex, model, iseval): \"\"\"Torchify a single example.\"\"\" words =", "torch.utils.data.sampler import Sampler logger = logging.getLogger(__name__) def load_word_embeddings(file): embeddings_index =", "[(-l, np.random.random()) for l in self.lengths], dtype=[('l1', np.int_), ('rand', np.float_)]", "d in ex.deprel]) assert any([x == 0 for x in", "= torch.LongTensor(words) pos = torch.LongTensor([model.pos_dict[p] for p in ex.pos]) ner", "= torch.LongTensor(batch_size, max_len).fill_(constant.PAD) head_rep = torch.LongTensor(batch_size, max_len).fill_(constant.PAD) subject_pos_rep = torch.LongTensor(batch_size,", "sentence = Sentence(ex['id']) sentence.language = src_lang sentence.words = ex['token'] sentence.pos", "clie.inputters import constant from clie.objects import Sentence from torch.utils.data import", "vectorized examples batch_size = len(batch) ids = [ex['id'] for ex", "= torch.LongTensor(batch_size, max_len).fill_(constant.PAD) object_pos_rep = torch.LongTensor(batch_size, max_len).fill_(constant.PAD) pos_rep = torch.LongTensor(batch_size,", "json.load(f) for idx, ex in enumerate(tqdm(data, total=len(data))): sentence = Sentence(ex['id'])", "sentence.object = [ex['obj_start'], ex['obj_end']] # store KNN word info if", "for ex in batch] use_knn = batch[0]['knn_word'] is not None", "self.model = model self.examples = examples self.evaluation = evaluation def", "end + 1] = [ttype] * (end - start +", "'word_rep': word_rep, 'knn_rep': knn_rep, 'head_rep': head_rep, 'subject': subject, 'object': object,", "= ex.object type[start: end + 1] = [atype] * (end", "= src_lang sentence.words = ex['token'] sentence.pos = ex['stanford_pos'] sentence.ner =", "ex.ner]) deprel = torch.LongTensor([model.deprel_dict[d] for d in ex.deprel]) assert any([x", "= 0, 0 with open(filename) as f: data = json.load(f)", "Dataset from torch.utils.data.sampler import Sampler logger = logging.getLogger(__name__) def load_word_embeddings(file):", "None if ex.knn_words: knn_word = [[model.word_dict[w] for w in knn]", "knn_dict = None if knn_file: with open(knn_file) as f: knn_dict", "= ex['stanford_ner'] sentence.deprel = ex['stanford_deprel'] sentence.head = [int(x) for x", "ACE05Dataset(Dataset): def __init__(self, examples, model, evaluation=False): self.model = model self.examples", "= [indices[i:i + self.batch_size] for i in range(0, len(indices), self.batch_size)]", "deprel = torch.LongTensor([model.deprel_dict[d] for d in ex.deprel]) assert any([x ==", "the start and end index wrong_obj_pos += 1 sentence.object =", "if use_knn else 0 # --------- Prepare Code tensors ---------", "= ['!{}_{}'.format(ex.language, w) for w in ex.words] words = [model.word_dict[w]", "end index wrong_obj_pos += 1 sentence.object = [ex['obj_end'], ex['obj_start']] else:", "torch.LongTensor(batch_size, max_len).fill_(constant.PAD) object_pos_rep = torch.LongTensor(batch_size, max_len).fill_(constant.PAD) pos_rep = torch.LongTensor(batch_size, max_len).fill_(constant.PAD)", "ex['obj_end'] - ex['obj_start'] < 0: # we swap the start", "return iter([i for batch in batches for i in batch])", ":len_rep[i]] = ex['ner'] 
deprel_rep[i, :len_rep[i]] = ex['deprel'] type_rep[i, :len_rep[i]] =", "knn_dict: assert len(knn_dict[w]) == knn_size knn_words.append(knn_dict[w]) else: knn_words.append([constant.UNK_WORD] * knn_size)", "= [int(x) for x in ex['stanford_head']] sentence.subj_type = ex['subj_type'] sentence.obj_type", "knn_size knn_words.append(knn_dict[w]) else: knn_words.append([constant.UNK_WORD] * knn_size) sentence.knn_words = knn_words examples.append(sentence)", "def __iter__(self): lengths = np.array( [(-l, np.random.random()) for l in", "ex['obj_end']] # store KNN word info if knn_dict: sentence.tgt_lang =", "= lengths self.batch_size = batch_size self.shuffle = shuffle def __iter__(self):", "= [ex['obj_start'], ex['obj_end']] # store KNN word info if knn_dict:", "len_rep[i] = ex['word'].size(0) labels[i] = ex['relation'] word_rep[i, :len_rep[i]] = ex['word']", "if w in knn_dict: assert len(knn_dict[w]) == knn_size knn_words.append(knn_dict[w]) else:", "[ex['id'] for ex in batch] language = [ex['language'] for ex", "__iter__(self): lengths = np.array( [(-l, np.random.random()) for l in self.lengths],", "self.model, iseval=self.evaluation) def lengths(self): return [len(ex.words) for ex in self.examples]", "individual examples into one batch.\"\"\" # batch is a list", "torch.LongTensor(batch_size, max_len).fill_(constant.PAD) labels = torch.LongTensor(batch_size) subject = [] object =", "model self.examples = examples self.evaluation = evaluation def __len__(self): return", "and len(examples) > max_examples: break if wrong_subj_pos > 0 or", "object_pos_rep, 'labels': labels, 'pos_rep': pos_rep, 'ner_rep': ner_rep, 'deprel_rep': deprel_rep, 'type_rep':", "== knn_size knn_words.append(knn_dict[w]) else: knn_words.append([constant.UNK_WORD] * knn_size) sentence.knn_words = knn_words", "wrong_obj_pos)) return examples def vectorize(ex, model, iseval): \"\"\"Torchify a single", "= model self.examples = examples self.evaluation = evaluation def __len__(self):", "return [len(ex.words) for ex in self.examples] class SortedBatchSampler(Sampler): def __init__(self,", "torch.LongTensor(batch_size, max_len).fill_(constant.PAD) pos_rep = torch.LongTensor(batch_size, max_len).fill_(constant.PAD) ner_rep = torch.LongTensor(batch_size, max_len).fill_(constant.PAD)", "'object_pos': obj_position, 'relation': model.label_dict[ex.relation], 'knn_word': knn_word } def batchify(batch): \"\"\"Gather", "__getitem__(self, index): return vectorize(self.examples[index], self.model, iseval=self.evaluation) def lengths(self): return [len(ex.words)", "constant from clie.objects import Sentence from torch.utils.data import Dataset from", "tqdm from clie.inputters import constant from clie.objects import Sentence from", "deprel_rep = torch.LongTensor(batch_size, max_len).fill_(constant.PAD) type_rep = torch.LongTensor(batch_size, max_len).fill_(constant.PAD) labels =", "lengths = np.array( [(-l, np.random.random()) for l in self.lengths], dtype=[('l1',", "'deprel': deprel, 'type': type, 'head': head, 'subject': ex.subj_text, 'object': ex.obj_text,", "return len(self.examples) def __getitem__(self, index): return vectorize(self.examples[index], self.model, iseval=self.evaluation) def", "f: knn_dict = json.load(f) for idx, ex in enumerate(tqdm(data, total=len(data))):", "< 0: # we swap the start and end index", "p in ex.pos]) ner = torch.LongTensor([model.ner_dict[n] for n in ex.ner])", "'word': word, 'pos': pos, 'ner': ner, 'deprel': deprel, 'type': type,", "w in knn_dict: assert len(knn_dict[w]) == knn_size 
knn_words.append(knn_dict[w]) else: knn_words.append([constant.UNK_WORD]", "> 0: logger.info('{} and {} wrong subject and object positions", "index): return vectorize(self.examples[index], self.model, iseval=self.evaluation) def lengths(self): return [len(ex.words) for", "ex['stanford_head']] sentence.subj_type = ex['subj_type'] sentence.obj_type = ex['obj_type'] sentence.relation = ex['relation']", "object_pos_rep = torch.LongTensor(batch_size, max_len).fill_(constant.PAD) pos_rep = torch.LongTensor(batch_size, max_len).fill_(constant.PAD) ner_rep =", "'type': type, 'head': head, 'subject': ex.subj_text, 'object': ex.obj_text, 'subject_pos': subj_position,", "len(batch[0]['knn_word'][0]) if use_knn else 0 # --------- Prepare Code tensors", "[] knn_rep = None if use_knn: knn_rep = torch.LongTensor(batch_size, max_len,", "self.lengths = lengths self.batch_size = batch_size self.shuffle = shuffle def", "batch in batches for i in batch]) def __len__(self): return", "\"\"\"Torchify a single example.\"\"\" words = ['!{}_{}'.format(ex.language, w) for w", "in ex.knn_words] knn_word = torch.LongTensor(knn_word) word = torch.LongTensor(words) pos =", "'rand')) batches = [indices[i:i + self.batch_size] for i in range(0,", "ex['subj_end']] if ex['obj_end'] - ex['obj_start'] < 0: # we swap", "knn_words examples.append(sentence) if max_examples != -1 and len(examples) > max_examples:", "= ex['word'] head_rep[i, :len_rep[i]] = ex['head'] subject_pos_rep[i, :len_rep[i]] = ex['subject_pos']", "type_rep } class ACE05Dataset(Dataset): def __init__(self, examples, model, evaluation=False): self.model", "info if knn_dict: sentence.tgt_lang = tgt_lang knn_words = [] for", "batch.\"\"\" # batch is a list of vectorized examples batch_size", "examples = [] wrong_subj_pos, wrong_obj_pos = 0, 0 with open(filename)", "clie.objects import Sentence from torch.utils.data import Dataset from torch.utils.data.sampler import", "in ex['stanford_head']] sentence.subj_type = ex['subj_type'] sentence.obj_type = ex['obj_type'] sentence.relation =", "0 for x in ex.head]) head = torch.LongTensor(ex.head) subj_position =", "evaluation=False): self.model = model self.examples = examples self.evaluation = evaluation", "ex['obj_type'] sentence.relation = ex['relation'] if ex['subj_end'] - ex['subj_start'] < 0:", "and {} wrong subject and object positions found!'.format( wrong_subj_pos, wrong_obj_pos))", "= ex['stanford_deprel'] sentence.head = [int(x) for x in ex['stanford_head']] sentence.subj_type", "Prepare Code tensors --------- max_len = max([ex['word'].size(0) for ex in", "src_lang sentence.words = ex['token'] sentence.pos = ex['stanford_pos'] sentence.ner = ex['stanford_ner']", "= [ex['subj_start'], ex['subj_end']] if ex['obj_end'] - ex['obj_start'] < 0: #", "deprel_rep, 'type_rep': type_rep } class ACE05Dataset(Dataset): def __init__(self, examples, model,", "def __init__(self, lengths, batch_size, shuffle=True): self.lengths = lengths self.batch_size =", "i, line in tqdm(enumerate(fin), total=n): tokens = line.rstrip().split(' ') v", "'type_rep': type_rep } class ACE05Dataset(Dataset): def __init__(self, examples, model, evaluation=False):", "max_len).fill_(constant.PAD) subject_pos_rep = torch.LongTensor(batch_size, max_len).fill_(constant.PAD) object_pos_rep = torch.LongTensor(batch_size, max_len).fill_(constant.PAD) pos_rep", "knn_file, knn_size, max_examples=-1): examples = [] wrong_subj_pos, wrong_obj_pos = 0,", "pos_rep[i, :len_rep[i]] = ex['pos'] ner_rep[i, :len_rep[i]] = ex['ner'] deprel_rep[i, :len_rep[i]]", "in 
knn] for knn in ex.knn_words] knn_word = torch.LongTensor(knn_word) word", "[atype] * (end - start + 1) type = torch.LongTensor(type)", "'head': head, 'subject': ex.subj_text, 'object': ex.obj_text, 'subject_pos': subj_position, 'object_pos': obj_position,", "line.rstrip().split(' ') v = numpy.array(tokens[1:], dtype=float) embeddings_index[tokens[0]] = v return", "torch.LongTensor(batch_size, max_len).fill_(constant.PAD) head_rep = torch.LongTensor(batch_size, max_len).fill_(constant.PAD) subject_pos_rep = torch.LongTensor(batch_size, max_len).fill_(constant.PAD)", "w in ex['token']: w = '!{}_{}'.format(src_lang, w) if w in", "for x in ex['stanford_head']] sentence.subj_type = ex['subj_type'] sentence.obj_type = ex['obj_type']", "= [] for w in ex['token']: w = '!{}_{}'.format(src_lang, w)", "sentence.tgt_lang = tgt_lang knn_words = [] for w in ex['token']:", "v return embeddings_index # ------------------------------------------------------------------------------ # Data loading # ------------------------------------------------------------------------------", "with open(knn_file) as f: knn_dict = json.load(f) for idx, ex", "w in ex.words] words = [model.word_dict[w] for w in words]", "for n in ex.ner]) deprel = torch.LongTensor([model.deprel_dict[d] for d in", ":len_rep[i]] = ex['subject_pos'] object_pos_rep[i, :len_rep[i]] = ex['object_pos'] pos_rep[i, :len_rep[i]] =", "= [ttype] * (end - start + 1) atype =", "= torch.LongTensor(batch_size, max_len).fill_(constant.PAD) subject_pos_rep = torch.LongTensor(batch_size, max_len).fill_(constant.PAD) object_pos_rep = torch.LongTensor(batch_size,", "ex['token']: w = '!{}_{}'.format(src_lang, w) if w in knn_dict: assert", "= torch.LongTensor(batch_size, max_len).fill_(constant.PAD) deprel_rep = torch.LongTensor(batch_size, max_len).fill_(constant.PAD) type_rep = torch.LongTensor(batch_size,", "[0] * len(ex.words) ttype = model.type_dict[ex.subj_type] start, end = ex.subject", "= [ex['subj_end'], ex['subj_start']] else: sentence.subject = [ex['subj_start'], ex['subj_end']] if ex['obj_end']", "# batch is a list of vectorized examples batch_size =", "ex.id, 'language': ex.language, 'word': word, 'pos': pos, 'ner': ner, 'deprel':", "ex['head'] subject_pos_rep[i, :len_rep[i]] = ex['subject_pos'] object_pos_rep[i, :len_rep[i]] = ex['object_pos'] pos_rep[i,", "subject_pos_rep[i, :len_rep[i]] = ex['subject_pos'] object_pos_rep[i, :len_rep[i]] = ex['object_pos'] pos_rep[i, :len_rep[i]]", "= torch.LongTensor(batch_size, max_len).fill_(constant.PAD) labels = torch.LongTensor(batch_size) subject = [] object", "[indices[i:i + self.batch_size] for i in range(0, len(indices), self.batch_size)] if", "word_rep[i, :len_rep[i]] = ex['word'] head_rep[i, :len_rep[i]] = ex['head'] subject_pos_rep[i, :len_rep[i]]", "object.append(ex['object']) if use_knn: knn_rep[i, :len_rep[i]] = ex['knn_word'] return { 'ids':", "ex['pos'] ner_rep[i, :len_rep[i]] = ex['ner'] deprel_rep[i, :len_rep[i]] = ex['deprel'] type_rep[i,", "batch] use_knn = batch[0]['knn_word'] is not None # NOTE. 
batch[0]['knn_word']", "for w in knn] for knn in ex.knn_words] knn_word =", "ex['subj_start'] < 0: # we swap the start and end", "= ex['subject_pos'] object_pos_rep[i, :len_rep[i]] = ex['object_pos'] pos_rep[i, :len_rep[i]] = ex['pos']", "with open(filename) as f: data = json.load(f) knn_dict = None", "knn_dict = json.load(f) for idx, ex in enumerate(tqdm(data, total=len(data))): sentence", "torch.LongTensor([model.deprel_dict[d] for d in ex.deprel]) assert any([x == 0 for", "in self.examples] class SortedBatchSampler(Sampler): def __init__(self, lengths, batch_size, shuffle=True): self.lengths", "Batch Code Representations len_rep = torch.LongTensor(batch_size).fill_(constant.PAD) word_rep = torch.LongTensor(batch_size, max_len).fill_(constant.PAD)", "ex['ner'] deprel_rep[i, :len_rep[i]] = ex['deprel'] type_rep[i, :len_rep[i]] = ex['type'] subject.append(ex['subject'])", "for w in ex['token']: w = '!{}_{}'.format(src_lang, w) if w", "def load_data(filename, src_lang, tgt_lang, knn_file, knn_size, max_examples=-1): examples = []", "= ex.subject type[start: end + 1] = [ttype] * (end", "iseval=self.evaluation) def lengths(self): return [len(ex.words) for ex in self.examples] class", "= len(batch) ids = [ex['id'] for ex in batch] language", "if use_knn: knn_rep[i, :len_rep[i]] = ex['knn_word'] return { 'ids': ids,", "the start and end index wrong_subj_pos += 1 sentence.subject =", "embeddings_index # ------------------------------------------------------------------------------ # Data loading # ------------------------------------------------------------------------------ def load_data(filename,", "labels = torch.LongTensor(batch_size) subject = [] object = [] knn_rep", "into one batch.\"\"\" # batch is a list of vectorized", "import torch import numpy as np from tqdm import tqdm", "'ner': ner, 'deprel': deprel, 'type': type, 'head': head, 'subject': ex.subj_text,", "Sampler logger = logging.getLogger(__name__) def load_word_embeddings(file): embeddings_index = {} fin", "def __len__(self): return len(self.examples) def __getitem__(self, index): return vectorize(self.examples[index], self.model,", "ex['subj_start']] else: sentence.subject = [ex['subj_start'], ex['subj_end']] if ex['obj_end'] - ex['obj_start']", "import Sampler logger = logging.getLogger(__name__) def load_word_embeddings(file): embeddings_index = {}", "shuffle def __iter__(self): lengths = np.array( [(-l, np.random.random()) for l", "= torch.LongTensor(batch_size, max_len).fill_(constant.PAD) type_rep = torch.LongTensor(batch_size, max_len).fill_(constant.PAD) labels = torch.LongTensor(batch_size)", "torch.LongTensor(batch_size).fill_(constant.PAD) word_rep = torch.LongTensor(batch_size, max_len).fill_(constant.PAD) head_rep = torch.LongTensor(batch_size, max_len).fill_(constant.PAD) subject_pos_rep", "[model.word_dict[w] for w in words] knn_word = None if ex.knn_words:", "} class ACE05Dataset(Dataset): def __init__(self, examples, model, evaluation=False): self.model =", "self.evaluation = evaluation def __len__(self): return len(self.examples) def __getitem__(self, index):", "torch.LongTensor([model.ner_dict[n] for n in ex.ner]) deprel = torch.LongTensor([model.deprel_dict[d] for d", "* (end - start + 1) atype = model.type_dict[ex.obj_type] start,", "import numpy as np from tqdm import tqdm from clie.inputters", "import Sentence from torch.utils.data import Dataset from torch.utils.data.sampler import Sampler", "'language': language, 'batch_size': batch_size, 'len_rep': len_rep, 'word_rep': word_rep, 'knn_rep': knn_rep,", 
"io import logging import json import numpy import torch import", "= [ex['obj_end'], ex['obj_start']] else: sentence.object = [ex['obj_start'], ex['obj_end']] # store", "ex in self.examples] class SortedBatchSampler(Sampler): def __init__(self, lengths, batch_size, shuffle=True):", "json.load(f) knn_dict = None if knn_file: with open(knn_file) as f:", "labels[i] = ex['relation'] word_rep[i, :len_rep[i]] = ex['word'] head_rep[i, :len_rep[i]] =", "= ex['relation'] word_rep[i, :len_rep[i]] = ex['word'] head_rep[i, :len_rep[i]] = ex['head']", "from torch.utils.data.sampler import Sampler logger = logging.getLogger(__name__) def load_word_embeddings(file): embeddings_index", "or wrong_obj_pos > 0: logger.info('{} and {} wrong subject and", "for i, ex in enumerate(batch): len_rep[i] = ex['word'].size(0) labels[i] =", "type, 'head': head, 'subject': ex.subj_text, 'object': ex.obj_text, 'subject_pos': subj_position, 'object_pos':", "np.random.random()) for l in self.lengths], dtype=[('l1', np.int_), ('rand', np.float_)] )", "a list of vectorized examples batch_size = len(batch) ids =", "d = map(int, fin.readline().split()) for i, line in tqdm(enumerate(fin), total=n):", "import io import logging import json import numpy import torch", "sentence.subject = [ex['subj_start'], ex['subj_end']] if ex['obj_end'] - ex['obj_start'] < 0:", "!= -1 and len(examples) > max_examples: break if wrong_subj_pos >", "in ex.words] words = [model.word_dict[w] for w in words] knn_word", "in ex.ner]) deprel = torch.LongTensor([model.deprel_dict[d] for d in ex.deprel]) assert", "[] object = [] knn_rep = None if use_knn: knn_rep", "pos_rep, 'ner_rep': ner_rep, 'deprel_rep': deprel_rep, 'type_rep': type_rep } class ACE05Dataset(Dataset):", "load_data(filename, src_lang, tgt_lang, knn_file, knn_size, max_examples=-1): examples = [] wrong_subj_pos,", "- start + 1) type = torch.LongTensor(type) return { 'id':", "self.examples] class SortedBatchSampler(Sampler): def __init__(self, lengths, batch_size, shuffle=True): self.lengths =", "end + 1] = [atype] * (end - start +", "return examples def vectorize(ex, model, iseval): \"\"\"Torchify a single example.\"\"\"", "'deprel_rep': deprel_rep, 'type_rep': type_rep } class ACE05Dataset(Dataset): def __init__(self, examples,", ") indices = np.argsort(lengths, order=('l1', 'rand')) batches = [indices[i:i +", "knn_dict: sentence.tgt_lang = tgt_lang knn_words = [] for w in", "pos = torch.LongTensor([model.pos_dict[p] for p in ex.pos]) ner = torch.LongTensor([model.ner_dict[n]", "('rand', np.float_)] ) indices = np.argsort(lengths, order=('l1', 'rand')) batches =", "= ex['obj_type'] sentence.relation = ex['relation'] if ex['subj_end'] - ex['subj_start'] <", "in tqdm(enumerate(fin), total=n): tokens = line.rstrip().split(' ') v = numpy.array(tokens[1:],", "open(filename) as f: data = json.load(f) knn_dict = None if", "pos, 'ner': ner, 'deprel': deprel, 'type': type, 'head': head, 'subject':", "start, end = ex.object type[start: end + 1] = [atype]", "sentence.words = ex['token'] sentence.pos = ex['stanford_pos'] sentence.ner = ex['stanford_ner'] sentence.deprel", "== 0 for x in ex.head]) head = torch.LongTensor(ex.head) subj_position", "knn_word } def batchify(batch): \"\"\"Gather a batch of individual examples", "is a list of vectorized examples batch_size = len(batch) ids", "1] = [ttype] * (end - start + 1) atype", "+ 1) type = torch.LongTensor(type) return { 'id': ex.id, 'language':", "self.batch_size)] if self.shuffle: np.random.shuffle(batches) return iter([i for batch in batches", 
"------------------------------------------------------------------------------ # Data loading # ------------------------------------------------------------------------------ def load_data(filename, src_lang, tgt_lang,", "= io.open(file, 'r', encoding='utf-8', newline='\\n', errors='ignore') n, d = map(int,", "len_rep, 'word_rep': word_rep, 'knn_rep': knn_rep, 'head_rep': head_rep, 'subject': subject, 'object':", "in batch] language = [ex['language'] for ex in batch] use_knn", "# we swap the start and end index wrong_subj_pos +=", "{} fin = io.open(file, 'r', encoding='utf-8', newline='\\n', errors='ignore') n, d", "0 with open(filename) as f: data = json.load(f) knn_dict =", "'!{}_{}'.format(src_lang, w) if w in knn_dict: assert len(knn_dict[w]) == knn_size", "len(batch) ids = [ex['id'] for ex in batch] language =", "ex['relation'] word_rep[i, :len_rep[i]] = ex['word'] head_rep[i, :len_rep[i]] = ex['head'] subject_pos_rep[i,", "ex.pos]) ner = torch.LongTensor([model.ner_dict[n] for n in ex.ner]) deprel =", "# Batch Code Representations len_rep = torch.LongTensor(batch_size).fill_(constant.PAD) word_rep = torch.LongTensor(batch_size,", "= json.load(f) for idx, ex in enumerate(tqdm(data, total=len(data))): sentence =", "wrong_subj_pos, wrong_obj_pos)) return examples def vectorize(ex, model, iseval): \"\"\"Torchify a", "knn_rep, 'head_rep': head_rep, 'subject': subject, 'object': object, 'subject_pos_rep': subject_pos_rep, 'object_pos_rep':", "+ 1] = [ttype] * (end - start + 1)", "language, 'batch_size': batch_size, 'len_rep': len_rep, 'word_rep': word_rep, 'knn_rep': knn_rep, 'head_rep':", "'labels': labels, 'pos_rep': pos_rep, 'ner_rep': ner_rep, 'deprel_rep': deprel_rep, 'type_rep': type_rep", "def batchify(batch): \"\"\"Gather a batch of individual examples into one", "start + 1) type = torch.LongTensor(type) return { 'id': ex.id,", "type[start: end + 1] = [ttype] * (end - start", "examples, model, evaluation=False): self.model = model self.examples = examples self.evaluation", "[ex['obj_start'], ex['obj_end']] # store KNN word info if knn_dict: sentence.tgt_lang", "for ex in batch]) # Batch Code Representations len_rep =", "'subject': subject, 'object': object, 'subject_pos_rep': subject_pos_rep, 'object_pos_rep': object_pos_rep, 'labels': labels,", "sentence.knn_words = knn_words examples.append(sentence) if max_examples != -1 and len(examples)", "x in ex.head]) head = torch.LongTensor(ex.head) subj_position = torch.LongTensor(ex.subj_position) obj_position", "sentence.deprel = ex['stanford_deprel'] sentence.head = [int(x) for x in ex['stanford_head']]", "= torch.LongTensor(ex.head) subj_position = torch.LongTensor(ex.subj_position) obj_position = torch.LongTensor(ex.obj_position) type =", "sentence.subject = [ex['subj_end'], ex['subj_start']] else: sentence.subject = [ex['subj_start'], ex['subj_end']] if", "ex['stanford_deprel'] sentence.head = [int(x) for x in ex['stanford_head']] sentence.subj_type =", "ex['knn_word'] return { 'ids': ids, 'language': language, 'batch_size': batch_size, 'len_rep':", "load_word_embeddings(file): embeddings_index = {} fin = io.open(file, 'r', encoding='utf-8', newline='\\n',", "w = '!{}_{}'.format(src_lang, w) if w in knn_dict: assert len(knn_dict[w])", "word info if knn_dict: sentence.tgt_lang = tgt_lang knn_words = []", "model, evaluation=False): self.model = model self.examples = examples self.evaluation =", "else: sentence.object = [ex['obj_start'], ex['obj_end']] # store KNN word info", ":len_rep[i]] = ex['head'] subject_pos_rep[i, :len_rep[i]] 
= ex['subject_pos'] object_pos_rep[i, :len_rep[i]] =", "batch_size, 'len_rep': len_rep, 'word_rep': word_rep, 'knn_rep': knn_rep, 'head_rep': head_rep, 'subject':", "1 sentence.subject = [ex['subj_end'], ex['subj_start']] else: sentence.subject = [ex['subj_start'], ex['subj_end']]", "ex.words] words = [model.word_dict[w] for w in words] knn_word =", "# we swap the start and end index wrong_obj_pos +=", "torch.LongTensor(batch_size, max_len).fill_(constant.PAD) subject_pos_rep = torch.LongTensor(batch_size, max_len).fill_(constant.PAD) object_pos_rep = torch.LongTensor(batch_size, max_len).fill_(constant.PAD)", "len(examples) > max_examples: break if wrong_subj_pos > 0 or wrong_obj_pos", "range(0, len(indices), self.batch_size)] if self.shuffle: np.random.shuffle(batches) return iter([i for batch", "evaluation def __len__(self): return len(self.examples) def __getitem__(self, index): return vectorize(self.examples[index],", "[ex['subj_end'], ex['subj_start']] else: sentence.subject = [ex['subj_start'], ex['subj_end']] if ex['obj_end'] -", "in ex.deprel]) assert any([x == 0 for x in ex.head])", "torch.LongTensor(batch_size) subject = [] object = [] knn_rep = None", "ex.knn_words: knn_word = [[model.word_dict[w] for w in knn] for knn", "type = [0] * len(ex.words) ttype = model.type_dict[ex.subj_type] start, end", ":len_rep[i]] = ex['knn_word'] return { 'ids': ids, 'language': language, 'batch_size':", "self.shuffle: np.random.shuffle(batches) return iter([i for batch in batches for i", "words] knn_word = None if ex.knn_words: knn_word = [[model.word_dict[w] for", "model.type_dict[ex.obj_type] start, end = ex.object type[start: end + 1] =", "from tqdm import tqdm from clie.inputters import constant from clie.objects", "subject_pos_rep = torch.LongTensor(batch_size, max_len).fill_(constant.PAD) object_pos_rep = torch.LongTensor(batch_size, max_len).fill_(constant.PAD) pos_rep =", "ex.subject type[start: end + 1] = [ttype] * (end -", "= [ex['id'] for ex in batch] language = [ex['language'] for", "= torch.LongTensor(batch_size) subject = [] object = [] knn_rep =", "knn_size, max_examples=-1): examples = [] wrong_subj_pos, wrong_obj_pos = 0, 0", "ex in batch] use_knn = batch[0]['knn_word'] is not None #", "for ex in self.examples] class SortedBatchSampler(Sampler): def __init__(self, lengths, batch_size,", "list knn_size = len(batch[0]['knn_word'][0]) if use_knn else 0 # ---------", "knn_word = None if ex.knn_words: knn_word = [[model.word_dict[w] for w", "else: knn_words.append([constant.UNK_WORD] * knn_size) sentence.knn_words = knn_words examples.append(sentence) if max_examples", "= ex['token'] sentence.pos = ex['stanford_pos'] sentence.ner = ex['stanford_ner'] sentence.deprel =", "and end index wrong_obj_pos += 1 sentence.object = [ex['obj_end'], ex['obj_start']]", "numpy.array(tokens[1:], dtype=float) embeddings_index[tokens[0]] = v return embeddings_index # ------------------------------------------------------------------------------ #", "'subject': ex.subj_text, 'object': ex.obj_text, 'subject_pos': subj_position, 'object_pos': obj_position, 'relation': model.label_dict[ex.relation],", "is a 2d list knn_size = len(batch[0]['knn_word'][0]) if use_knn else", "= evaluation def __len__(self): return len(self.examples) def __getitem__(self, index): return", "in enumerate(batch): len_rep[i] = ex['word'].size(0) labels[i] = ex['relation'] word_rep[i, :len_rep[i]]", "= None if knn_file: with open(knn_file) as f: knn_dict =", "1] = [atype] * (end - start + 1) type", "positions found!'.format( 
wrong_subj_pos, wrong_obj_pos)) return examples def vectorize(ex, model, iseval):", "max_len, knn_size).fill_(constant.PAD) for i, ex in enumerate(batch): len_rep[i] = ex['word'].size(0)", "in ex.pos]) ner = torch.LongTensor([model.ner_dict[n] for n in ex.ner]) deprel", "torch.LongTensor(batch_size, max_len).fill_(constant.PAD) deprel_rep = torch.LongTensor(batch_size, max_len).fill_(constant.PAD) type_rep = torch.LongTensor(batch_size, max_len).fill_(constant.PAD)", "= [] knn_rep = None if use_knn: knn_rep = torch.LongTensor(batch_size,", "= map(int, fin.readline().split()) for i, line in tqdm(enumerate(fin), total=n): tokens", "= model.type_dict[ex.obj_type] start, end = ex.object type[start: end + 1]", "if ex.knn_words: knn_word = [[model.word_dict[w] for w in knn] for", "n in ex.ner]) deprel = torch.LongTensor([model.deprel_dict[d] for d in ex.deprel])", "= json.load(f) knn_dict = None if knn_file: with open(knn_file) as", "of individual examples into one batch.\"\"\" # batch is a", "embeddings_index[tokens[0]] = v return embeddings_index # ------------------------------------------------------------------------------ # Data loading", "vectorize(ex, model, iseval): \"\"\"Torchify a single example.\"\"\" words = ['!{}_{}'.format(ex.language,", "embeddings_index = {} fin = io.open(file, 'r', encoding='utf-8', newline='\\n', errors='ignore')", "ex.obj_text, 'subject_pos': subj_position, 'object_pos': obj_position, 'relation': model.label_dict[ex.relation], 'knn_word': knn_word }", "0: logger.info('{} and {} wrong subject and object positions found!'.format(", "subject and object positions found!'.format( wrong_subj_pos, wrong_obj_pos)) return examples def", "-1 and len(examples) > max_examples: break if wrong_subj_pos > 0", "__init__(self, lengths, batch_size, shuffle=True): self.lengths = lengths self.batch_size = batch_size", "sentence.subj_type = ex['subj_type'] sentence.obj_type = ex['obj_type'] sentence.relation = ex['relation'] if", "batchify(batch): \"\"\"Gather a batch of individual examples into one batch.\"\"\"", "= ex['stanford_pos'] sentence.ner = ex['stanford_ner'] sentence.deprel = ex['stanford_deprel'] sentence.head =", "subj_position = torch.LongTensor(ex.subj_position) obj_position = torch.LongTensor(ex.obj_position) type = [0] *", "Data loading # ------------------------------------------------------------------------------ def load_data(filename, src_lang, tgt_lang, knn_file, knn_size,", "Sentence(ex['id']) sentence.language = src_lang sentence.words = ex['token'] sentence.pos = ex['stanford_pos']", "numpy import torch import numpy as np from tqdm import", "+ self.batch_size] for i in range(0, len(indices), self.batch_size)] if self.shuffle:", "sentence.language = src_lang sentence.words = ex['token'] sentence.pos = ex['stanford_pos'] sentence.ner", "dtype=float) embeddings_index[tokens[0]] = v return embeddings_index # ------------------------------------------------------------------------------ # Data", "(end - start + 1) type = torch.LongTensor(type) return {", "max_examples != -1 and len(examples) > max_examples: break if wrong_subj_pos", "type[start: end + 1] = [atype] * (end - start", "for w in words] knn_word = None if ex.knn_words: knn_word", "torch import numpy as np from tqdm import tqdm from", "ex.object type[start: end + 1] = [atype] * (end -", "and end index wrong_subj_pos += 1 sentence.subject = [ex['subj_end'], ex['subj_start']]", "= None if ex.knn_words: knn_word = [[model.word_dict[w] for w in", "store KNN word info if knn_dict: sentence.tgt_lang = 
tgt_lang knn_words", "ex['obj_start']] else: sentence.object = [ex['obj_start'], ex['obj_end']] # store KNN word", "= line.rstrip().split(' ') v = numpy.array(tokens[1:], dtype=float) embeddings_index[tokens[0]] = v", "fin = io.open(file, 'r', encoding='utf-8', newline='\\n', errors='ignore') n, d =", ":len_rep[i]] = ex['object_pos'] pos_rep[i, :len_rep[i]] = ex['pos'] ner_rep[i, :len_rep[i]] =", "= ex['subj_type'] sentence.obj_type = ex['obj_type'] sentence.relation = ex['relation'] if ex['subj_end']", "if ex['subj_end'] - ex['subj_start'] < 0: # we swap the", "not None # NOTE. batch[0]['knn_word'] is a 2d list knn_size", "Representations len_rep = torch.LongTensor(batch_size).fill_(constant.PAD) word_rep = torch.LongTensor(batch_size, max_len).fill_(constant.PAD) head_rep =", "def __init__(self, examples, model, evaluation=False): self.model = model self.examples =", "of vectorized examples batch_size = len(batch) ids = [ex['id'] for", "Sentence from torch.utils.data import Dataset from torch.utils.data.sampler import Sampler logger", "max_len).fill_(constant.PAD) deprel_rep = torch.LongTensor(batch_size, max_len).fill_(constant.PAD) type_rep = torch.LongTensor(batch_size, max_len).fill_(constant.PAD) labels", "* len(ex.words) ttype = model.type_dict[ex.subj_type] start, end = ex.subject type[start:", "'knn_rep': knn_rep, 'head_rep': head_rep, 'subject': subject, 'object': object, 'subject_pos_rep': subject_pos_rep,", "word = torch.LongTensor(words) pos = torch.LongTensor([model.pos_dict[p] for p in ex.pos])", "torch.LongTensor(batch_size, max_len, knn_size).fill_(constant.PAD) for i, ex in enumerate(batch): len_rep[i] =", "if self.shuffle: np.random.shuffle(batches) return iter([i for batch in batches for", "head, 'subject': ex.subj_text, 'object': ex.obj_text, 'subject_pos': subj_position, 'object_pos': obj_position, 'relation':", "np.int_), ('rand', np.float_)] ) indices = np.argsort(lengths, order=('l1', 'rand')) batches", "knn] for knn in ex.knn_words] knn_word = torch.LongTensor(knn_word) word =", "else 0 # --------- Prepare Code tensors --------- max_len =", "[len(ex.words) for ex in self.examples] class SortedBatchSampler(Sampler): def __init__(self, lengths,", "'object_pos_rep': object_pos_rep, 'labels': labels, 'pos_rep': pos_rep, 'ner_rep': ner_rep, 'deprel_rep': deprel_rep,", "object = [] knn_rep = None if use_knn: knn_rep =", "= [[model.word_dict[w] for w in knn] for knn in ex.knn_words]", "'language': ex.language, 'word': word, 'pos': pos, 'ner': ner, 'deprel': deprel,", "1) atype = model.type_dict[ex.obj_type] start, end = ex.object type[start: end", "= torch.LongTensor(batch_size, max_len, knn_size).fill_(constant.PAD) for i, ex in enumerate(batch): len_rep[i]", "= '!{}_{}'.format(src_lang, w) if w in knn_dict: assert len(knn_dict[w]) ==", "numpy as np from tqdm import tqdm from clie.inputters import", "sentence.head = [int(x) for x in ex['stanford_head']] sentence.subj_type = ex['subj_type']", "use_knn = batch[0]['knn_word'] is not None # NOTE. 
batch[0]['knn_word'] is", "return { 'ids': ids, 'language': language, 'batch_size': batch_size, 'len_rep': len_rep,", "ex['type'] subject.append(ex['subject']) object.append(ex['object']) if use_knn: knn_rep[i, :len_rep[i]] = ex['knn_word'] return", "n, d = map(int, fin.readline().split()) for i, line in tqdm(enumerate(fin),", "wrong_subj_pos, wrong_obj_pos = 0, 0 with open(filename) as f: data", "obj_position, 'relation': model.label_dict[ex.relation], 'knn_word': knn_word } def batchify(batch): \"\"\"Gather a", "'ner_rep': ner_rep, 'deprel_rep': deprel_rep, 'type_rep': type_rep } class ACE05Dataset(Dataset): def", "self.shuffle = shuffle def __iter__(self): lengths = np.array( [(-l, np.random.random())", "= [] wrong_subj_pos, wrong_obj_pos = 0, 0 with open(filename) as", "0 # --------- Prepare Code tensors --------- max_len = max([ex['word'].size(0)", "head_rep, 'subject': subject, 'object': object, 'subject_pos_rep': subject_pos_rep, 'object_pos_rep': object_pos_rep, 'labels':", "batch] language = [ex['language'] for ex in batch] use_knn =", "head_rep[i, :len_rep[i]] = ex['head'] subject_pos_rep[i, :len_rep[i]] = ex['subject_pos'] object_pos_rep[i, :len_rep[i]]", "torch.LongTensor(words) pos = torch.LongTensor([model.pos_dict[p] for p in ex.pos]) ner =", "return embeddings_index # ------------------------------------------------------------------------------ # Data loading # ------------------------------------------------------------------------------ def", "[ex['language'] for ex in batch] use_knn = batch[0]['knn_word'] is not", "model.type_dict[ex.subj_type] start, end = ex.subject type[start: end + 1] =", "= ex['ner'] deprel_rep[i, :len_rep[i]] = ex['deprel'] type_rep[i, :len_rep[i]] = ex['type']", "import numpy import torch import numpy as np from tqdm", "a batch of individual examples into one batch.\"\"\" # batch", "= examples self.evaluation = evaluation def __len__(self): return len(self.examples) def", "start, end = ex.subject type[start: end + 1] = [ttype]", "= shuffle def __iter__(self): lengths = np.array( [(-l, np.random.random()) for", "'object': ex.obj_text, 'subject_pos': subj_position, 'object_pos': obj_position, 'relation': model.label_dict[ex.relation], 'knn_word': knn_word", "self.lengths], dtype=[('l1', np.int_), ('rand', np.float_)] ) indices = np.argsort(lengths, order=('l1',", "batch[0]['knn_word'] is a 2d list knn_size = len(batch[0]['knn_word'][0]) if use_knn", "example.\"\"\" words = ['!{}_{}'.format(ex.language, w) for w in ex.words] words", "batch of individual examples into one batch.\"\"\" # batch is", "vectorize(self.examples[index], self.model, iseval=self.evaluation) def lengths(self): return [len(ex.words) for ex in", "# Data loading # ------------------------------------------------------------------------------ def load_data(filename, src_lang, tgt_lang, knn_file,", "for i, line in tqdm(enumerate(fin), total=n): tokens = line.rstrip().split(' ')", "= torch.LongTensor([model.pos_dict[p] for p in ex.pos]) ner = torch.LongTensor([model.ner_dict[n] for", "for ex in batch] language = [ex['language'] for ex in", "ex['obj_start'] < 0: # we swap the start and end", "i in range(0, len(indices), self.batch_size)] if self.shuffle: np.random.shuffle(batches) return iter([i", "0: # we swap the start and end index wrong_obj_pos", "# --------- Prepare Code tensors --------- max_len = max([ex['word'].size(0) for", "use_knn else 0 # --------- Prepare Code tensors --------- max_len", "= torch.LongTensor(batch_size).fill_(constant.PAD) word_rep = 
torch.LongTensor(batch_size, max_len).fill_(constant.PAD) head_rep = torch.LongTensor(batch_size, max_len).fill_(constant.PAD)", "[] for w in ex['token']: w = '!{}_{}'.format(src_lang, w) if", "class SortedBatchSampler(Sampler): def __init__(self, lengths, batch_size, shuffle=True): self.lengths = lengths", "iter([i for batch in batches for i in batch]) def", "from torch.utils.data import Dataset from torch.utils.data.sampler import Sampler logger =", "max_examples=-1): examples = [] wrong_subj_pos, wrong_obj_pos = 0, 0 with", "tokens = line.rstrip().split(' ') v = numpy.array(tokens[1:], dtype=float) embeddings_index[tokens[0]] =", "torch.LongTensor(batch_size, max_len).fill_(constant.PAD) ner_rep = torch.LongTensor(batch_size, max_len).fill_(constant.PAD) deprel_rep = torch.LongTensor(batch_size, max_len).fill_(constant.PAD)", "(end - start + 1) atype = model.type_dict[ex.obj_type] start, end", "for p in ex.pos]) ner = torch.LongTensor([model.ner_dict[n] for n in", "'pos': pos, 'ner': ner, 'deprel': deprel, 'type': type, 'head': head,", "ex in enumerate(batch): len_rep[i] = ex['word'].size(0) labels[i] = ex['relation'] word_rep[i,", "lengths(self): return [len(ex.words) for ex in self.examples] class SortedBatchSampler(Sampler): def", "word_rep, 'knn_rep': knn_rep, 'head_rep': head_rep, 'subject': subject, 'object': object, 'subject_pos_rep':", "= [model.word_dict[w] for w in words] knn_word = None if", "if wrong_subj_pos > 0 or wrong_obj_pos > 0: logger.info('{} and", "if knn_file: with open(knn_file) as f: knn_dict = json.load(f) for", "+= 1 sentence.subject = [ex['subj_end'], ex['subj_start']] else: sentence.subject = [ex['subj_start'],", "[ex['subj_start'], ex['subj_end']] if ex['obj_end'] - ex['obj_start'] < 0: # we", "single example.\"\"\" words = ['!{}_{}'.format(ex.language, w) for w in ex.words]", "from clie.objects import Sentence from torch.utils.data import Dataset from torch.utils.data.sampler", "atype = model.type_dict[ex.obj_type] start, end = ex.object type[start: end +", "0 or wrong_obj_pos > 0: logger.info('{} and {} wrong subject", "knn_size = len(batch[0]['knn_word'][0]) if use_knn else 0 # --------- Prepare", "object, 'subject_pos_rep': subject_pos_rep, 'object_pos_rep': object_pos_rep, 'labels': labels, 'pos_rep': pos_rep, 'ner_rep':", "Code tensors --------- max_len = max([ex['word'].size(0) for ex in batch])", "= logging.getLogger(__name__) def load_word_embeddings(file): embeddings_index = {} fin = io.open(file,", "1) type = torch.LongTensor(type) return { 'id': ex.id, 'language': ex.language,", "batch]) # Batch Code Representations len_rep = torch.LongTensor(batch_size).fill_(constant.PAD) word_rep =", "# ------------------------------------------------------------------------------ # Data loading # ------------------------------------------------------------------------------ def load_data(filename, src_lang,", "--------- Prepare Code tensors --------- max_len = max([ex['word'].size(0) for ex", "logging.getLogger(__name__) def load_word_embeddings(file): embeddings_index = {} fin = io.open(file, 'r',", "lengths self.batch_size = batch_size self.shuffle = shuffle def __iter__(self): lengths", "batch_size = len(batch) ids = [ex['id'] for ex in batch]", "len(self.examples) def __getitem__(self, index): return vectorize(self.examples[index], self.model, iseval=self.evaluation) def lengths(self):", "import tqdm from clie.inputters import constant from clie.objects import Sentence", "ex['relation'] if ex['subj_end'] - ex['subj_start'] < 0: # we swap", "NOTE. 
batch[0]['knn_word'] is a 2d list knn_size = len(batch[0]['knn_word'][0]) if", "assert any([x == 0 for x in ex.head]) head =", "in ex.head]) head = torch.LongTensor(ex.head) subj_position = torch.LongTensor(ex.subj_position) obj_position =", "words = ['!{}_{}'.format(ex.language, w) for w in ex.words] words =", "= torch.LongTensor(ex.subj_position) obj_position = torch.LongTensor(ex.obj_position) type = [0] * len(ex.words)", "max_len = max([ex['word'].size(0) for ex in batch]) # Batch Code", "ex.knn_words] knn_word = torch.LongTensor(knn_word) word = torch.LongTensor(words) pos = torch.LongTensor([model.pos_dict[p]", "start and end index wrong_subj_pos += 1 sentence.subject = [ex['subj_end'],", "word, 'pos': pos, 'ner': ner, 'deprel': deprel, 'type': type, 'head':", "if knn_dict: sentence.tgt_lang = tgt_lang knn_words = [] for w", "= torch.LongTensor([model.deprel_dict[d] for d in ex.deprel]) assert any([x == 0", "in enumerate(tqdm(data, total=len(data))): sentence = Sentence(ex['id']) sentence.language = src_lang sentence.words", "subject.append(ex['subject']) object.append(ex['object']) if use_knn: knn_rep[i, :len_rep[i]] = ex['knn_word'] return {", "examples into one batch.\"\"\" # batch is a list of", "'subject_pos_rep': subject_pos_rep, 'object_pos_rep': object_pos_rep, 'labels': labels, 'pos_rep': pos_rep, 'ner_rep': ner_rep,", "w) if w in knn_dict: assert len(knn_dict[w]) == knn_size knn_words.append(knn_dict[w])", "examples self.evaluation = evaluation def __len__(self): return len(self.examples) def __getitem__(self,", "ner_rep, 'deprel_rep': deprel_rep, 'type_rep': type_rep } class ACE05Dataset(Dataset): def __init__(self,", "def load_word_embeddings(file): embeddings_index = {} fin = io.open(file, 'r', encoding='utf-8',", "= [0] * len(ex.words) ttype = model.type_dict[ex.subj_type] start, end =", "knn_words = [] for w in ex['token']: w = '!{}_{}'.format(src_lang,", "= torch.LongTensor(knn_word) word = torch.LongTensor(words) pos = torch.LongTensor([model.pos_dict[p] for p", "json import numpy import torch import numpy as np from", "= ex['pos'] ner_rep[i, :len_rep[i]] = ex['ner'] deprel_rep[i, :len_rep[i]] = ex['deprel']", "= ex['word'].size(0) labels[i] = ex['relation'] word_rep[i, :len_rep[i]] = ex['word'] head_rep[i,", "for l in self.lengths], dtype=[('l1', np.int_), ('rand', np.float_)] ) indices", "'ids': ids, 'language': language, 'batch_size': batch_size, 'len_rep': len_rep, 'word_rep': word_rep,", "wrong_obj_pos > 0: logger.info('{} and {} wrong subject and object", "self.batch_size] for i in range(0, len(indices), self.batch_size)] if self.shuffle: np.random.shuffle(batches)", "encoding='utf-8', newline='\\n', errors='ignore') n, d = map(int, fin.readline().split()) for i,", "ex['stanford_ner'] sentence.deprel = ex['stanford_deprel'] sentence.head = [int(x) for x in", "ner = torch.LongTensor([model.ner_dict[n] for n in ex.ner]) deprel = torch.LongTensor([model.deprel_dict[d]", "= tgt_lang knn_words = [] for w in ex['token']: w", "max_len).fill_(constant.PAD) labels = torch.LongTensor(batch_size) subject = [] object = []", "ex['subj_end'] - ex['subj_start'] < 0: # we swap the start", "deprel, 'type': type, 'head': head, 'subject': ex.subj_text, 'object': ex.obj_text, 'subject_pos':", "logger.info('{} and {} wrong subject and object positions found!'.format( wrong_subj_pos,", "'relation': model.label_dict[ex.relation], 'knn_word': knn_word } def batchify(batch): \"\"\"Gather a batch", "= ex['knn_word'] return { 'ids': ids, 'language': language, 'batch_size': batch_size,", 
"wrong_obj_pos = 0, 0 with open(filename) as f: data =", "'object': object, 'subject_pos_rep': subject_pos_rep, 'object_pos_rep': object_pos_rep, 'labels': labels, 'pos_rep': pos_rep,", "= model.type_dict[ex.subj_type] start, end = ex.subject type[start: end + 1]", "knn_rep = torch.LongTensor(batch_size, max_len, knn_size).fill_(constant.PAD) for i, ex in enumerate(batch):", "open(knn_file) as f: knn_dict = json.load(f) for idx, ex in", "for d in ex.deprel]) assert any([x == 0 for x", "[] wrong_subj_pos, wrong_obj_pos = 0, 0 with open(filename) as f:", "ex['subj_type'] sentence.obj_type = ex['obj_type'] sentence.relation = ex['relation'] if ex['subj_end'] -", "len(indices), self.batch_size)] if self.shuffle: np.random.shuffle(batches) return iter([i for batch in", "enumerate(tqdm(data, total=len(data))): sentence = Sentence(ex['id']) sentence.language = src_lang sentence.words =", "2d list knn_size = len(batch[0]['knn_word'][0]) if use_knn else 0 #", "[int(x) for x in ex['stanford_head']] sentence.subj_type = ex['subj_type'] sentence.obj_type =", "ex.language, 'word': word, 'pos': pos, 'ner': ner, 'deprel': deprel, 'type':", "wrong_subj_pos > 0 or wrong_obj_pos > 0: logger.info('{} and {}", "if use_knn: knn_rep = torch.LongTensor(batch_size, max_len, knn_size).fill_(constant.PAD) for i, ex", "indices = np.argsort(lengths, order=('l1', 'rand')) batches = [indices[i:i + self.batch_size]", "+ 1) atype = model.type_dict[ex.obj_type] start, end = ex.object type[start:", "knn_rep[i, :len_rep[i]] = ex['knn_word'] return { 'ids': ids, 'language': language,", "subject_pos_rep, 'object_pos_rep': object_pos_rep, 'labels': labels, 'pos_rep': pos_rep, 'ner_rep': ner_rep, 'deprel_rep':", "ids, 'language': language, 'batch_size': batch_size, 'len_rep': len_rep, 'word_rep': word_rep, 'knn_rep':", "from clie.inputters import constant from clie.objects import Sentence from torch.utils.data", "= numpy.array(tokens[1:], dtype=float) embeddings_index[tokens[0]] = v return embeddings_index # ------------------------------------------------------------------------------", "'r', encoding='utf-8', newline='\\n', errors='ignore') n, d = map(int, fin.readline().split()) for", "None if knn_file: with open(knn_file) as f: knn_dict = json.load(f)", "total=len(data))): sentence = Sentence(ex['id']) sentence.language = src_lang sentence.words = ex['token']", "loading # ------------------------------------------------------------------------------ def load_data(filename, src_lang, tgt_lang, knn_file, knn_size, max_examples=-1):", "wrong_subj_pos += 1 sentence.subject = [ex['subj_end'], ex['subj_start']] else: sentence.subject =", "len(ex.words) ttype = model.type_dict[ex.subj_type] start, end = ex.subject type[start: end", "{ 'id': ex.id, 'language': ex.language, 'word': word, 'pos': pos, 'ner':", "Code Representations len_rep = torch.LongTensor(batch_size).fill_(constant.PAD) word_rep = torch.LongTensor(batch_size, max_len).fill_(constant.PAD) head_rep", "wrong_obj_pos += 1 sentence.object = [ex['obj_end'], ex['obj_start']] else: sentence.object =", "torch.LongTensor(type) return { 'id': ex.id, 'language': ex.language, 'word': word, 'pos':", "labels, 'pos_rep': pos_rep, 'ner_rep': ner_rep, 'deprel_rep': deprel_rep, 'type_rep': type_rep }", "'knn_word': knn_word } def batchify(batch): \"\"\"Gather a batch of individual", "} def batchify(batch): \"\"\"Gather a batch of individual examples into", "type_rep = torch.LongTensor(batch_size, max_len).fill_(constant.PAD) labels = torch.LongTensor(batch_size) subject = []", "in 
self.lengths], dtype=[('l1', np.int_), ('rand', np.float_)] ) indices = np.argsort(lengths,", "KNN word info if knn_dict: sentence.tgt_lang = tgt_lang knn_words =", "total=n): tokens = line.rstrip().split(' ') v = numpy.array(tokens[1:], dtype=float) embeddings_index[tokens[0]]", "shuffle=True): self.lengths = lengths self.batch_size = batch_size self.shuffle = shuffle", "= ex['deprel'] type_rep[i, :len_rep[i]] = ex['type'] subject.append(ex['subject']) object.append(ex['object']) if use_knn:", "we swap the start and end index wrong_subj_pos += 1", "tqdm(enumerate(fin), total=n): tokens = line.rstrip().split(' ') v = numpy.array(tokens[1:], dtype=float)", "object positions found!'.format( wrong_subj_pos, wrong_obj_pos)) return examples def vectorize(ex, model,", "l in self.lengths], dtype=[('l1', np.int_), ('rand', np.float_)] ) indices =", "torch.LongTensor(batch_size, max_len).fill_(constant.PAD) type_rep = torch.LongTensor(batch_size, max_len).fill_(constant.PAD) labels = torch.LongTensor(batch_size) subject", "None # NOTE. batch[0]['knn_word'] is a 2d list knn_size =", "pos_rep = torch.LongTensor(batch_size, max_len).fill_(constant.PAD) ner_rep = torch.LongTensor(batch_size, max_len).fill_(constant.PAD) deprel_rep =", "subject, 'object': object, 'subject_pos_rep': subject_pos_rep, 'object_pos_rep': object_pos_rep, 'labels': labels, 'pos_rep':", "sentence.object = [ex['obj_end'], ex['obj_start']] else: sentence.object = [ex['obj_start'], ex['obj_end']] #", "in batches for i in batch]) def __len__(self): return len(self.lengths)", "- start + 1) atype = model.type_dict[ex.obj_type] start, end =", "tensors --------- max_len = max([ex['word'].size(0) for ex in batch]) #", "0, 0 with open(filename) as f: data = json.load(f) knn_dict", "src_lang, tgt_lang, knn_file, knn_size, max_examples=-1): examples = [] wrong_subj_pos, wrong_obj_pos", "one batch.\"\"\" # batch is a list of vectorized examples", "ex['deprel'] type_rep[i, :len_rep[i]] = ex['type'] subject.append(ex['subject']) object.append(ex['object']) if use_knn: knn_rep[i,", "= [atype] * (end - start + 1) type =", "'head_rep': head_rep, 'subject': subject, 'object': object, 'subject_pos_rep': subject_pos_rep, 'object_pos_rep': object_pos_rep,", "ner_rep = torch.LongTensor(batch_size, max_len).fill_(constant.PAD) deprel_rep = torch.LongTensor(batch_size, max_len).fill_(constant.PAD) type_rep =", "examples.append(sentence) if max_examples != -1 and len(examples) > max_examples: break", "io.open(file, 'r', encoding='utf-8', newline='\\n', errors='ignore') n, d = map(int, fin.readline().split())", "any([x == 0 for x in ex.head]) head = torch.LongTensor(ex.head)", "def lengths(self): return [len(ex.words) for ex in self.examples] class SortedBatchSampler(Sampler):", "np.array( [(-l, np.random.random()) for l in self.lengths], dtype=[('l1', np.int_), ('rand',", "end = ex.object type[start: end + 1] = [atype] *", "and object positions found!'.format( wrong_subj_pos, wrong_obj_pos)) return examples def vectorize(ex,", "{} wrong subject and object positions found!'.format( wrong_subj_pos, wrong_obj_pos)) return", "max_len).fill_(constant.PAD) pos_rep = torch.LongTensor(batch_size, max_len).fill_(constant.PAD) ner_rep = torch.LongTensor(batch_size, max_len).fill_(constant.PAD) deprel_rep", "max_len).fill_(constant.PAD) type_rep = torch.LongTensor(batch_size, max_len).fill_(constant.PAD) labels = torch.LongTensor(batch_size) subject =", "sentence.ner = ex['stanford_ner'] sentence.deprel = ex['stanford_deprel'] sentence.head = [int(x) for", 
"[[model.word_dict[w] for w in knn] for knn in ex.knn_words] knn_word", "ids = [ex['id'] for ex in batch] language = [ex['language']", "enumerate(batch): len_rep[i] = ex['word'].size(0) labels[i] = ex['relation'] word_rep[i, :len_rep[i]] =", "'batch_size': batch_size, 'len_rep': len_rep, 'word_rep': word_rep, 'knn_rep': knn_rep, 'head_rep': head_rep,", "= np.array( [(-l, np.random.random()) for l in self.lengths], dtype=[('l1', np.int_),", "x in ex['stanford_head']] sentence.subj_type = ex['subj_type'] sentence.obj_type = ex['obj_type'] sentence.relation", "in range(0, len(indices), self.batch_size)] if self.shuffle: np.random.shuffle(batches) return iter([i for", "ex in batch]) # Batch Code Representations len_rep = torch.LongTensor(batch_size).fill_(constant.PAD)", "start + 1) atype = model.type_dict[ex.obj_type] start, end = ex.object", "type = torch.LongTensor(type) return { 'id': ex.id, 'language': ex.language, 'word':", "ner, 'deprel': deprel, 'type': type, 'head': head, 'subject': ex.subj_text, 'object':", "object_pos_rep[i, :len_rep[i]] = ex['object_pos'] pos_rep[i, :len_rep[i]] = ex['pos'] ner_rep[i, :len_rep[i]]", "words = [model.word_dict[w] for w in words] knn_word = None", "if max_examples != -1 and len(examples) > max_examples: break if", "= ex['object_pos'] pos_rep[i, :len_rep[i]] = ex['pos'] ner_rep[i, :len_rep[i]] = ex['ner']", "ex in enumerate(tqdm(data, total=len(data))): sentence = Sentence(ex['id']) sentence.language = src_lang", "as f: knn_dict = json.load(f) for idx, ex in enumerate(tqdm(data,", "+= 1 sentence.object = [ex['obj_end'], ex['obj_start']] else: sentence.object = [ex['obj_start'],", "* (end - start + 1) type = torch.LongTensor(type) return", "swap the start and end index wrong_subj_pos += 1 sentence.subject", "= [ex['language'] for ex in batch] use_knn = batch[0]['knn_word'] is", "use_knn: knn_rep[i, :len_rep[i]] = ex['knn_word'] return { 'ids': ids, 'language':", "logging import json import numpy import torch import numpy as", "line in tqdm(enumerate(fin), total=n): tokens = line.rstrip().split(' ') v =", "knn_rep = None if use_knn: knn_rep = torch.LongTensor(batch_size, max_len, knn_size).fill_(constant.PAD)", "break if wrong_subj_pos > 0 or wrong_obj_pos > 0: logger.info('{}", "__len__(self): return len(self.examples) def __getitem__(self, index): return vectorize(self.examples[index], self.model, iseval=self.evaluation)", "= knn_words examples.append(sentence) if max_examples != -1 and len(examples) >", "in words] knn_word = None if ex.knn_words: knn_word = [[model.word_dict[w]", "= ex['head'] subject_pos_rep[i, :len_rep[i]] = ex['subject_pos'] object_pos_rep[i, :len_rep[i]] = ex['object_pos']", "import constant from clie.objects import Sentence from torch.utils.data import Dataset", "0: # we swap the start and end index wrong_subj_pos", "= None if use_knn: knn_rep = torch.LongTensor(batch_size, max_len, knn_size).fill_(constant.PAD) for", "fin.readline().split()) for i, line in tqdm(enumerate(fin), total=n): tokens = line.rstrip().split('", "import logging import json import numpy import torch import numpy", "np.float_)] ) indices = np.argsort(lengths, order=('l1', 'rand')) batches = [indices[i:i", "for batch in batches for i in batch]) def __len__(self):", "= ex['relation'] if ex['subj_end'] - ex['subj_start'] < 0: # we", "max_examples: break if wrong_subj_pos > 0 or wrong_obj_pos > 0:", "w) for w in ex.words] words = [model.word_dict[w] for w", "a 2d list knn_size = len(batch[0]['knn_word'][0]) if use_knn else 0", "knn_file: with open(knn_file) as 
f: knn_dict = json.load(f) for idx,", "if ex['obj_end'] - ex['obj_start'] < 0: # we swap the", "max_len).fill_(constant.PAD) head_rep = torch.LongTensor(batch_size, max_len).fill_(constant.PAD) subject_pos_rep = torch.LongTensor(batch_size, max_len).fill_(constant.PAD) object_pos_rep", "['!{}_{}'.format(ex.language, w) for w in ex.words] words = [model.word_dict[w] for", "w in words] knn_word = None if ex.knn_words: knn_word =", "language = [ex['language'] for ex in batch] use_knn = batch[0]['knn_word']", "for idx, ex in enumerate(tqdm(data, total=len(data))): sentence = Sentence(ex['id']) sentence.language", "i, ex in enumerate(batch): len_rep[i] = ex['word'].size(0) labels[i] = ex['relation']", "dtype=[('l1', np.int_), ('rand', np.float_)] ) indices = np.argsort(lengths, order=('l1', 'rand'))", "tqdm import tqdm from clie.inputters import constant from clie.objects import", "sentence.obj_type = ex['obj_type'] sentence.relation = ex['relation'] if ex['subj_end'] - ex['subj_start']", "we swap the start and end index wrong_obj_pos += 1", "self.examples = examples self.evaluation = evaluation def __len__(self): return len(self.examples)", "a single example.\"\"\" words = ['!{}_{}'.format(ex.language, w) for w in", "= np.argsort(lengths, order=('l1', 'rand')) batches = [indices[i:i + self.batch_size] for", "errors='ignore') n, d = map(int, fin.readline().split()) for i, line in", "as np from tqdm import tqdm from clie.inputters import constant", "batch is a list of vectorized examples batch_size = len(batch)", "max([ex['word'].size(0) for ex in batch]) # Batch Code Representations len_rep", "np from tqdm import tqdm from clie.inputters import constant from", "ex['stanford_pos'] sentence.ner = ex['stanford_ner'] sentence.deprel = ex['stanford_deprel'] sentence.head = [int(x)", "sentence.relation = ex['relation'] if ex['subj_end'] - ex['subj_start'] < 0: #", "= torch.LongTensor(ex.obj_position) type = [0] * len(ex.words) ttype = model.type_dict[ex.subj_type]", "start and end index wrong_obj_pos += 1 sentence.object = [ex['obj_end'],", "head = torch.LongTensor(ex.head) subj_position = torch.LongTensor(ex.subj_position) obj_position = torch.LongTensor(ex.obj_position) type", "in batch] use_knn = batch[0]['knn_word'] is not None # NOTE.", "wrong subject and object positions found!'.format( wrong_subj_pos, wrong_obj_pos)) return examples", "for i in range(0, len(indices), self.batch_size)] if self.shuffle: np.random.shuffle(batches) return", "end index wrong_subj_pos += 1 sentence.subject = [ex['subj_end'], ex['subj_start']] else:", "newline='\\n', errors='ignore') n, d = map(int, fin.readline().split()) for i, line", "torch.LongTensor(ex.subj_position) obj_position = torch.LongTensor(ex.obj_position) type = [0] * len(ex.words) ttype", "\"\"\"Gather a batch of individual examples into one batch.\"\"\" #", "for knn in ex.knn_words] knn_word = torch.LongTensor(knn_word) word = torch.LongTensor(words)", "knn_size).fill_(constant.PAD) for i, ex in enumerate(batch): len_rep[i] = ex['word'].size(0) labels[i]", "------------------------------------------------------------------------------ def load_data(filename, src_lang, tgt_lang, knn_file, knn_size, max_examples=-1): examples =", "max_len).fill_(constant.PAD) ner_rep = torch.LongTensor(batch_size, max_len).fill_(constant.PAD) deprel_rep = torch.LongTensor(batch_size, max_len).fill_(constant.PAD) type_rep", "= torch.LongTensor(type) return { 'id': ex.id, 'language': ex.language, 'word': word,", "+ 1] = [atype] * (end - start + 1)", "torch.utils.data 
import Dataset from torch.utils.data.sampler import Sampler logger = logging.getLogger(__name__)", "[ttype] * (end - start + 1) atype = model.type_dict[ex.obj_type]", "torch.LongTensor(knn_word) word = torch.LongTensor(words) pos = torch.LongTensor([model.pos_dict[p] for p in", "knn in ex.knn_words] knn_word = torch.LongTensor(knn_word) word = torch.LongTensor(words) pos", "examples batch_size = len(batch) ids = [ex['id'] for ex in", "subject = [] object = [] knn_rep = None if", "> max_examples: break if wrong_subj_pos > 0 or wrong_obj_pos >", "torch.LongTensor([model.pos_dict[p] for p in ex.pos]) ner = torch.LongTensor([model.ner_dict[n] for n", "else: sentence.subject = [ex['subj_start'], ex['subj_end']] if ex['obj_end'] - ex['obj_start'] <", "ex.subj_text, 'object': ex.obj_text, 'subject_pos': subj_position, 'object_pos': obj_position, 'relation': model.label_dict[ex.relation], 'knn_word':", "'len_rep': len_rep, 'word_rep': word_rep, 'knn_rep': knn_rep, 'head_rep': head_rep, 'subject': subject,", "obj_position = torch.LongTensor(ex.obj_position) type = [0] * len(ex.words) ttype =", "'id': ex.id, 'language': ex.language, 'word': word, 'pos': pos, 'ner': ner,", "knn_word = torch.LongTensor(knn_word) word = torch.LongTensor(words) pos = torch.LongTensor([model.pos_dict[p] for", "knn_words.append([constant.UNK_WORD] * knn_size) sentence.knn_words = knn_words examples.append(sentence) if max_examples !=", "ex.deprel]) assert any([x == 0 for x in ex.head]) head", "np.argsort(lengths, order=('l1', 'rand')) batches = [indices[i:i + self.batch_size] for i", "import Dataset from torch.utils.data.sampler import Sampler logger = logging.getLogger(__name__) def", ":len_rep[i]] = ex['word'] head_rep[i, :len_rep[i]] = ex['head'] subject_pos_rep[i, :len_rep[i]] =" ]
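The pieces compose in the usual PyTorch pipeline: load_data builds Sentence objects, ACE05Dataset torchifies them lazily via vectorize, SortedBatchSampler orders indices by descending length (with a random tie-break) so each batch pads minimally, and batchify collates. A minimal usage sketch follows; the file name, languages, and batch size are hypothetical placeholders, and the model object is assumed to expose the word_dict/pos_dict/ner_dict/deprel_dict/type_dict/label_dict vocabularies that vectorize consults:

from torch.utils.data import DataLoader

# hypothetical inputs for illustration only; `model` is the clie model
# wrapper assumed to hold the vocabulary dicts used by vectorize()
train_exs = load_data('train.json', src_lang='en', tgt_lang='ar',
                      knn_file=None, knn_size=0)
dataset = ACE05Dataset(train_exs, model, evaluation=False)
sampler = SortedBatchSampler(dataset.lengths(), batch_size=32, shuffle=True)
loader = DataLoader(dataset, batch_size=32, sampler=sampler,
                    collate_fn=batchify)
for batch in loader:
    # batch['word_rep']: LongTensor(batch_size, max_len), padded with constant.PAD
    # batch['labels']:   LongTensor(batch_size) of relation ids
    break

Because the sampler yields single indices already grouped by length, DataLoader's batch_size simply slices off consecutive runs of similar-length examples, which keeps the max_len padding in batchify tight.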
[ "except ImportError: # without Cython module_src = \"cgranges/python/cgranges.c\" def build(setup_kwargs):", "Extension cmdclass = {} try: # with Cython from Cython.Build", "import build_ext cmdclass[\"build_ext\"] = build_ext module_src = \"cgranges/python/cgranges.pyx\" except ImportError:", "is mandatory in order to build the extensions. \"\"\" setup_kwargs.update(", "= build_ext module_src = \"cgranges/python/cgranges.pyx\" except ImportError: # without Cython", "def build(setup_kwargs): \"\"\" This function is mandatory in order to", "# without Cython module_src = \"cgranges/python/cgranges.c\" def build(setup_kwargs): \"\"\" This", "try: # with Cython from Cython.Build import build_ext cmdclass[\"build_ext\"] =", "\"cgranges/khash.h\", \"cgranges/python/cgranges.pyx\" ], include_dirs=[\"cgranges\"] ) ], \"cmdclass\": cmdclass } )", "to build the extensions. \"\"\" setup_kwargs.update( { \"ext_modules\": [ Extension(", "function is mandatory in order to build the extensions. \"\"\"", "build the extensions. \"\"\" setup_kwargs.update( { \"ext_modules\": [ Extension( \"cgranges\",", "build_ext cmdclass[\"build_ext\"] = build_ext module_src = \"cgranges/python/cgranges.pyx\" except ImportError: #", "\"cgranges/python/cgranges.pyx\" except ImportError: # without Cython module_src = \"cgranges/python/cgranges.c\" def", "cmdclass = {} try: # with Cython from Cython.Build import", "{} try: # with Cython from Cython.Build import build_ext cmdclass[\"build_ext\"]", "setup_kwargs.update( { \"ext_modules\": [ Extension( \"cgranges\", sources=[module_src, \"cgranges/cgranges.c\"], depends=[ \"cgranges/cgranges.h\",", "\"cgranges/cgranges.h\", \"cgranges/khash.h\", \"cgranges/python/cgranges.pyx\" ], include_dirs=[\"cgranges\"] ) ], \"cmdclass\": cmdclass }", "build(setup_kwargs): \"\"\" This function is mandatory in order to build", "Cython module_src = \"cgranges/python/cgranges.c\" def build(setup_kwargs): \"\"\" This function is", "[ Extension( \"cgranges\", sources=[module_src, \"cgranges/cgranges.c\"], depends=[ \"cgranges/cgranges.h\", \"cgranges/khash.h\", \"cgranges/python/cgranges.pyx\" ],", "\"cgranges\", sources=[module_src, \"cgranges/cgranges.c\"], depends=[ \"cgranges/cgranges.h\", \"cgranges/khash.h\", \"cgranges/python/cgranges.pyx\" ], include_dirs=[\"cgranges\"] )", "= {} try: # with Cython from Cython.Build import build_ext", "= \"cgranges/python/cgranges.pyx\" except ImportError: # without Cython module_src = \"cgranges/python/cgranges.c\"", "ImportError: # without Cython module_src = \"cgranges/python/cgranges.c\" def build(setup_kwargs): \"\"\"", "extensions. 
\"\"\" setup_kwargs.update( { \"ext_modules\": [ Extension( \"cgranges\", sources=[module_src, \"cgranges/cgranges.c\"],", "module_src = \"cgranges/python/cgranges.pyx\" except ImportError: # without Cython module_src =", "sources=[module_src, \"cgranges/cgranges.c\"], depends=[ \"cgranges/cgranges.h\", \"cgranges/khash.h\", \"cgranges/python/cgranges.pyx\" ], include_dirs=[\"cgranges\"] ) ],", "build_ext module_src = \"cgranges/python/cgranges.pyx\" except ImportError: # without Cython module_src", "{ \"ext_modules\": [ Extension( \"cgranges\", sources=[module_src, \"cgranges/cgranges.c\"], depends=[ \"cgranges/cgranges.h\", \"cgranges/khash.h\",", "depends=[ \"cgranges/cgranges.h\", \"cgranges/khash.h\", \"cgranges/python/cgranges.pyx\" ], include_dirs=[\"cgranges\"] ) ], \"cmdclass\": cmdclass", "Extension( \"cgranges\", sources=[module_src, \"cgranges/cgranges.c\"], depends=[ \"cgranges/cgranges.h\", \"cgranges/khash.h\", \"cgranges/python/cgranges.pyx\" ], include_dirs=[\"cgranges\"]", "with Cython from Cython.Build import build_ext cmdclass[\"build_ext\"] = build_ext module_src", "cmdclass[\"build_ext\"] = build_ext module_src = \"cgranges/python/cgranges.pyx\" except ImportError: # without", "\"cgranges/python/cgranges.c\" def build(setup_kwargs): \"\"\" This function is mandatory in order", "import Extension cmdclass = {} try: # with Cython from", "module_src = \"cgranges/python/cgranges.c\" def build(setup_kwargs): \"\"\" This function is mandatory", "from Cython.Build import build_ext cmdclass[\"build_ext\"] = build_ext module_src = \"cgranges/python/cgranges.pyx\"", "\"\"\" This function is mandatory in order to build the", "the extensions. \"\"\" setup_kwargs.update( { \"ext_modules\": [ Extension( \"cgranges\", sources=[module_src,", "\"\"\" setup_kwargs.update( { \"ext_modules\": [ Extension( \"cgranges\", sources=[module_src, \"cgranges/cgranges.c\"], depends=[", "without Cython module_src = \"cgranges/python/cgranges.c\" def build(setup_kwargs): \"\"\" This function", "= \"cgranges/python/cgranges.c\" def build(setup_kwargs): \"\"\" This function is mandatory in", "order to build the extensions. \"\"\" setup_kwargs.update( { \"ext_modules\": [", "Cython from Cython.Build import build_ext cmdclass[\"build_ext\"] = build_ext module_src =", "in order to build the extensions. \"\"\" setup_kwargs.update( { \"ext_modules\":", "Cython.Build import build_ext cmdclass[\"build_ext\"] = build_ext module_src = \"cgranges/python/cgranges.pyx\" except", "\"cgranges/cgranges.c\"], depends=[ \"cgranges/cgranges.h\", \"cgranges/khash.h\", \"cgranges/python/cgranges.pyx\" ], include_dirs=[\"cgranges\"] ) ], \"cmdclass\":", "distutils.extension import Extension cmdclass = {} try: # with Cython", "\"ext_modules\": [ Extension( \"cgranges\", sources=[module_src, \"cgranges/cgranges.c\"], depends=[ \"cgranges/cgranges.h\", \"cgranges/khash.h\", \"cgranges/python/cgranges.pyx\"", "# with Cython from Cython.Build import build_ext cmdclass[\"build_ext\"] = build_ext", "from distutils.extension import Extension cmdclass = {} try: # with", "mandatory in order to build the extensions. \"\"\" setup_kwargs.update( {", "This function is mandatory in order to build the extensions." ]
[ "<filename>icarus/models/service/__init__.py<gh_stars>1-10 # -*- coding: utf-8 -*- from .compSpot import *" ]
[ ">= self.new_length: offset = random.randint(0, average_duration - self.new_length) # No", "'triple_jump', 'tying_bow_tie', 'tying_knot_-not_on_a_tie-', 'tying_tie', 'unboxing', 'unloading_truck', 'using_computer', 'using_remote_controller_-not_gaming-', 'using_segway', 'vault',", "'riding_camel', 'riding_elephant', 'riding_mechanical_bull', 'riding_mountain_bike', 'riding_mule', 'riding_or_walking_with_horse', 'riding_scooter', 'riding_unicycle', 'ripping_paper', 'robot_dancing',", "target_height : int, default 224 Scale the height of transformed", "and C = num_segments * new_length * 3 clip_input =", "= 400 self.classes = ['abseiling', 'air_drumming', 'answering_questions', 'applauding', 'applying_cream', 'archery',", "'playing_cello', 'playing_chess', 'playing_clarinet', 'playing_controller', 'playing_cricket', 'playing_cymbals', 'playing_didgeridoo', 'playing_drums', 'playing_flute', 'playing_guitar',", "image later. num_segments : int, default 1 Number of segments", "* num_segments clip_input = clip_input.reshape((-1, 3 * self.new_length, self.target_height, self.target_width))", "= ['abseiling', 'air_drumming', 'answering_questions', 'applauding', 'applying_cream', 'archery', 'arm_wrestling', 'arranging_flowers', 'assembling_computer',", "'playing_squash_or_racquetball', 'playing_tennis', 'playing_trombone', 'playing_trumpet', 'playing_ukulele', 'playing_violin', 'playing_volleyball', 'playing_xylophone', 'pole_vault', 'presenting_weather_forecast',", "image to 'new_width' for later multiscale cropping and resizing. new_height", "'building_cabinet', 'building_shed', 'bungee_jumping', 'busking', 'canoeing_or_kayaking', 'capoeira', 'carrying_baby', 'cartwheeling', 'carving_pumpkin', 'catching_fish',", "'tying_bow_tie', 'tying_knot_-not_on_a_tie-', 'tying_tie', 'unboxing', 'unloading_truck', 'using_computer', 'using_remote_controller_-not_gaming-', 'using_segway', 'vault', 'waiting_in_line',", "cv2.cvtColor(cv_img, cv2.COLOR_BGR2RGB) sampled_list.append(cv_img) # the shape of clip_input will be", "'golf_putting', 'grinding_meat', 'grooming_dog', 'grooming_horse', 'gymnastics_tumbling', 'hammer_throw', 'headbanging', 'headbutting', 'high_jump', 'high_kick',", "'javelin_throw', 'jetskiing', 'jogging', 'juggling_balls', 'juggling_fire', 'juggling_soccer_ball', 'jumping_into_pool', 'jumpstyle_dancing', 'kicking_field_goal', 'kicking_soccer_ball',", "offsets.append(0) elif not self.train and not self.test_mode: # validation if", "'spraying', 'springboard_diving', 'squat', 'sticking_tongue_out', 'stomping_grapes', 'stretching_arm', 'stretching_leg', 'strumming_guitar', 'surfing_crowd', 'surfing_water',", "'skipping_rope', 'skydiving', 'slacklining', 'slapping', 'sled_dog_racing', 'smoking', 'smoking_hookah', 'snatch_weight_lifting', 'sneezing', 'sniffing',", "'contact_juggling', 'cooking_chicken', 'cooking_egg', 'cooking_on_campfire', 'cooking_sausages', 'counting_money', 'country_line_dancing', 'cracking_neck', 'crawling_baby', 'crossing_river',", "'eating_ice_cream', 'eating_spaghetti', 'eating_watermelon', 'egg_hunting', 'exercising_arm', 'exercising_with_an_exercise_ball', 'extinguishing_fire', 'faceplanting', 'feeding_birds', 'feeding_fish',", "setting) if len(self.clips) == 0: raise(RuntimeError(\"Found 0 video clips in", "'new_height' for later multiscale cropping and resizing. 
target_width : int,", "\" + root + \"\\n\" \"Check your data directory (opt.data-dir).\"))", "Input modalities, we support only rgb video frames for now.", "+ \"\\n\" \"Check your data directory (opt.data-dir).\")) if name_pattern: self.name_pattern", "frame_name cv_img_origin = cv2.imread(frame_path, cv_read_flag) if cv_img_origin is None: raise(RuntimeError(\"Could", "if average_duration >= self.new_length: offsets.append(int((average_duration - self.new_length + 1)/2 +", "+ offset) frame_path = directory + \"/\" + frame_name cv_img_origin", "= cv2.imread(frame_path, cv_read_flag) if cv_img_origin is None: raise(RuntimeError(\"Could not load", "self.num_class = 400 self.classes = ['abseiling', 'air_drumming', 'answering_questions', 'applauding', 'applying_cream',", "'dodgeball', 'doing_aerobics', 'doing_laundry', 'doing_nails', 'drawing', 'dribbling_basketball', 'drinking', 'drinking_beer', 'drinking_shots', 'driving_car',", "'folding_clothes', 'folding_napkins', 'folding_paper', 'front_raises', 'frying_vegetables', 'garbage_collecting', 'gargling', 'getting_a_haircut', 'getting_a_tattoo', 'giving_or_receiving_award',", "'kicking_field_goal', 'kicking_soccer_ball', 'kissing', 'kitesurfing', 'knitting', 'krumping', 'laughing', 'laying_bricks', 'long_jump', 'lunge',", "the loaded image is color or grayscale modality : str,", "and resizing. new_height : int, default 256 Scale the height", "forwarding. transform : function, default None A function that takes", "= batch_size * num_segments clip_input = clip_input.reshape((-1, 3 * self.new_length,", "and not self.test_mode: # validation if average_duration >= self.new_length: offsets.append(int((average_duration", "(setting))) clips = [] with open(setting) as split_f: data =", "Scale the height of transformed image to the same 'target_height'", "training if average_duration >= self.new_length: offset = random.randint(0, average_duration -", "Networks: Towards Good Practices for Deep Action Recognition, ECCV 2016", "'ice_fishing', 'ice_skating', 'ironing', 'javelin_throw', 'jetskiing', 'jogging', 'juggling_balls', 'juggling_fire', 'juggling_soccer_ball', 'jumping_into_pool',", "'hitting_baseball', 'hockey_stop', 'holding_snake', 'hopscotch', 'hoverboarding', 'hugging', 'hula_hooping', 'hurdling', 'hurling_-sport-', 'ice_climbing',", "'massaging_feet', 'massaging_legs', \"massaging_person's_head\", 'milking_cow', 'mopping_floor', 'motorcycling', 'moving_furniture', 'mowing_lawn', 'news_anchoring', 'opening_bottle',", "'shaving_head', 'shaving_legs', 'shearing_sheep', 'shining_shoes', 'shooting_basketball', 'shooting_goal_-soccer-', 'shot_put', 'shoveling_snow', 'shredding_paper', 'shuffling_cards',", "'playing_violin', 'playing_volleyball', 'playing_xylophone', 'pole_vault', 'presenting_weather_forecast', 'pull_ups', 'pumping_fist', 'pumping_gas', 'punching_bag', 'punching_person_-boxing-',", "to obtain global video-level information. <NAME>, etal, Temporal Segment Networks:", "True Whether the loaded image is color or grayscale modality", "for batch forwarding. 
target_height : int, default 224 Scale the", "Practices for Deep Action Recognition, ECCV 2016 new_length : int,", "# pylint: disable=line-too-long,too-many-lines,missing-docstring \"\"\"Kinetics400 action classification dataset.\"\"\" import os import", "input format is not correct, missing one or more element.", "the input to B x 3 x H x W.", "path.\" % (frame_path))) if new_width > 0 and new_height >", "'baking_cookies', 'balloon_blowing', 'bandaging', 'barbequing', 'bartending', 'beatboxing', 'bee_keeping', 'belly_dancing', 'bench_pressing', 'bending_back',", "default 256 Scale the height of loaded image to 'new_height'", "clips def _TSN_RGB(self, directory, offsets, new_height, new_width, new_length, is_color, name_pattern):", "'bartending', 'beatboxing', 'bee_keeping', 'belly_dancing', 'bench_pressing', 'bending_back', 'bending_metal', 'biking_through_snow', 'blasting_sand', 'blowing_glass',", "'disc_golfing', 'diving_cliff', 'dodgeball', 'doing_aerobics', 'doing_laundry', 'doing_nails', 'drawing', 'dribbling_basketball', 'drinking', 'drinking_beer',", "test set name_pattern : str, default None The naming pattern", "cv2 = try_import_cv2() if is_color: cv_read_flag = cv2.IMREAD_COLOR else: cv_read_flag", "'washing_hands', 'water_skiing', 'water_sliding', 'watering_plants', 'waxing_back', 'waxing_chest', 'waxing_eyebrows', 'waxing_legs', 'weaving_basket', 'welding',", "not self.test_mode: # validation if average_duration >= self.new_length: offsets.append(int((average_duration -", "seg_id * average_duration) else: offsets.append(0) elif not self.train and not", "TSN training, reshape the input to B x 3 x", "x 3 x H x W. Here, B = batch_size", "d))] classes.sort() class_to_idx = {classes[i]: i for i in range(len(classes))}", "'punching_person_-boxing-', 'push_up', 'pushing_car', 'pushing_cart', 'pushing_wheelchair', 'reading_book', 'reading_newspaper', 'recording_music', 'riding_a_bike', 'riding_camel',", "evaluation on the test set name_pattern : str, default None", "3 * self.new_length, self.target_height, self.target_width)) return clip_input, target def __len__(self):", "the prepared dataset. train : bool, default True Whether to", "split_f.readlines() for line in data: line_info = line.split() # line", "'trapezing', 'trimming_or_shaving_beard', 'trimming_trees', 'triple_jump', 'tying_bow_tie', 'tying_knot_-not_on_a_tie-', 'tying_tie', 'unboxing', 'unloading_truck', 'using_computer',", "of this dataset and how to prepare it. 
Parameters ----------", "data directory (opt.data-dir).\")) if name_pattern: self.name_pattern = name_pattern else: if", "name_pattern else: if self.modality == \"rgb\": self.name_pattern = \"img_%05d.jpg\" elif", "'cleaning_shoes', 'cleaning_toilet', 'cleaning_windows', 'climbing_a_rope', 'climbing_ladder', 'climbing_tree', 'contact_juggling', 'cooking_chicken', 'cooking_egg', 'cooking_on_campfire',", "shape of clip_input will be H x W x C,", "offsets.append(0) clip_input = self._TSN_RGB(directory, offsets, self.new_height, self.new_width, self.new_length, self.is_color, self.name_pattern)", "'barbequing', 'bartending', 'beatboxing', 'bee_keeping', 'belly_dancing', 'bench_pressing', 'bending_back', 'bending_metal', 'biking_through_snow', 'blasting_sand',", "'making_pizza', 'making_snowman', 'making_sushi', 'making_tea', 'marching', 'massaging_back', 'massaging_feet', 'massaging_legs', \"massaging_person's_head\", 'milking_cow',", "'presenting_weather_forecast', 'pull_ups', 'pumping_fist', 'pumping_gas', 'punching_bag', 'punching_person_-boxing-', 'push_up', 'pushing_car', 'pushing_cart', 'pushing_wheelchair',", "self.new_length, self.target_height, self.target_width)) return clip_input, target def __len__(self): return len(self.clips)", "= line.split() # line format: video_path, video_duration, video_label if len(line_info)", "'petting_cat', 'picking_fruit', 'planting_trees', 'plastering', 'playing_accordion', 'playing_badminton', 'playing_bagpipes', 'playing_basketball', 'playing_bass_guitar', 'playing_cards',", "dataset. setting : str, required Config file of the prepared", "and new_height > 0: cv_img = cv2.resize(cv_img_origin, (new_width, new_height), interpolation)", "(opt.data-dir).\")) if name_pattern: self.name_pattern = name_pattern else: if self.modality ==", "data = split_f.readlines() for line in data: line_info = line.split()", "for rgb difference image and optical flow image later. num_segments", "'arranging_flowers', 'assembling_computer', 'auctioning', 'baby_waking_up', 'baking_cookies', 'balloon_blowing', 'bandaging', 'barbequing', 'bartending', 'beatboxing',", "...utils.filesystem import try_import_cv2 cv2 = try_import_cv2() if is_color: cv_read_flag =", "'jogging', 'juggling_balls', 'juggling_fire', 'juggling_soccer_ball', 'jumping_into_pool', 'jumpstyle_dancing', 'kicking_field_goal', 'kicking_soccer_ball', 'kissing', 'kitesurfing',", "* new_length * 3 clip_input = np.concatenate(sampled_list, axis=2) return nd.array(clip_input)", "directory (opt.data-dir).\")) if name_pattern: self.name_pattern = name_pattern else: if self.modality", "name_pattern: self.name_pattern = name_pattern else: if self.modality == \"rgb\": self.name_pattern", "'snowkiting', 'snowmobiling', 'somersaulting', 'spinning_poi', 'spray_painting', 'spraying', 'springboard_diving', 'squat', 'sticking_tongue_out', 'stomping_grapes',", "def __init__(self): self.num_class = 400 self.classes = ['abseiling', 'air_drumming', 'answering_questions',", "randint(a,b) return a random integer N such that a <=", "\"/\" + frame_name cv_img_origin = cv2.imread(frame_path, cv_read_flag) if cv_img_origin is", "'hurling_-sport-', 'ice_climbing', 'ice_fishing', 'ice_skating', 'ironing', 'javelin_throw', 'jetskiing', 'jogging', 'juggling_balls', 'juggling_fire',", "not load file %s. 
Check data path.\" % (frame_path))) if", "'eating_hotdog', 'eating_ice_cream', 'eating_spaghetti', 'eating_watermelon', 'egg_hunting', 'exercising_arm', 'exercising_with_an_exercise_ball', 'extinguishing_fire', 'faceplanting', 'feeding_birds',", "pattern of the decoded video frames. For example, img_00012.jpg is_color", "+1 because randint(a,b) return a random integer N such that", "'holding_snake', 'hopscotch', 'hoverboarding', 'hugging', 'hula_hooping', 'hurdling', 'hurling_-sport-', 'ice_climbing', 'ice_fishing', 'ice_skating',", "with open(setting) as split_f: data = split_f.readlines() for line in", "cv2.resize(cv_img_origin, (new_width, new_height), interpolation) else: cv_img = cv_img_origin cv_img =", "self.classes = ['abseiling', 'air_drumming', 'answering_questions', 'applauding', 'applying_cream', 'archery', 'arm_wrestling', 'arranging_flowers',", "% (setting))) clips = [] with open(setting) as split_f: data", "modality self.num_segments = num_segments self.new_height = new_height self.new_width = new_width", "self.test_mode: # training if average_duration >= self.new_length: offset = random.randint(0,", "in range(len(classes))} return classes, class_to_idx def _make_dataset(self, directory, setting): if", "later multiscale cropping and resizing. new_height : int, default 256", "of segments to evenly divide the video into clips. A", "'braiding_hair', 'breading_or_breadcrumbing', 'breakdancing', 'brush_painting', 'brushing_hair', 'brushing_teeth', 'building_cabinet', 'building_shed', 'bungee_jumping', 'busking',", "'cutting_pineapple', 'cutting_watermelon', 'dancing_ballet', 'dancing_charleston', 'dancing_gangnam_style', 'dancing_macarena', 'deadlifting', 'decorating_the_christmas_tree', 'digging', 'dining',", "'sharpening_knives', 'sharpening_pencil', 'shaving_head', 'shaving_legs', 'shearing_sheep', 'shining_shoes', 'shooting_basketball', 'shooting_goal_-soccer-', 'shot_put', 'shoveling_snow',", "average_duration)) else: offsets.append(0) else: # test if average_duration >= self.new_length:", "'fixing_hair', 'flipping_pancake', 'flying_kite', 'folding_clothes', 'folding_napkins', 'folding_paper', 'front_raises', 'frying_vegetables', 'garbage_collecting', 'gargling',", "None: raise(RuntimeError(\"Could not load file %s. Check data path.\" %", "'folding_napkins', 'folding_paper', 'front_raises', 'frying_vegetables', 'garbage_collecting', 'gargling', 'getting_a_haircut', 'getting_a_tattoo', 'giving_or_receiving_award', 'golf_chipping',", "action classification dataset.\"\"\" import os import random import numpy as", "Path to the folder stored the dataset. setting : str,", "'eating_carrots', 'eating_chips', 'eating_doughnuts', 'eating_hotdog', 'eating_ice_cream', 'eating_spaghetti', 'eating_watermelon', 'egg_hunting', 'exercising_arm', 'exercising_with_an_exercise_ball',", "transforms them. \"\"\" def __init__(self, setting=os.path.expanduser('~/.mxnet/datasets/kinetics400/kinetics400_train_list_rawframes.txt'), root=os.path.expanduser('~/.mxnet/datasets/kinetics400/rawframes_train'), train=True, test_mode=False, name_pattern=None,", "'mowing_lawn', 'news_anchoring', 'opening_bottle', 'opening_present', 'paragliding', 'parasailing', 'parkour', 'passing_American_football_-in_game-', 'passing_American_football_-not_in_game-', 'peeling_apples',", "video clip. 
Default is a single image, but it can", "return nd.array(clip_input) class Kinetics400Attr(object): def __init__(self): self.num_class = 400 self.classes", "'waxing_back', 'waxing_chest', 'waxing_eyebrows', 'waxing_legs', 'weaving_basket', 'welding', 'whistling', 'windsurfing', 'wrapping_present', 'wrestling',", "train self.test_mode = test_mode self.is_color = is_color self.modality = modality", "the Kinetics400 action recognition dataset. Refer to :doc:`../build/examples_datasets/kinetics400` for the", "'cutting_watermelon', 'dancing_ballet', 'dancing_charleston', 'dancing_gangnam_style', 'dancing_macarena', 'deadlifting', 'decorating_the_christmas_tree', 'digging', 'dining', 'disc_golfing',", "< 3: print('Video input format is not correct, missing one", "'riding_mule', 'riding_or_walking_with_horse', 'riding_scooter', 'riding_unicycle', 'ripping_paper', 'robot_dancing', 'rock_climbing', 'rock_scissors_paper', 'roller_skating', 'running_on_treadmill',", "if average_duration >= self.new_length: offset = random.randint(0, average_duration - self.new_length)", "'swimming_breast_stroke', 'swimming_butterfly_stroke', 'swing_dancing', 'swinging_legs', 'swinging_on_something', 'sword_fighting', 'tai_chi', 'taking_a_shower', 'tango_dancing', 'tap_dancing',", "the height of loaded image to 'new_height' for later multiscale", "or more element. %s' % line) continue clip_path = os.path.join(directory,", "the dataset. setting : str, required Config file of the", "for the description of this dataset and how to prepare", "'laying_bricks', 'long_jump', 'lunge', 'making_a_cake', 'making_a_sandwich', 'making_bed', 'making_jewelry', 'making_pizza', 'making_snowman', 'making_sushi',", "class Kinetics400Attr(object): def __init__(self): self.num_class = 400 self.classes = ['abseiling',", "grayscale modality : str, default 'rgb' Input modalities, we support", "= (clip_path, duration, target) clips.append(item) return clips def _TSN_RGB(self, directory,", "\"rgb\": self.name_pattern = \"img_%05d.jpg\" elif self.modality == \"flow\": self.name_pattern =", "self.test_mode = test_mode self.is_color = is_color self.modality = modality self.num_segments", "= new_height self.new_width = new_width self.target_height = target_height self.target_width =", "self.name_pattern = \"flow_%s_%05d.jpg\" def __getitem__(self, index): directory, duration, target =", "average_duration - self.new_length) # No +1 because randint(a,b) return a", "else: cv_img = cv_img_origin cv_img = cv2.cvtColor(cv_img, cv2.COLOR_BGR2RGB) sampled_list.append(cv_img) #", "'playing_saxophone', 'playing_squash_or_racquetball', 'playing_tennis', 'playing_trombone', 'playing_trumpet', 'playing_ukulele', 'playing_violin', 'playing_volleyball', 'playing_xylophone', 'pole_vault',", "return a random integer N such that a <= N", "of the prepared dataset. 
train : bool, default True Whether", "the height of transformed image to the same 'target_height' for", "'ski_jumping', 'skiing_-not_slalom_or_crosscountry-', 'skiing_crosscountry', 'skiing_slalom', 'skipping_rope', 'skydiving', 'slacklining', 'slapping', 'sled_dog_racing', 'smoking',", "'cleaning_toilet', 'cleaning_windows', 'climbing_a_rope', 'climbing_ladder', 'climbing_tree', 'contact_juggling', 'cooking_chicken', 'cooking_egg', 'cooking_on_campfire', 'cooking_sausages',", "'smoking', 'smoking_hookah', 'snatch_weight_lifting', 'sneezing', 'sniffing', 'snorkeling', 'snowboarding', 'snowkiting', 'snowmobiling', 'somersaulting',", "import random import numpy as np from mxnet import nd", "= int(line_info[1]) target = int(line_info[2]) item = (clip_path, duration, target)", "= name_pattern % (length_id + offset) frame_path = directory +", "the width of transformed image to the same 'target_width' for", "'tango_dancing', 'tap_dancing', 'tapping_guitar', 'tapping_pen', 'tasting_beer', 'tasting_food', 'testifying', 'texting', 'throwing_axe', 'throwing_ball',", "int(line_info[1]) target = int(line_info[2]) item = (clip_path, duration, target) clips.append(item)", "'playing_monopoly', 'playing_organ', 'playing_paintball', 'playing_piano', 'playing_poker', 'playing_recorder', 'playing_saxophone', 'playing_squash_or_racquetball', 'playing_tennis', 'playing_trombone',", "'surfing_water', 'sweeping_floor', 'swimming_backstroke', 'swimming_breast_stroke', 'swimming_butterfly_stroke', 'swing_dancing', 'swinging_legs', 'swinging_on_something', 'sword_fighting', 'tai_chi',", "load the training or validation set. test_mode : bool, default", "self.test_mode: # For TSN training, reshape the input to B", "sampled_list.append(cv_img) # the shape of clip_input will be H x", "'throwing_ball', 'throwing_discus', 'tickling', 'tobogganing', 'tossing_coin', 'tossing_salad', 'training_dog', 'trapezing', 'trimming_or_shaving_beard', 'trimming_trees',", "'robot_dancing', 'rock_climbing', 'rock_scissors_paper', 'roller_skating', 'running_on_treadmill', 'sailing', 'salsa_dancing', 'sanding_floor', 'scrambling_eggs', 'scuba_diving',", "'playing_drums', 'playing_flute', 'playing_guitar', 'playing_harmonica', 'playing_harp', 'playing_ice_hockey', 'playing_keyboard', 'playing_kickball', 'playing_monopoly', 'playing_organ',", "file %s. Check data path.\" % (frame_path))) if new_width >", "= self.transform(clip_input) if self.num_segments > 1 and not self.test_mode: #", "_TSN_RGB(self, directory, offsets, new_height, new_width, new_length, is_color, name_pattern): from ...utils.filesystem", "image, but it can be multiple video frames. For example,", "'faceplanting', 'feeding_birds', 'feeding_fish', 'feeding_goats', 'filling_eyebrows', 'finger_snapping', 'fixing_hair', 'flipping_pancake', 'flying_kite', 'folding_clothes',", "'sharpening_pencil', 'shaving_head', 'shaving_legs', 'shearing_sheep', 'shining_shoes', 'shooting_basketball', 'shooting_goal_-soccer-', 'shot_put', 'shoveling_snow', 'shredding_paper',", "default None A function that takes data and label and", "Number of segments to evenly divide the video into clips.", "'cheerleading', 'chopping_wood', 'clapping', 'clay_pottery_making', 'clean_and_jerk', 'cleaning_floor', 'cleaning_gutters', 'cleaning_pool', 'cleaning_shoes', 'cleaning_toilet',", "it can be multiple video frames. 
For example, new_length=16 means", "'stomping_grapes', 'stretching_arm', 'stretching_leg', 'strumming_guitar', 'surfing_crowd', 'surfing_water', 'sweeping_floor', 'swimming_backstroke', 'swimming_breast_stroke', 'swimming_butterfly_stroke',", "pylint: disable=line-too-long,too-many-lines,missing-docstring \"\"\"Kinetics400 action classification dataset.\"\"\" import os import random", "'spray_painting', 'spraying', 'springboard_diving', 'squat', 'sticking_tongue_out', 'stomping_grapes', 'stretching_arm', 'stretching_leg', 'strumming_guitar', 'surfing_crowd',", "'getting_a_haircut', 'getting_a_tattoo', 'giving_or_receiving_award', 'golf_chipping', 'golf_driving', 'golf_putting', 'grinding_meat', 'grooming_dog', 'grooming_horse', 'gymnastics_tumbling',", "Parameters ---------- root : str, default '~/.mxnet/datasets/kinetics400' Path to the", "self.modality == \"rgb\": self.name_pattern = \"img_%05d.jpg\" elif self.modality == \"flow\":", "'building_shed', 'bungee_jumping', 'busking', 'canoeing_or_kayaking', 'capoeira', 'carrying_baby', 'cartwheeling', 'carving_pumpkin', 'catching_fish', 'catching_or_throwing_baseball',", "'shot_put', 'shoveling_snow', 'shredding_paper', 'shuffling_cards', 'side_kick', 'sign_language_interpreting', 'singing', 'situp', 'skateboarding', 'ski_jumping',", "\"flow\": self.name_pattern = \"flow_%s_%05d.jpg\" def __getitem__(self, index): directory, duration, target", "range(self.num_segments): if self.train and not self.test_mode: # training if average_duration", "'target_height' for batch forwarding. transform : function, default None A", ": bool, default True Whether the loaded image is color", "'climbing_tree', 'contact_juggling', 'cooking_chicken', 'cooking_egg', 'cooking_on_campfire', 'cooking_sausages', 'counting_money', 'country_line_dancing', 'cracking_neck', 'crawling_baby',", "self.new_width = new_width self.target_height = target_height self.target_width = target_width self.new_length", "'playing_kickball', 'playing_monopoly', 'playing_organ', 'playing_paintball', 'playing_piano', 'playing_poker', 'playing_recorder', 'playing_saxophone', 'playing_squash_or_racquetball', 'playing_tennis',", "num_segments : int, default 1 Number of segments to evenly", "'sweeping_floor', 'swimming_backstroke', 'swimming_breast_stroke', 'swimming_butterfly_stroke', 'swing_dancing', 'swinging_legs', 'swinging_on_something', 'sword_fighting', 'tai_chi', 'taking_a_shower',", "'texting', 'throwing_axe', 'throwing_ball', 'throwing_discus', 'tickling', 'tobogganing', 'tossing_coin', 'tossing_salad', 'training_dog', 'trapezing',", "'krumping', 'laughing', 'laying_bricks', 'long_jump', 'lunge', 'making_a_cake', 'making_a_sandwich', 'making_bed', 'making_jewelry', 'making_pizza',", "forwarding. 
target_height : int, default 224 Scale the height of", "= {classes[i]: i for i in range(len(classes))} return classes, class_to_idx", "if len(line_info) < 3: print('Video input format is not correct,", "bool, default False Whether to perform evaluation on the test", "'laughing', 'laying_bricks', 'long_jump', 'lunge', 'making_a_cake', 'making_a_sandwich', 'making_bed', 'making_jewelry', 'making_pizza', 'making_snowman',", "'recording_music', 'riding_a_bike', 'riding_camel', 'riding_elephant', 'riding_mechanical_bull', 'riding_mountain_bike', 'riding_mule', 'riding_or_walking_with_horse', 'riding_scooter', 'riding_unicycle',", "'flipping_pancake', 'flying_kite', 'folding_clothes', 'folding_napkins', 'folding_paper', 'front_raises', 'frying_vegetables', 'garbage_collecting', 'gargling', 'getting_a_haircut',", "1 Number of segments to evenly divide the video into", "self.transform is not None: clip_input = self.transform(clip_input) if self.num_segments >", "video frames. For example, new_length=16 means we will extract a", "height of loaded image to 'new_height' for later multiscale cropping", "for batch forwarding. transform : function, default None A function", "'using_remote_controller_-not_gaming-', 'using_segway', 'vault', 'waiting_in_line', 'walking_the_dog', 'washing_dishes', 'washing_feet', 'washing_hair', 'washing_hands', 'water_skiing',", "if self.modality == \"rgb\": self.name_pattern = \"img_%05d.jpg\" elif self.modality ==", "clip_input, target def __len__(self): return len(self.clips) def _find_classes(self, directory): classes", "is None: raise(RuntimeError(\"Could not load file %s. Check data path.\"", "'playing_accordion', 'playing_badminton', 'playing_bagpipes', 'playing_basketball', 'playing_bass_guitar', 'playing_cards', 'playing_cello', 'playing_chess', 'playing_clarinet', 'playing_controller',", "'eating_spaghetti', 'eating_watermelon', 'egg_hunting', 'exercising_arm', 'exercising_with_an_exercise_ball', 'extinguishing_fire', 'faceplanting', 'feeding_birds', 'feeding_fish', 'feeding_goats',", "self.train and not self.test_mode: # training if average_duration >= self.new_length:", "average_duration = int(duration / self.num_segments) offsets = [] for seg_id", "for Deep Action Recognition, ECCV 2016 new_length : int, default", "offsets.append(0) else: # test if average_duration >= self.new_length: offsets.append(int((average_duration -", "'roller_skating', 'running_on_treadmill', 'sailing', 'salsa_dancing', 'sanding_floor', 'scrambling_eggs', 'scuba_diving', 'setting_table', 'shaking_hands', 'shaking_head',", "'tasting_beer', 'tasting_food', 'testifying', 'texting', 'throwing_axe', 'throwing_ball', 'throwing_discus', 'tickling', 'tobogganing', 'tossing_coin',", "useful technique to obtain global video-level information. <NAME>, etal, Temporal", "self.is_color = is_color self.modality = modality self.num_segments = num_segments self.new_height", "offset = random.randint(0, average_duration - self.new_length) # No +1 because", "dataset and how to prepare it. 
Parameters ---------- root :", "'beatboxing', 'bee_keeping', 'belly_dancing', 'bench_pressing', 'bending_back', 'bending_metal', 'biking_through_snow', 'blasting_sand', 'blowing_glass', 'blowing_leaves',", "'kitesurfing', 'knitting', 'krumping', 'laughing', 'laying_bricks', 'long_jump', 'lunge', 'making_a_cake', 'making_a_sandwich', 'making_bed',", "train=True, test_mode=False, name_pattern=None, is_color=True, modality='rgb', num_segments=1, new_length=1, new_width=340, new_height=256, target_width=224,", "one or more element. %s' % line) continue clip_path =", "'milking_cow', 'mopping_floor', 'motorcycling', 'moving_furniture', 'mowing_lawn', 'news_anchoring', 'opening_bottle', 'opening_present', 'paragliding', 'parasailing',", "perform evaluation on the test set name_pattern : str, default", ": int, default 1 Number of segments to evenly divide", "try_import_cv2 cv2 = try_import_cv2() if is_color: cv_read_flag = cv2.IMREAD_COLOR else:", ": int, default 340 Scale the width of loaded image", "\"\"\"Kinetics400 action classification dataset.\"\"\" import os import random import numpy", "obtain global video-level information. <NAME>, etal, Temporal Segment Networks: Towards", "image to the same 'target_height' for batch forwarding. transform :", "self).__init__() self.root = root self.setting = setting self.train = train", "= cv2.IMREAD_GRAYSCALE interpolation = cv2.INTER_LINEAR sampled_list = [] for _,", "- self.new_length) # No +1 because randint(a,b) return a random", "return classes, class_to_idx def _make_dataset(self, directory, setting): if not os.path.exists(setting):", "Config file of the prepared dataset. train : bool, default", "self.train and not self.test_mode: # validation if average_duration >= self.new_length:", "to perform evaluation on the test set name_pattern : str,", "clip_input = np.concatenate(sampled_list, axis=2) return nd.array(clip_input) class Kinetics400Attr(object): def __init__(self):", "not self.test_mode: # training if average_duration >= self.new_length: offset =", "'using_segway', 'vault', 'waiting_in_line', 'walking_the_dog', 'washing_dishes', 'washing_feet', 'washing_hair', 'washing_hands', 'water_skiing', 'water_sliding',", "the shape of clip_input will be H x W x", "cv_img_origin cv_img = cv2.cvtColor(cv_img, cv2.COLOR_BGR2RGB) sampled_list.append(cv_img) # the shape of", "frames. 
For example, img_00012.jpg is_color : bool, default True Whether", "line_info = line.split() # line format: video_path, video_duration, video_label if", "'training_dog', 'trapezing', 'trimming_or_shaving_beard', 'trimming_trees', 'triple_jump', 'tying_bow_tie', 'tying_knot_-not_on_a_tie-', 'tying_tie', 'unboxing', 'unloading_truck',", "dataset.\"\"\" import os import random import numpy as np from", "= transform self.classes, self.class_to_idx = self._find_classes(root) self.clips = self._make_dataset(root, setting)", "'parkour', 'passing_American_football_-in_game-', 'passing_American_football_-not_in_game-', 'peeling_apples', 'peeling_potatoes', 'petting_animal_-not_cat-', 'petting_cat', 'picking_fruit', 'planting_trees', 'plastering',", "'playing_harp', 'playing_ice_hockey', 'playing_keyboard', 'playing_kickball', 'playing_monopoly', 'playing_organ', 'playing_paintball', 'playing_piano', 'playing_poker', 'playing_recorder',", "'walking_the_dog', 'washing_dishes', 'washing_feet', 'washing_hair', 'washing_hands', 'water_skiing', 'water_sliding', 'watering_plants', 'waxing_back', 'waxing_chest',", "'country_line_dancing', 'cracking_neck', 'crawling_baby', 'crossing_river', 'crying', 'curling_hair', 'cutting_nails', 'cutting_pineapple', 'cutting_watermelon', 'dancing_ballet',", "%s' % line) continue clip_path = os.path.join(directory, line_info[0]) duration =", "information. <NAME>, etal, Temporal Segment Networks: Towards Good Practices for", "'capoeira', 'carrying_baby', 'cartwheeling', 'carving_pumpkin', 'catching_fish', 'catching_or_throwing_baseball', 'catching_or_throwing_frisbee', 'catching_or_throwing_softball', 'celebrating', 'changing_oil',", "bool, default True Whether to load the training or validation", "duration = int(line_info[1]) target = int(line_info[2]) item = (clip_path, duration,", "height of transformed image to the same 'target_height' for batch", "nd.array(clip_input) class Kinetics400Attr(object): def __init__(self): self.num_class = 400 self.classes =", "if is_color: cv_read_flag = cv2.IMREAD_COLOR else: cv_read_flag = cv2.IMREAD_GRAYSCALE interpolation", "__all__ = ['Kinetics400'] class Kinetics400(dataset.Dataset): \"\"\"Load the Kinetics400 action recognition", "'situp', 'skateboarding', 'ski_jumping', 'skiing_-not_slalom_or_crosscountry-', 'skiing_crosscountry', 'skiing_slalom', 'skipping_rope', 'skydiving', 'slacklining', 'slapping',", "# No +1 because randint(a,b) return a random integer N", "cv2.IMREAD_GRAYSCALE interpolation = cv2.INTER_LINEAR sampled_list = [] for _, offset", "'drop_kicking', 'drumming_fingers', 'dunking_basketball', 'dying_hair', 'eating_burger', 'eating_cake', 'eating_carrots', 'eating_chips', 'eating_doughnuts', 'eating_hotdog',", "frames for now. Will add support for rgb difference image", "x H x W. Here, B = batch_size * num_segments", "<= N <= b. offsets.append(offset + seg_id * average_duration) else:", "and opt.val-list. \" % (setting))) clips = [] with open(setting)", "Kinetics400Attr(object): def __init__(self): self.num_class = 400 self.classes = ['abseiling', 'air_drumming',", "'springboard_diving', 'squat', 'sticking_tongue_out', 'stomping_grapes', 'stretching_arm', 'stretching_leg', 'strumming_guitar', 'surfing_crowd', 'surfing_water', 'sweeping_floor',", "'high_jump', 'high_kick', 'hitting_baseball', 'hockey_stop', 'holding_snake', 'hopscotch', 'hoverboarding', 'hugging', 'hula_hooping', 'hurdling',", "and transforms them. 
\"\"\" def __init__(self, setting=os.path.expanduser('~/.mxnet/datasets/kinetics400/kinetics400_train_list_rawframes.txt'), root=os.path.expanduser('~/.mxnet/datasets/kinetics400/rawframes_train'), train=True, test_mode=False,", "test_mode : bool, default False Whether to perform evaluation on", "default '~/.mxnet/datasets/kinetics400' Path to the folder stored the dataset. setting", "directory, setting): if not os.path.exists(setting): raise(RuntimeError(\"Setting file %s doesn't exist.", "video frames for now. Will add support for rgb difference", "def _find_classes(self, directory): classes = [d for d in os.listdir(directory)", "to :doc:`../build/examples_datasets/kinetics400` for the description of this dataset and how", "the same 'target_width' for batch forwarding. target_height : int, default", "A function that takes data and label and transforms them.", "from ...utils.filesystem import try_import_cv2 cv2 = try_import_cv2() if is_color: cv_read_flag", "'deadlifting', 'decorating_the_christmas_tree', 'digging', 'dining', 'disc_golfing', 'diving_cliff', 'dodgeball', 'doing_aerobics', 'doing_laundry', 'doing_nails',", "os.listdir(directory) if os.path.isdir(os.path.join(directory, d))] classes.sort() class_to_idx = {classes[i]: i for", "resizing. target_width : int, default 224 Scale the width of", "we support only rgb video frames for now. Will add", "'sword_fighting', 'tai_chi', 'taking_a_shower', 'tango_dancing', 'tap_dancing', 'tapping_guitar', 'tapping_pen', 'tasting_beer', 'tasting_food', 'testifying',", "example, new_length=16 means we will extract a video clip of", "width of loaded image to 'new_width' for later multiscale cropping", "for i in range(len(classes))} return classes, class_to_idx def _make_dataset(self, directory,", "'cracking_neck', 'crawling_baby', 'crossing_river', 'crying', 'curling_hair', 'cutting_nails', 'cutting_pineapple', 'cutting_watermelon', 'dancing_ballet', 'dancing_charleston',", "a single image, but it can be multiple video frames.", "'playing_organ', 'playing_paintball', 'playing_piano', 'playing_poker', 'playing_recorder', 'playing_saxophone', 'playing_squash_or_racquetball', 'playing_tennis', 'playing_trombone', 'playing_trumpet',", "'tobogganing', 'tossing_coin', 'tossing_salad', 'training_dog', 'trapezing', 'trimming_or_shaving_beard', 'trimming_trees', 'triple_jump', 'tying_bow_tie', 'tying_knot_-not_on_a_tie-',", "new_width > 0 and new_height > 0: cv_img = cv2.resize(cv_img_origin,", "i for i in range(len(classes))} return classes, class_to_idx def _make_dataset(self,", "offsets.append(offset + seg_id * average_duration) else: offsets.append(0) elif not self.train", "= num_segments * new_length * 3 clip_input = np.concatenate(sampled_list, axis=2)", "for d in os.listdir(directory) if os.path.isdir(os.path.join(directory, d))] classes.sort() class_to_idx =", "video_duration, video_label if len(line_info) < 3: print('Video input format is", "The naming pattern of the decoded video frames. For example,", "'giving_or_receiving_award', 'golf_chipping', 'golf_driving', 'golf_putting', 'grinding_meat', 'grooming_dog', 'grooming_horse', 'gymnastics_tumbling', 'hammer_throw', 'headbanging',", "'smoking_hookah', 'snatch_weight_lifting', 'sneezing', 'sniffing', 'snorkeling', 'snowboarding', 'snowkiting', 'snowmobiling', 'somersaulting', 'spinning_poi',", "frames. 
For example, new_length=16 means we will extract a video", "etal, Temporal Segment Networks: Towards Good Practices for Deep Action", "enumerate(offsets): for length_id in range(1, new_length+1): frame_name = name_pattern %", "Whether the loaded image is color or grayscale modality :", "% (frame_path))) if new_width > 0 and new_height > 0:", "256 Scale the height of loaded image to 'new_height' for", "and optical flow image later. num_segments : int, default 1", "multiscale cropping and resizing. target_width : int, default 224 Scale", "[] for seg_id in range(self.num_segments): if self.train and not self.test_mode:", "try_import_cv2() if is_color: cv_read_flag = cv2.IMREAD_COLOR else: cv_read_flag = cv2.IMREAD_GRAYSCALE", "or validation set. test_mode : bool, default False Whether to", "int, default 224 Scale the width of transformed image to", "def _TSN_RGB(self, directory, offsets, new_height, new_width, new_length, is_color, name_pattern): from", "'brushing_hair', 'brushing_teeth', 'building_cabinet', 'building_shed', 'bungee_jumping', 'busking', 'canoeing_or_kayaking', 'capoeira', 'carrying_baby', 'cartwheeling',", "__len__(self): return len(self.clips) def _find_classes(self, directory): classes = [d for", "multiple video frames. For example, new_length=16 means we will extract", "N such that a <= N <= b. offsets.append(offset +", "'surfing_crowd', 'surfing_water', 'sweeping_floor', 'swimming_backstroke', 'swimming_breast_stroke', 'swimming_butterfly_stroke', 'swing_dancing', 'swinging_legs', 'swinging_on_something', 'sword_fighting',", "extract a video clip of consecutive 16 frames. new_width :", "return clip_input, target def __len__(self): return len(self.clips) def _find_classes(self, directory):", "'washing_hair', 'washing_hands', 'water_skiing', 'water_sliding', 'watering_plants', 'waxing_back', 'waxing_chest', 'waxing_eyebrows', 'waxing_legs', 'weaving_basket',", ": int, default 256 Scale the height of loaded image", "action recognition dataset. Refer to :doc:`../build/examples_datasets/kinetics400` for the description of", "is_color self.modality = modality self.num_segments = num_segments self.new_height = new_height", "class_to_idx def _make_dataset(self, directory, setting): if not os.path.exists(setting): raise(RuntimeError(\"Setting file", "'changing_wheel', 'checking_tires', 'cheerleading', 'chopping_wood', 'clapping', 'clay_pottery_making', 'clean_and_jerk', 'cleaning_floor', 'cleaning_gutters', 'cleaning_pool',", "'skateboarding', 'ski_jumping', 'skiing_-not_slalom_or_crosscountry-', 'skiing_crosscountry', 'skiing_slalom', 'skipping_rope', 'skydiving', 'slacklining', 'slapping', 'sled_dog_racing',", "0 video clips in subfolders of: \" + root +", "# test if average_duration >= self.new_length: offsets.append(int((average_duration - self.new_length +", "video_label if len(line_info) < 3: print('Video input format is not", "later. 
num_segments : int, default 1 Number of segments to", "'marching', 'massaging_back', 'massaging_feet', 'massaging_legs', \"massaging_person's_head\", 'milking_cow', 'mopping_floor', 'motorcycling', 'moving_furniture', 'mowing_lawn',", "# training if average_duration >= self.new_length: offset = random.randint(0, average_duration", "target_height=224, transform=None): super(Kinetics400, self).__init__() self.root = root self.setting = setting", "default 224 Scale the height of transformed image to the", "self.target_width = target_width self.new_length = new_length self.transform = transform self.classes,", "length_id in range(1, new_length+1): frame_name = name_pattern % (length_id +", "'mopping_floor', 'motorcycling', 'moving_furniture', 'mowing_lawn', 'news_anchoring', 'opening_bottle', 'opening_present', 'paragliding', 'parasailing', 'parkour',", "'passing_American_football_-in_game-', 'passing_American_football_-not_in_game-', 'peeling_apples', 'peeling_potatoes', 'petting_animal_-not_cat-', 'petting_cat', 'picking_fruit', 'planting_trees', 'plastering', 'playing_accordion',", "'tickling', 'tobogganing', 'tossing_coin', 'tossing_salad', 'training_dog', 'trapezing', 'trimming_or_shaving_beard', 'trimming_trees', 'triple_jump', 'tying_bow_tie',", "'trimming_trees', 'triple_jump', 'tying_bow_tie', 'tying_knot_-not_on_a_tie-', 'tying_tie', 'unboxing', 'unloading_truck', 'using_computer', 'using_remote_controller_-not_gaming-', 'using_segway',", "'blowing_out_candles', 'bobsledding', 'bookbinding', 'bouncing_on_trampoline', 'bowling', 'braiding_hair', 'breading_or_breadcrumbing', 'breakdancing', 'brush_painting', 'brushing_hair',", "'drinking_shots', 'driving_car', 'driving_tractor', 'drop_kicking', 'drumming_fingers', 'dunking_basketball', 'dying_hair', 'eating_burger', 'eating_cake', 'eating_carrots',", "'headbutting', 'high_jump', 'high_kick', 'hitting_baseball', 'hockey_stop', 'holding_snake', 'hopscotch', 'hoverboarding', 'hugging', 'hula_hooping',", "batch forwarding. target_height : int, default 224 Scale the height", "target def __len__(self): return len(self.clips) def _find_classes(self, directory): classes =", "B x 3 x H x W. Here, B =", "'playing_harmonica', 'playing_harp', 'playing_ice_hockey', 'playing_keyboard', 'playing_kickball', 'playing_monopoly', 'playing_organ', 'playing_paintball', 'playing_piano', 'playing_poker',", "'crawling_baby', 'crossing_river', 'crying', 'curling_hair', 'cutting_nails', 'cutting_pineapple', 'cutting_watermelon', 'dancing_ballet', 'dancing_charleston', 'dancing_gangnam_style',", "default 340 Scale the width of loaded image to 'new_width'", "of clip_input will be H x W x C, and", "'ironing', 'javelin_throw', 'jetskiing', 'jogging', 'juggling_balls', 'juggling_fire', 'juggling_soccer_ball', 'jumping_into_pool', 'jumpstyle_dancing', 'kicking_field_goal',", "%s. 
Check data path.\" % (frame_path))) if new_width > 0", "'sailing', 'salsa_dancing', 'sanding_floor', 'scrambling_eggs', 'scuba_diving', 'setting_table', 'shaking_hands', 'shaking_head', 'sharpening_knives', 'sharpening_pencil',", "new_height), interpolation) else: cv_img = cv_img_origin cv_img = cv2.cvtColor(cv_img, cv2.COLOR_BGR2RGB)", "'hammer_throw', 'headbanging', 'headbutting', 'high_jump', 'high_kick', 'hitting_baseball', 'hockey_stop', 'holding_snake', 'hopscotch', 'hoverboarding',", "'shaving_legs', 'shearing_sheep', 'shining_shoes', 'shooting_basketball', 'shooting_goal_-soccer-', 'shot_put', 'shoveling_snow', 'shredding_paper', 'shuffling_cards', 'side_kick',", "# validation if average_duration >= self.new_length: offsets.append(int((average_duration - self.new_length +", "target_width : int, default 224 Scale the width of transformed", "def __getitem__(self, index): directory, duration, target = self.clips[index] average_duration =", "'rgb' Input modalities, we support only rgb video frames for", "same 'target_height' for batch forwarding. transform : function, default None", "= modality self.num_segments = num_segments self.new_height = new_height self.new_width =", "new_height self.new_width = new_width self.target_height = target_height self.target_width = target_width", "is_color : bool, default True Whether the loaded image is", "3: print('Video input format is not correct, missing one or", "doesn't exist. Check opt.train-list and opt.val-list. \" % (setting))) clips", "in range(1, new_length+1): frame_name = name_pattern % (length_id + offset)", "root : str, default '~/.mxnet/datasets/kinetics400' Path to the folder stored", "'scuba_diving', 'setting_table', 'shaking_hands', 'shaking_head', 'sharpening_knives', 'sharpening_pencil', 'shaving_head', 'shaving_legs', 'shearing_sheep', 'shining_shoes',", "\"\"\"Load the Kinetics400 action recognition dataset. Refer to :doc:`../build/examples_datasets/kinetics400` for", "data: line_info = line.split() # line format: video_path, video_duration, video_label", "not correct, missing one or more element. %s' % line)", "400 self.classes = ['abseiling', 'air_drumming', 'answering_questions', 'applauding', 'applying_cream', 'archery', 'arm_wrestling',", "'eating_doughnuts', 'eating_hotdog', 'eating_ice_cream', 'eating_spaghetti', 'eating_watermelon', 'egg_hunting', 'exercising_arm', 'exercising_with_an_exercise_ball', 'extinguishing_fire', 'faceplanting',", "'ripping_paper', 'robot_dancing', 'rock_climbing', 'rock_scissors_paper', 'roller_skating', 'running_on_treadmill', 'sailing', 'salsa_dancing', 'sanding_floor', 'scrambling_eggs',", "'vault', 'waiting_in_line', 'walking_the_dog', 'washing_dishes', 'washing_feet', 'washing_hair', 'washing_hands', 'water_skiing', 'water_sliding', 'watering_plants',", "'doing_aerobics', 'doing_laundry', 'doing_nails', 'drawing', 'dribbling_basketball', 'drinking', 'drinking_beer', 'drinking_shots', 'driving_car', 'driving_tractor',", "'brush_painting', 'brushing_hair', 'brushing_teeth', 'building_cabinet', 'building_shed', 'bungee_jumping', 'busking', 'canoeing_or_kayaking', 'capoeira', 'carrying_baby',", "rgb difference image and optical flow image later. 
num_segments :", "import try_import_cv2 cv2 = try_import_cv2() if is_color: cv_read_flag = cv2.IMREAD_COLOR", "'pumping_gas', 'punching_bag', 'punching_person_-boxing-', 'push_up', 'pushing_car', 'pushing_cart', 'pushing_wheelchair', 'reading_book', 'reading_newspaper', 'recording_music',", "'sanding_floor', 'scrambling_eggs', 'scuba_diving', 'setting_table', 'shaking_hands', 'shaking_head', 'sharpening_knives', 'sharpening_pencil', 'shaving_head', 'shaving_legs',", "'shooting_goal_-soccer-', 'shot_put', 'shoveling_snow', 'shredding_paper', 'shuffling_cards', 'side_kick', 'sign_language_interpreting', 'singing', 'situp', 'skateboarding',", "of: \" + root + \"\\n\" \"Check your data directory", "_find_classes(self, directory): classes = [d for d in os.listdir(directory) if", "'rock_scissors_paper', 'roller_skating', 'running_on_treadmill', 'sailing', 'salsa_dancing', 'sanding_floor', 'scrambling_eggs', 'scuba_diving', 'setting_table', 'shaking_hands',", "default False Whether to perform evaluation on the test set", "random.randint(0, average_duration - self.new_length) # No +1 because randint(a,b) return", "'juggling_balls', 'juggling_fire', 'juggling_soccer_ball', 'jumping_into_pool', 'jumpstyle_dancing', 'kicking_field_goal', 'kicking_soccer_ball', 'kissing', 'kitesurfing', 'knitting',", "clip_input = clip_input.reshape((-1, 3 * self.new_length, self.target_height, self.target_width)) return clip_input,", "= cv2.IMREAD_COLOR else: cv_read_flag = cv2.IMREAD_GRAYSCALE interpolation = cv2.INTER_LINEAR sampled_list", "'brushing_teeth', 'building_cabinet', 'building_shed', 'bungee_jumping', 'busking', 'canoeing_or_kayaking', 'capoeira', 'carrying_baby', 'cartwheeling', 'carving_pumpkin',", "= cv2.cvtColor(cv_img, cv2.COLOR_BGR2RGB) sampled_list.append(cv_img) # the shape of clip_input will", ": int, default 224 Scale the width of transformed image", "'pull_ups', 'pumping_fist', 'pumping_gas', 'punching_bag', 'punching_person_-boxing-', 'push_up', 'pushing_car', 'pushing_cart', 'pushing_wheelchair', 'reading_book',", "# the shape of clip_input will be H x W", "'eating_cake', 'eating_carrots', 'eating_chips', 'eating_doughnuts', 'eating_hotdog', 'eating_ice_cream', 'eating_spaghetti', 'eating_watermelon', 'egg_hunting', 'exercising_arm',", "for length_id in range(1, new_length+1): frame_name = name_pattern % (length_id", "Scale the height of loaded image to 'new_height' for later", "it. 
Parameters ---------- root : str, default '~/.mxnet/datasets/kinetics400' Path to", "os.path.join(directory, line_info[0]) duration = int(line_info[1]) target = int(line_info[2]) item =", "'skydiving', 'slacklining', 'slapping', 'sled_dog_racing', 'smoking', 'smoking_hookah', 'snatch_weight_lifting', 'sneezing', 'sniffing', 'snorkeling',", "'tying_tie', 'unboxing', 'unloading_truck', 'using_computer', 'using_remote_controller_-not_gaming-', 'using_segway', 'vault', 'waiting_in_line', 'walking_the_dog', 'washing_dishes',", "is_color: cv_read_flag = cv2.IMREAD_COLOR else: cv_read_flag = cv2.IMREAD_GRAYSCALE interpolation =", "'hurdling', 'hurling_-sport-', 'ice_climbing', 'ice_fishing', 'ice_skating', 'ironing', 'javelin_throw', 'jetskiing', 'jogging', 'juggling_balls',", "'bench_pressing', 'bending_back', 'bending_metal', 'biking_through_snow', 'blasting_sand', 'blowing_glass', 'blowing_leaves', 'blowing_nose', 'blowing_out_candles', 'bobsledding',", "'exercising_with_an_exercise_ball', 'extinguishing_fire', 'faceplanting', 'feeding_birds', 'feeding_fish', 'feeding_goats', 'filling_eyebrows', 'finger_snapping', 'fixing_hair', 'flipping_pancake',", "'playing_ukulele', 'playing_violin', 'playing_volleyball', 'playing_xylophone', 'pole_vault', 'presenting_weather_forecast', 'pull_ups', 'pumping_fist', 'pumping_gas', 'punching_bag',", "loaded image to 'new_width' for later multiscale cropping and resizing.", "naming pattern of the decoded video frames. For example, img_00012.jpg", "offsets.append(int((average_duration - self.new_length + 1)/2 + seg_id * average_duration)) else:", "reshape the input to B x 3 x H x", "'making_bed', 'making_jewelry', 'making_pizza', 'making_snowman', 'making_sushi', 'making_tea', 'marching', 'massaging_back', 'massaging_feet', 'massaging_legs',", ": str, default 'rgb' Input modalities, we support only rgb", "transform self.classes, self.class_to_idx = self._find_classes(root) self.clips = self._make_dataset(root, setting) if", "cv_img_origin = cv2.imread(frame_path, cv_read_flag) if cv_img_origin is None: raise(RuntimeError(\"Could not", "<= b. offsets.append(offset + seg_id * average_duration) else: offsets.append(0) elif", "self.new_length: offsets.append(int((average_duration - self.new_length + 1)/2 + seg_id * average_duration))", "random import numpy as np from mxnet import nd from", "single image, but it can be multiple video frames. For", "'playing_guitar', 'playing_harmonica', 'playing_harp', 'playing_ice_hockey', 'playing_keyboard', 'playing_kickball', 'playing_monopoly', 'playing_organ', 'playing_paintball', 'playing_piano',", ": bool, default True Whether to load the training or", "= cv_img_origin cv_img = cv2.cvtColor(cv_img, cv2.COLOR_BGR2RGB) sampled_list.append(cv_img) # the shape", "'reading_newspaper', 'recording_music', 'riding_a_bike', 'riding_camel', 'riding_elephant', 'riding_mechanical_bull', 'riding_mountain_bike', 'riding_mule', 'riding_or_walking_with_horse', 'riding_scooter',", "video clips in subfolders of: \" + root + \"\\n\"", "Kinetics400(dataset.Dataset): \"\"\"Load the Kinetics400 action recognition dataset. Refer to :doc:`../build/examples_datasets/kinetics400`", "target_width=224, target_height=224, transform=None): super(Kinetics400, self).__init__() self.root = root self.setting =", "of the decoded video frames. 
For example, img_00012.jpg is_color :", "the description of this dataset and how to prepare it.", "= try_import_cv2() if is_color: cv_read_flag = cv2.IMREAD_COLOR else: cv_read_flag =", "in os.listdir(directory) if os.path.isdir(os.path.join(directory, d))] classes.sort() class_to_idx = {classes[i]: i", "the training or validation set. test_mode : bool, default False", "loaded image is color or grayscale modality : str, default", "cropping and resizing. new_height : int, default 256 Scale the", "self.setting = setting self.train = train self.test_mode = test_mode self.is_color", "len(self.clips) def _find_classes(self, directory): classes = [d for d in", "raise(RuntimeError(\"Found 0 video clips in subfolders of: \" + root", "'exercising_arm', 'exercising_with_an_exercise_ball', 'extinguishing_fire', 'faceplanting', 'feeding_birds', 'feeding_fish', 'feeding_goats', 'filling_eyebrows', 'finger_snapping', 'fixing_hair',", "'bowling', 'braiding_hair', 'breading_or_breadcrumbing', 'breakdancing', 'brush_painting', 'brushing_hair', 'brushing_teeth', 'building_cabinet', 'building_shed', 'bungee_jumping',", "'bee_keeping', 'belly_dancing', 'bench_pressing', 'bending_back', 'bending_metal', 'biking_through_snow', 'blasting_sand', 'blowing_glass', 'blowing_leaves', 'blowing_nose',", "seg_id * average_duration)) else: offsets.append(0) clip_input = self._TSN_RGB(directory, offsets, self.new_height,", "will be H x W x C, and C =", "continue clip_path = os.path.join(directory, line_info[0]) duration = int(line_info[1]) target =", "'planting_trees', 'plastering', 'playing_accordion', 'playing_badminton', 'playing_bagpipes', 'playing_basketball', 'playing_bass_guitar', 'playing_cards', 'playing_cello', 'playing_chess',", "optical flow image later. num_segments : int, default 1 Number", "support for rgb difference image and optical flow image later.", "os.path.isdir(os.path.join(directory, d))] classes.sort() class_to_idx = {classes[i]: i for i in", "else: if self.modality == \"rgb\": self.name_pattern = \"img_%05d.jpg\" elif self.modality", "\"flow_%s_%05d.jpg\" def __getitem__(self, index): directory, duration, target = self.clips[index] average_duration", "'getting_a_tattoo', 'giving_or_receiving_award', 'golf_chipping', 'golf_driving', 'golf_putting', 'grinding_meat', 'grooming_dog', 'grooming_horse', 'gymnastics_tumbling', 'hammer_throw',", "'playing_paintball', 'playing_piano', 'playing_poker', 'playing_recorder', 'playing_saxophone', 'playing_squash_or_racquetball', 'playing_tennis', 'playing_trombone', 'playing_trumpet', 'playing_ukulele',", "Kinetics400 action recognition dataset. Refer to :doc:`../build/examples_datasets/kinetics400` for the description", "example, img_00012.jpg is_color : bool, default True Whether the loaded", "for _, offset in enumerate(offsets): for length_id in range(1, new_length+1):", "'crying', 'curling_hair', 'cutting_nails', 'cutting_pineapple', 'cutting_watermelon', 'dancing_ballet', 'dancing_charleston', 'dancing_gangnam_style', 'dancing_macarena', 'deadlifting',", "'somersaulting', 'spinning_poi', 'spray_painting', 'spraying', 'springboard_diving', 'squat', 'sticking_tongue_out', 'stomping_grapes', 'stretching_arm', 'stretching_leg',", "For example, img_00012.jpg is_color : bool, default True Whether the", "'grinding_meat', 'grooming_dog', 'grooming_horse', 'gymnastics_tumbling', 'hammer_throw', 'headbanging', 'headbutting', 'high_jump', 'high_kick', 'hitting_baseball',", "transformed image to the same 'target_width' for batch forwarding. 
target_height", "2016 new_length : int, default 1 The length of input", "'swinging_on_something', 'sword_fighting', 'tai_chi', 'taking_a_shower', 'tango_dancing', 'tap_dancing', 'tapping_guitar', 'tapping_pen', 'tasting_beer', 'tasting_food',", "= [d for d in os.listdir(directory) if os.path.isdir(os.path.join(directory, d))] classes.sort()", "'shearing_sheep', 'shining_shoes', 'shooting_basketball', 'shooting_goal_-soccer-', 'shot_put', 'shoveling_snow', 'shredding_paper', 'shuffling_cards', 'side_kick', 'sign_language_interpreting',", "self.is_color, self.name_pattern) if self.transform is not None: clip_input = self.transform(clip_input)", "directory + \"/\" + frame_name cv_img_origin = cv2.imread(frame_path, cv_read_flag) if", "'unloading_truck', 'using_computer', 'using_remote_controller_-not_gaming-', 'using_segway', 'vault', 'waiting_in_line', 'walking_the_dog', 'washing_dishes', 'washing_feet', 'washing_hair',", "'opening_present', 'paragliding', 'parasailing', 'parkour', 'passing_American_football_-in_game-', 'passing_American_football_-not_in_game-', 'peeling_apples', 'peeling_potatoes', 'petting_animal_-not_cat-', 'petting_cat',", "'doing_nails', 'drawing', 'dribbling_basketball', 'drinking', 'drinking_beer', 'drinking_shots', 'driving_car', 'driving_tractor', 'drop_kicking', 'drumming_fingers',", "and not self.test_mode: # training if average_duration >= self.new_length: offset", "Scale the width of loaded image to 'new_width' for later", "modality='rgb', num_segments=1, new_length=1, new_width=340, new_height=256, target_width=224, target_height=224, transform=None): super(Kinetics400, self).__init__()", "because randint(a,b) return a random integer N such that a", "new_height : int, default 256 Scale the height of loaded", "self.new_width, self.new_length, self.is_color, self.name_pattern) if self.transform is not None: clip_input", "0: cv_img = cv2.resize(cv_img_origin, (new_width, new_height), interpolation) else: cv_img =", "if os.path.isdir(os.path.join(directory, d))] classes.sort() class_to_idx = {classes[i]: i for i", "'extinguishing_fire', 'faceplanting', 'feeding_birds', 'feeding_fish', 'feeding_goats', 'filling_eyebrows', 'finger_snapping', 'fixing_hair', 'flipping_pancake', 'flying_kite',", "'snatch_weight_lifting', 'sneezing', 'sniffing', 'snorkeling', 'snowboarding', 'snowkiting', 'snowmobiling', 'somersaulting', 'spinning_poi', 'spray_painting',", "function, default None A function that takes data and label", "'balloon_blowing', 'bandaging', 'barbequing', 'bartending', 'beatboxing', 'bee_keeping', 'belly_dancing', 'bench_pressing', 'bending_back', 'bending_metal',", "'bungee_jumping', 'busking', 'canoeing_or_kayaking', 'capoeira', 'carrying_baby', 'cartwheeling', 'carving_pumpkin', 'catching_fish', 'catching_or_throwing_baseball', 'catching_or_throwing_frisbee',", "+ seg_id * average_duration)) else: offsets.append(0) else: # test if", "'sign_language_interpreting', 'singing', 'situp', 'skateboarding', 'ski_jumping', 'skiing_-not_slalom_or_crosscountry-', 'skiing_crosscountry', 'skiing_slalom', 'skipping_rope', 'skydiving',", "self.transform = transform self.classes, self.class_to_idx = self._find_classes(root) self.clips = self._make_dataset(root,", "self.target_height, self.target_width)) return clip_input, target def __len__(self): return len(self.clips) def", "'feeding_birds', 'feeding_fish', 'feeding_goats', 'filling_eyebrows', 'finger_snapping', 'fixing_hair', 'flipping_pancake', 'flying_kite', 'folding_clothes', 'folding_napkins',", "'playing_xylophone', 
'pole_vault', 'presenting_weather_forecast', 'pull_ups', 'pumping_fist', 'pumping_gas', 'punching_bag', 'punching_person_-boxing-', 'push_up', 'pushing_car',", "'dancing_ballet', 'dancing_charleston', 'dancing_gangnam_style', 'dancing_macarena', 'deadlifting', 'decorating_the_christmas_tree', 'digging', 'dining', 'disc_golfing', 'diving_cliff',", "'gargling', 'getting_a_haircut', 'getting_a_tattoo', 'giving_or_receiving_award', 'golf_chipping', 'golf_driving', 'golf_putting', 'grinding_meat', 'grooming_dog', 'grooming_horse',", "Refer to :doc:`../build/examples_datasets/kinetics400` for the description of this dataset and", "is not None: clip_input = self.transform(clip_input) if self.num_segments > 1", "'playing_keyboard', 'playing_kickball', 'playing_monopoly', 'playing_organ', 'playing_paintball', 'playing_piano', 'playing_poker', 'playing_recorder', 'playing_saxophone', 'playing_squash_or_racquetball',", "import numpy as np from mxnet import nd from mxnet.gluon.data", "= num_segments self.new_height = new_height self.new_width = new_width self.target_height =", "seg_id in range(self.num_segments): if self.train and not self.test_mode: # training", "transform : function, default None A function that takes data", "'folding_paper', 'front_raises', 'frying_vegetables', 'garbage_collecting', 'gargling', 'getting_a_haircut', 'getting_a_tattoo', 'giving_or_receiving_award', 'golf_chipping', 'golf_driving',", "random integer N such that a <= N <= b.", "b. offsets.append(offset + seg_id * average_duration) else: offsets.append(0) elif not", "'catching_or_throwing_baseball', 'catching_or_throwing_frisbee', 'catching_or_throwing_softball', 'celebrating', 'changing_oil', 'changing_wheel', 'checking_tires', 'cheerleading', 'chopping_wood', 'clapping',", "'target_width' for batch forwarding. target_height : int, default 224 Scale", "'water_sliding', 'watering_plants', 'waxing_back', 'waxing_chest', 'waxing_eyebrows', 'waxing_legs', 'weaving_basket', 'welding', 'whistling', 'windsurfing',", "C, and C = num_segments * new_length * 3 clip_input", "'making_a_cake', 'making_a_sandwich', 'making_bed', 'making_jewelry', 'making_pizza', 'making_snowman', 'making_sushi', 'making_tea', 'marching', 'massaging_back',", "\"img_%05d.jpg\" elif self.modality == \"flow\": self.name_pattern = \"flow_%s_%05d.jpg\" def __getitem__(self,", "to 'new_width' for later multiscale cropping and resizing. new_height :", "means we will extract a video clip of consecutive 16", "'driving_car', 'driving_tractor', 'drop_kicking', 'drumming_fingers', 'dunking_basketball', 'dying_hair', 'eating_burger', 'eating_cake', 'eating_carrots', 'eating_chips',", "'trimming_or_shaving_beard', 'trimming_trees', 'triple_jump', 'tying_bow_tie', 'tying_knot_-not_on_a_tie-', 'tying_tie', 'unboxing', 'unloading_truck', 'using_computer', 'using_remote_controller_-not_gaming-',", "'pole_vault', 'presenting_weather_forecast', 'pull_ups', 'pumping_fist', 'pumping_gas', 'punching_bag', 'punching_person_-boxing-', 'push_up', 'pushing_car', 'pushing_cart',", "file %s doesn't exist. Check opt.train-list and opt.val-list. \" %", "transformed image to the same 'target_height' for batch forwarding. transform", "self.test_mode: # validation if average_duration >= self.new_length: offsets.append(int((average_duration - self.new_length", "frames. 
new_width : int, default 340 Scale the width of", "+ seg_id * average_duration)) else: offsets.append(0) clip_input = self._TSN_RGB(directory, offsets,", "'slacklining', 'slapping', 'sled_dog_racing', 'smoking', 'smoking_hookah', 'snatch_weight_lifting', 'sneezing', 'sniffing', 'snorkeling', 'snowboarding',", "mxnet import nd from mxnet.gluon.data import dataset __all__ = ['Kinetics400']", "= ['Kinetics400'] class Kinetics400(dataset.Dataset): \"\"\"Load the Kinetics400 action recognition dataset.", "'cooking_sausages', 'counting_money', 'country_line_dancing', 'cracking_neck', 'crawling_baby', 'crossing_river', 'crying', 'curling_hair', 'cutting_nails', 'cutting_pineapple',", "new_length : int, default 1 The length of input video", "new_length, is_color, name_pattern): from ...utils.filesystem import try_import_cv2 cv2 = try_import_cv2()", "frame_name = name_pattern % (length_id + offset) frame_path = directory", "'riding_a_bike', 'riding_camel', 'riding_elephant', 'riding_mechanical_bull', 'riding_mountain_bike', 'riding_mule', 'riding_or_walking_with_horse', 'riding_scooter', 'riding_unicycle', 'ripping_paper',", "'shaking_head', 'sharpening_knives', 'sharpening_pencil', 'shaving_head', 'shaving_legs', 'shearing_sheep', 'shining_shoes', 'shooting_basketball', 'shooting_goal_-soccer-', 'shot_put',", "> 1 and not self.test_mode: # For TSN training, reshape", "'tapping_guitar', 'tapping_pen', 'tasting_beer', 'tasting_food', 'testifying', 'texting', 'throwing_axe', 'throwing_ball', 'throwing_discus', 'tickling',", "cv_img = cv2.resize(cv_img_origin, (new_width, new_height), interpolation) else: cv_img = cv_img_origin", "'dancing_gangnam_style', 'dancing_macarena', 'deadlifting', 'decorating_the_christmas_tree', 'digging', 'dining', 'disc_golfing', 'diving_cliff', 'dodgeball', 'doing_aerobics',", "self.name_pattern) if self.transform is not None: clip_input = self.transform(clip_input) if", "/ self.num_segments) offsets = [] for seg_id in range(self.num_segments): if", "int(line_info[2]) item = (clip_path, duration, target) clips.append(item) return clips def", "cv2.INTER_LINEAR sampled_list = [] for _, offset in enumerate(offsets): for", "evenly divide the video into clips. A useful technique to", "data path.\" % (frame_path))) if new_width > 0 and new_height", "set name_pattern : str, default None The naming pattern of", "'counting_money', 'country_line_dancing', 'cracking_neck', 'crawling_baby', 'crossing_river', 'crying', 'curling_hair', 'cutting_nails', 'cutting_pineapple', 'cutting_watermelon',", "class_to_idx = {classes[i]: i for i in range(len(classes))} return classes,", "cv_img_origin is None: raise(RuntimeError(\"Could not load file %s. Check data", "'swinging_legs', 'swinging_on_something', 'sword_fighting', 'tai_chi', 'taking_a_shower', 'tango_dancing', 'tap_dancing', 'tapping_guitar', 'tapping_pen', 'tasting_beer',", "name_pattern : str, default None The naming pattern of the", "16 frames. new_width : int, default 340 Scale the width", "'drinking_beer', 'drinking_shots', 'driving_car', 'driving_tractor', 'drop_kicking', 'drumming_fingers', 'dunking_basketball', 'dying_hair', 'eating_burger', 'eating_cake',", "= [] for _, offset in enumerate(offsets): for length_id in", "W. Here, B = batch_size * num_segments clip_input = clip_input.reshape((-1,", "Deep Action Recognition, ECCV 2016 new_length : int, default 1", "takes data and label and transforms them. 
\"\"\" def __init__(self,", "new_length self.transform = transform self.classes, self.class_to_idx = self._find_classes(root) self.clips =", "such that a <= N <= b. offsets.append(offset + seg_id", "self.new_height, self.new_width, self.new_length, self.is_color, self.name_pattern) if self.transform is not None:", "'eating_chips', 'eating_doughnuts', 'eating_hotdog', 'eating_ice_cream', 'eating_spaghetti', 'eating_watermelon', 'egg_hunting', 'exercising_arm', 'exercising_with_an_exercise_ball', 'extinguishing_fire',", "name_pattern % (length_id + offset) frame_path = directory + \"/\"", "correct, missing one or more element. %s' % line) continue", "'cartwheeling', 'carving_pumpkin', 'catching_fish', 'catching_or_throwing_baseball', 'catching_or_throwing_frisbee', 'catching_or_throwing_softball', 'celebrating', 'changing_oil', 'changing_wheel', 'checking_tires',", "'shaking_hands', 'shaking_head', 'sharpening_knives', 'sharpening_pencil', 'shaving_head', 'shaving_legs', 'shearing_sheep', 'shining_shoes', 'shooting_basketball', 'shooting_goal_-soccer-',", "'bouncing_on_trampoline', 'bowling', 'braiding_hair', 'breading_or_breadcrumbing', 'breakdancing', 'brush_painting', 'brushing_hair', 'brushing_teeth', 'building_cabinet', 'building_shed',", "'tying_knot_-not_on_a_tie-', 'tying_tie', 'unboxing', 'unloading_truck', 'using_computer', 'using_remote_controller_-not_gaming-', 'using_segway', 'vault', 'waiting_in_line', 'walking_the_dog',", "cv_img = cv2.cvtColor(cv_img, cv2.COLOR_BGR2RGB) sampled_list.append(cv_img) # the shape of clip_input", "'gymnastics_tumbling', 'hammer_throw', 'headbanging', 'headbutting', 'high_jump', 'high_kick', 'hitting_baseball', 'hockey_stop', 'holding_snake', 'hopscotch',", "of loaded image to 'new_height' for later multiscale cropping and", "is not correct, missing one or more element. 
    def __init__(self,
                 setting=os.path.expanduser('~/.mxnet/datasets/kinetics400/kinetics400_train_list_rawframes.txt'),
                 root=os.path.expanduser('~/.mxnet/datasets/kinetics400/rawframes_train'),
                 train=True,
                 test_mode=False,
                 name_pattern=None,
                 is_color=True,
                 modality='rgb',
                 num_segments=1,
                 new_length=1,
                 new_width=340,
                 new_height=256,
                 target_width=224,
                 target_height=224,
                 transform=None):
        super(Kinetics400, self).__init__()
        self.root = root
        self.setting = setting
        self.train = train
        self.test_mode = test_mode
        self.is_color = is_color
        self.modality = modality
        self.num_segments = num_segments
        self.new_height = new_height
        self.new_width = new_width
        self.target_height = target_height
        self.target_width = target_width
        self.new_length = new_length
        self.transform = transform

        self.classes, self.class_to_idx = self._find_classes(root)
        self.clips = self._make_dataset(root, setting)
        if len(self.clips) == 0:
            raise RuntimeError("Found 0 video clips in subfolders of: " + root + "\n"
                               "Check your data directory (opt.data-dir).")

        if name_pattern:
            self.name_pattern = name_pattern
        else:
            if self.modality == "rgb":
                self.name_pattern = "img_%05d.jpg"
            elif self.modality == "flow":
                self.name_pattern = "flow_%s_%05d.jpg"
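    # For reference, the default rgb pattern maps 1-based frame indices to file names:
    #   "img_%05d.jpg" % 1  -> "img_00001.jpg"
    #   "img_%05d.jpg" % 12 -> "img_00012.jpg"
    # The offsets computed in __getitem__ below are added to this 1-based index,
    # so decoded frames are expected to be numbered consecutively starting from 1.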
    def __getitem__(self, index):
        directory, duration, target = self.clips[index]
        average_duration = int(duration / self.num_segments)
        offsets = []
        for seg_id in range(self.num_segments):
            if self.train and not self.test_mode:
                # training
                if average_duration >= self.new_length:
                    offset = random.randint(0, average_duration - self.new_length)
                    # No +1 because randint(a, b) returns a random integer N such that a <= N <= b.
                    offsets.append(offset + seg_id * average_duration)
                else:
                    offsets.append(0)
            elif not self.train and not self.test_mode:
                # validation
                if average_duration >= self.new_length:
                    offsets.append(int((average_duration - self.new_length + 1) / 2 + seg_id * average_duration))
                else:
                    offsets.append(0)
            else:
                # test
                if average_duration >= self.new_length:
                    offsets.append(int((average_duration - self.new_length + 1) / 2 + seg_id * average_duration))
                else:
                    offsets.append(0)

        clip_input = self._TSN_RGB(directory, offsets, self.new_height, self.new_width,
                                   self.new_length, self.is_color, self.name_pattern)

        if self.transform is not None:
            clip_input = self.transform(clip_input)

        if self.num_segments > 1 and not self.test_mode:
            # For TSN training, reshape the input to B x 3 x H x W.
            # Here, B = batch_size * num_segments.
            clip_input = clip_input.reshape((-1, 3 * self.new_length, self.target_height, self.target_width))

        return clip_input, target
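    # Worked example of the segment sampling above (hypothetical numbers):
    # with duration=250, num_segments=3, new_length=16, average_duration = int(250 / 3) = 83.
    #   training:        each offset is randint(0, 83 - 16) + seg_id * 83, i.e. a random
    #                    clip start inside segments [0, 83), [83, 166) and [166, 249).
    #   validation/test: each offset is int((83 - 16 + 1) / 2) + seg_id * 83 = 34 + seg_id * 83,
    #                    i.e. the clip is centered inside each segment.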
    def __len__(self):
        return len(self.clips)

    def _find_classes(self, directory):
        classes = [d for d in os.listdir(directory) if os.path.isdir(os.path.join(directory, d))]
        classes.sort()
        class_to_idx = {classes[i]: i for i in range(len(classes))}
        return classes, class_to_idx

    def _make_dataset(self, directory, setting):
        if not os.path.exists(setting):
            raise RuntimeError("Setting file %s doesn't exist. Check opt.train-list and opt.val-list." % (setting))
        clips = []
        with open(setting) as split_f:
            data = split_f.readlines()
            for line in data:
                line_info = line.split()
                # line format: video_path, video_duration, video_label
                if len(line_info) < 3:
                    print('Video input format is not correct, missing one or more elements. %s' % line)
                    continue
                clip_path = os.path.join(directory, line_info[0])
                duration = int(line_info[1])
                target = int(line_info[2])
                item = (clip_path, duration, target)
                clips.append(item)
        return clips
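    # The setting file parsed above is whitespace-separated, one clip per line:
    #   <frame_dir relative to root> <video_duration in frames> <label index>
    # A hypothetical line (the directory name and numbers are illustrative only):
    #   abseiling/video_0001 250 0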
    def _TSN_RGB(self, directory, offsets, new_height, new_width, new_length, is_color, name_pattern):
        from ...utils.filesystem import try_import_cv2
        cv2 = try_import_cv2()
        if is_color:
            cv_read_flag = cv2.IMREAD_COLOR
        else:
            cv_read_flag = cv2.IMREAD_GRAYSCALE
        interpolation = cv2.INTER_LINEAR

        sampled_list = []
        for _, offset in enumerate(offsets):
            for length_id in range(1, new_length + 1):
                frame_name = name_pattern % (length_id + offset)
                frame_path = directory + "/" + frame_name
                cv_img_origin = cv2.imread(frame_path, cv_read_flag)
                if cv_img_origin is None:
                    raise RuntimeError("Could not load file %s. Check data path." % (frame_path))
                if new_width > 0 and new_height > 0:
                    # interpolation must be passed by keyword: the third positional
                    # argument of cv2.resize is 'dst', not 'interpolation'.
                    cv_img = cv2.resize(cv_img_origin, (new_width, new_height), interpolation=interpolation)
                else:
                    cv_img = cv_img_origin
                cv_img = cv2.cvtColor(cv_img, cv2.COLOR_BGR2RGB)
                sampled_list.append(cv_img)
        # the shape of clip_input will be H x W x C, and C = num_segments * new_length * 3
        clip_input = np.concatenate(sampled_list, axis=2)
        return nd.array(clip_input)
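    # For instance, with the defaults new_height=256 and new_width=340, and with
    # num_segments=3, new_length=1, _TSN_RGB concatenates 3 RGB frames along the
    # channel axis and returns an NDArray of shape 256 x 340 x 9.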
new_width", "video frames. For example, img_00012.jpg is_color : bool, default True", "prepare it. Parameters ---------- root : str, default '~/.mxnet/datasets/kinetics400' Path", "int, default 1 Number of segments to evenly divide the", "0 and new_height > 0: cv_img = cv2.resize(cv_img_origin, (new_width, new_height),", "default 1 Number of segments to evenly divide the video", "validation set. test_mode : bool, default False Whether to perform", "cropping and resizing. target_width : int, default 224 Scale the", "super(Kinetics400, self).__init__() self.root = root self.setting = setting self.train =", "clips in subfolders of: \" + root + \"\\n\" \"Check", "'breakdancing', 'brush_painting', 'brushing_hair', 'brushing_teeth', 'building_cabinet', 'building_shed', 'bungee_jumping', 'busking', 'canoeing_or_kayaking', 'capoeira',", "of input video clip. Default is a single image, but", "duration, target) clips.append(item) return clips def _TSN_RGB(self, directory, offsets, new_height,", "file of the prepared dataset. train : bool, default True", "resizing. new_height : int, default 256 Scale the height of", "\"Check your data directory (opt.data-dir).\")) if name_pattern: self.name_pattern = name_pattern", "int(duration / self.num_segments) offsets = [] for seg_id in range(self.num_segments):", "open(setting) as split_f: data = split_f.readlines() for line in data:", "root + \"\\n\" \"Check your data directory (opt.data-dir).\")) if name_pattern:", "= clip_input.reshape((-1, 3 * self.new_length, self.target_height, self.target_width)) return clip_input, target", "line) continue clip_path = os.path.join(directory, line_info[0]) duration = int(line_info[1]) target", "self.new_length) # No +1 because randint(a,b) return a random integer", "setting self.train = train self.test_mode = test_mode self.is_color = is_color", "clip_path = os.path.join(directory, line_info[0]) duration = int(line_info[1]) target = int(line_info[2])", "a video clip of consecutive 16 frames. new_width : int,", "'using_computer', 'using_remote_controller_-not_gaming-', 'using_segway', 'vault', 'waiting_in_line', 'walking_the_dog', 'washing_dishes', 'washing_feet', 'washing_hair', 'washing_hands',", "raise(RuntimeError(\"Could not load file %s. Check data path.\" % (frame_path)))", "now. Will add support for rgb difference image and optical", "'spinning_poi', 'spray_painting', 'spraying', 'springboard_diving', 'squat', 'sticking_tongue_out', 'stomping_grapes', 'stretching_arm', 'stretching_leg', 'strumming_guitar',", "load file %s. Check data path.\" % (frame_path))) if new_width", "= new_length self.transform = transform self.classes, self.class_to_idx = self._find_classes(root) self.clips", "'dancing_macarena', 'deadlifting', 'decorating_the_christmas_tree', 'digging', 'dining', 'disc_golfing', 'diving_cliff', 'dodgeball', 'doing_aerobics', 'doing_laundry',", "in subfolders of: \" + root + \"\\n\" \"Check your", "H x W. Here, B = batch_size * num_segments clip_input", ": function, default None A function that takes data and", "'sniffing', 'snorkeling', 'snowboarding', 'snowkiting', 'snowmobiling', 'somersaulting', 'spinning_poi', 'spray_painting', 'spraying', 'springboard_diving',", "image to 'new_height' for later multiscale cropping and resizing. target_width", "and resizing. 
target_width : int, default 224 Scale the width", "directory, duration, target = self.clips[index] average_duration = int(duration / self.num_segments)", "'juggling_fire', 'juggling_soccer_ball', 'jumping_into_pool', 'jumpstyle_dancing', 'kicking_field_goal', 'kicking_soccer_ball', 'kissing', 'kitesurfing', 'knitting', 'krumping',", "'waiting_in_line', 'walking_the_dog', 'washing_dishes', 'washing_feet', 'washing_hair', 'washing_hands', 'water_skiing', 'water_sliding', 'watering_plants', 'waxing_back',", "of loaded image to 'new_width' for later multiscale cropping and", "'squat', 'sticking_tongue_out', 'stomping_grapes', 'stretching_arm', 'stretching_leg', 'strumming_guitar', 'surfing_crowd', 'surfing_water', 'sweeping_floor', 'swimming_backstroke',", "setting : str, required Config file of the prepared dataset.", "340 Scale the width of loaded image to 'new_width' for", "['Kinetics400'] class Kinetics400(dataset.Dataset): \"\"\"Load the Kinetics400 action recognition dataset. Refer", "A useful technique to obtain global video-level information. <NAME>, etal,", "video-level information. <NAME>, etal, Temporal Segment Networks: Towards Good Practices", "= random.randint(0, average_duration - self.new_length) # No +1 because randint(a,b)", "cv_img = cv_img_origin cv_img = cv2.cvtColor(cv_img, cv2.COLOR_BGR2RGB) sampled_list.append(cv_img) # the", "str, default 'rgb' Input modalities, we support only rgb video", "'ice_skating', 'ironing', 'javelin_throw', 'jetskiing', 'jogging', 'juggling_balls', 'juggling_fire', 'juggling_soccer_ball', 'jumping_into_pool', 'jumpstyle_dancing',", "your data directory (opt.data-dir).\")) if name_pattern: self.name_pattern = name_pattern else:", "'feeding_fish', 'feeding_goats', 'filling_eyebrows', 'finger_snapping', 'fixing_hair', 'flipping_pancake', 'flying_kite', 'folding_clothes', 'folding_napkins', 'folding_paper',", "Whether to perform evaluation on the test set name_pattern :", "but it can be multiple video frames. For example, new_length=16", "how to prepare it. 
Parameters ---------- root : str, default", "clip_input will be H x W x C, and C", "as np from mxnet import nd from mxnet.gluon.data import dataset", "modality : str, default 'rgb' Input modalities, we support only", "str, default None The naming pattern of the decoded video", "'blasting_sand', 'blowing_glass', 'blowing_leaves', 'blowing_nose', 'blowing_out_candles', 'bobsledding', 'bookbinding', 'bouncing_on_trampoline', 'bowling', 'braiding_hair',", "is color or grayscale modality : str, default 'rgb' Input", "'waxing_eyebrows', 'waxing_legs', 'weaving_basket', 'welding', 'whistling', 'windsurfing', 'wrapping_present', 'wrestling', 'writing', 'yawning',", "'slapping', 'sled_dog_racing', 'smoking', 'smoking_hookah', 'snatch_weight_lifting', 'sneezing', 'sniffing', 'snorkeling', 'snowboarding', 'snowkiting',", "= \"flow_%s_%05d.jpg\" def __getitem__(self, index): directory, duration, target = self.clips[index]", "'riding_unicycle', 'ripping_paper', 'robot_dancing', 'rock_climbing', 'rock_scissors_paper', 'roller_skating', 'running_on_treadmill', 'sailing', 'salsa_dancing', 'sanding_floor',", "new_height > 0: cv_img = cv2.resize(cv_img_origin, (new_width, new_height), interpolation) else:", "'tap_dancing', 'tapping_guitar', 'tapping_pen', 'tasting_beer', 'tasting_food', 'testifying', 'texting', 'throwing_axe', 'throwing_ball', 'throwing_discus',", "= new_width self.target_height = target_height self.target_width = target_width self.new_length =", ":doc:`../build/examples_datasets/kinetics400` for the description of this dataset and how to", "Segment Networks: Towards Good Practices for Deep Action Recognition, ECCV", "prepared dataset. train : bool, default True Whether to load", "validation if average_duration >= self.new_length: offsets.append(int((average_duration - self.new_length + 1)/2", "clip_input.reshape((-1, 3 * self.new_length, self.target_height, self.target_width)) return clip_input, target def", "d in os.listdir(directory) if os.path.isdir(os.path.join(directory, d))] classes.sort() class_to_idx = {classes[i]:", "missing one or more element. %s' % line) continue clip_path", "bool, default True Whether the loaded image is color or", "+ root + \"\\n\" \"Check your data directory (opt.data-dir).\")) if", "else: cv_read_flag = cv2.IMREAD_GRAYSCALE interpolation = cv2.INTER_LINEAR sampled_list = []", "num_segments * new_length * 3 clip_input = np.concatenate(sampled_list, axis=2) return", "'curling_hair', 'cutting_nails', 'cutting_pineapple', 'cutting_watermelon', 'dancing_ballet', 'dancing_charleston', 'dancing_gangnam_style', 'dancing_macarena', 'deadlifting', 'decorating_the_christmas_tree',", "'jumpstyle_dancing', 'kicking_field_goal', 'kicking_soccer_ball', 'kissing', 'kitesurfing', 'knitting', 'krumping', 'laughing', 'laying_bricks', 'long_jump',", "elif not self.train and not self.test_mode: # validation if average_duration", "loaded image to 'new_height' for later multiscale cropping and resizing.", "this dataset and how to prepare it. Parameters ---------- root", "str, required Config file of the prepared dataset. train :", "data and label and transforms them. 
\"\"\" def __init__(self, setting=os.path.expanduser('~/.mxnet/datasets/kinetics400/kinetics400_train_list_rawframes.txt'),", "W x C, and C = num_segments * new_length *", "'riding_or_walking_with_horse', 'riding_scooter', 'riding_unicycle', 'ripping_paper', 'robot_dancing', 'rock_climbing', 'rock_scissors_paper', 'roller_skating', 'running_on_treadmill', 'sailing',", "train : bool, default True Whether to load the training", "line in data: line_info = line.split() # line format: video_path,", "for now. Will add support for rgb difference image and", "224 Scale the width of transformed image to the same", "'cooking_chicken', 'cooking_egg', 'cooking_on_campfire', 'cooking_sausages', 'counting_money', 'country_line_dancing', 'cracking_neck', 'crawling_baby', 'crossing_river', 'crying',", "'headbanging', 'headbutting', 'high_jump', 'high_kick', 'hitting_baseball', 'hockey_stop', 'holding_snake', 'hopscotch', 'hoverboarding', 'hugging',", "= root self.setting = setting self.train = train self.test_mode =", "video clip of consecutive 16 frames. new_width : int, default", "classes, class_to_idx def _make_dataset(self, directory, setting): if not os.path.exists(setting): raise(RuntimeError(\"Setting", "B = batch_size * num_segments clip_input = clip_input.reshape((-1, 3 *", "in range(self.num_segments): if self.train and not self.test_mode: # training if", "description of this dataset and how to prepare it. Parameters", "as split_f: data = split_f.readlines() for line in data: line_info", "import nd from mxnet.gluon.data import dataset __all__ = ['Kinetics400'] class", "average_duration)) else: offsets.append(0) clip_input = self._TSN_RGB(directory, offsets, self.new_height, self.new_width, self.new_length,", "None The naming pattern of the decoded video frames. For", "Good Practices for Deep Action Recognition, ECCV 2016 new_length :", "opt.val-list. \" % (setting))) clips = [] with open(setting) as", "'pushing_car', 'pushing_cart', 'pushing_wheelchair', 'reading_book', 'reading_newspaper', 'recording_music', 'riding_a_bike', 'riding_camel', 'riding_elephant', 'riding_mechanical_bull',", "seg_id * average_duration)) else: offsets.append(0) else: # test if average_duration", "test_mode self.is_color = is_color self.modality = modality self.num_segments = num_segments", "Towards Good Practices for Deep Action Recognition, ECCV 2016 new_length", "we will extract a video clip of consecutive 16 frames.", "'biking_through_snow', 'blasting_sand', 'blowing_glass', 'blowing_leaves', 'blowing_nose', 'blowing_out_candles', 'bobsledding', 'bookbinding', 'bouncing_on_trampoline', 'bowling',", "'busking', 'canoeing_or_kayaking', 'capoeira', 'carrying_baby', 'cartwheeling', 'carving_pumpkin', 'catching_fish', 'catching_or_throwing_baseball', 'catching_or_throwing_frisbee', 'catching_or_throwing_softball',", "to the same 'target_height' for batch forwarding. 
transform : function,", "self.root = root self.setting = setting self.train = train self.test_mode", "directory, offsets, new_height, new_width, new_length, is_color, name_pattern): from ...utils.filesystem import", "== 0: raise(RuntimeError(\"Found 0 video clips in subfolders of: \"", "= name_pattern else: if self.modality == \"rgb\": self.name_pattern = \"img_%05d.jpg\"", ": str, default None The naming pattern of the decoded", "% (length_id + offset) frame_path = directory + \"/\" +", "self.train = train self.test_mode = test_mode self.is_color = is_color self.modality", "offsets, self.new_height, self.new_width, self.new_length, self.is_color, self.name_pattern) if self.transform is not", "= np.concatenate(sampled_list, axis=2) return nd.array(clip_input) class Kinetics400Attr(object): def __init__(self): self.num_class", "'carving_pumpkin', 'catching_fish', 'catching_or_throwing_baseball', 'catching_or_throwing_frisbee', 'catching_or_throwing_softball', 'celebrating', 'changing_oil', 'changing_wheel', 'checking_tires', 'cheerleading',", "'push_up', 'pushing_car', 'pushing_cart', 'pushing_wheelchair', 'reading_book', 'reading_newspaper', 'recording_music', 'riding_a_bike', 'riding_camel', 'riding_elephant',", "'passing_American_football_-not_in_game-', 'peeling_apples', 'peeling_potatoes', 'petting_animal_-not_cat-', 'petting_cat', 'picking_fruit', 'planting_trees', 'plastering', 'playing_accordion', 'playing_badminton',", "item = (clip_path, duration, target) clips.append(item) return clips def _TSN_RGB(self,", "'dying_hair', 'eating_burger', 'eating_cake', 'eating_carrots', 'eating_chips', 'eating_doughnuts', 'eating_hotdog', 'eating_ice_cream', 'eating_spaghetti', 'eating_watermelon',", "len(line_info) < 3: print('Video input format is not correct, missing", "3 x H x W. Here, B = batch_size *", "default None The naming pattern of the decoded video frames.", "np from mxnet import nd from mxnet.gluon.data import dataset __all__", "not self.train and not self.test_mode: # validation if average_duration >=", "'chopping_wood', 'clapping', 'clay_pottery_making', 'clean_and_jerk', 'cleaning_floor', 'cleaning_gutters', 'cleaning_pool', 'cleaning_shoes', 'cleaning_toilet', 'cleaning_windows',", "'cleaning_windows', 'climbing_a_rope', 'climbing_ladder', 'climbing_tree', 'contact_juggling', 'cooking_chicken', 'cooking_egg', 'cooking_on_campfire', 'cooking_sausages', 'counting_money',", "not None: clip_input = self.transform(clip_input) if self.num_segments > 1 and", "'catching_or_throwing_frisbee', 'catching_or_throwing_softball', 'celebrating', 'changing_oil', 'changing_wheel', 'checking_tires', 'cheerleading', 'chopping_wood', 'clapping', 'clay_pottery_making',", "'skiing_-not_slalom_or_crosscountry-', 'skiing_crosscountry', 'skiing_slalom', 'skipping_rope', 'skydiving', 'slacklining', 'slapping', 'sled_dog_racing', 'smoking', 'smoking_hookah',", "+ frame_name cv_img_origin = cv2.imread(frame_path, cv_read_flag) if cv_img_origin is None:", "modalities, we support only rgb video frames for now. 
Will", "> 0: cv_img = cv2.resize(cv_img_origin, (new_width, new_height), interpolation) else: cv_img", "'taking_a_shower', 'tango_dancing', 'tap_dancing', 'tapping_guitar', 'tapping_pen', 'tasting_beer', 'tasting_food', 'testifying', 'texting', 'throwing_axe',", "test_mode=False, name_pattern=None, is_color=True, modality='rgb', num_segments=1, new_length=1, new_width=340, new_height=256, target_width=224, target_height=224,", "1)/2 + seg_id * average_duration)) else: offsets.append(0) clip_input = self._TSN_RGB(directory,", "technique to obtain global video-level information. <NAME>, etal, Temporal Segment", "directory): classes = [d for d in os.listdir(directory) if os.path.isdir(os.path.join(directory,", "'hugging', 'hula_hooping', 'hurdling', 'hurling_-sport-', 'ice_climbing', 'ice_fishing', 'ice_skating', 'ironing', 'javelin_throw', 'jetskiing',", "'playing_bass_guitar', 'playing_cards', 'playing_cello', 'playing_chess', 'playing_clarinet', 'playing_controller', 'playing_cricket', 'playing_cymbals', 'playing_didgeridoo', 'playing_drums',", "_make_dataset(self, directory, setting): if not os.path.exists(setting): raise(RuntimeError(\"Setting file %s doesn't", "name_pattern): from ...utils.filesystem import try_import_cv2 cv2 = try_import_cv2() if is_color:", "setting): if not os.path.exists(setting): raise(RuntimeError(\"Setting file %s doesn't exist. Check", "'setting_table', 'shaking_hands', 'shaking_head', 'sharpening_knives', 'sharpening_pencil', 'shaving_head', 'shaving_legs', 'shearing_sheep', 'shining_shoes', 'shooting_basketball',", "'sticking_tongue_out', 'stomping_grapes', 'stretching_arm', 'stretching_leg', 'strumming_guitar', 'surfing_crowd', 'surfing_water', 'sweeping_floor', 'swimming_backstroke', 'swimming_breast_stroke',", "set. test_mode : bool, default False Whether to perform evaluation", "'testifying', 'texting', 'throwing_axe', 'throwing_ball', 'throwing_discus', 'tickling', 'tobogganing', 'tossing_coin', 'tossing_salad', 'training_dog',", "[] for _, offset in enumerate(offsets): for length_id in range(1,", "For TSN training, reshape the input to B x 3", "+ seg_id * average_duration) else: offsets.append(0) elif not self.train and", ": int, default 224 Scale the height of transformed image", "'stretching_leg', 'strumming_guitar', 'surfing_crowd', 'surfing_water', 'sweeping_floor', 'swimming_backstroke', 'swimming_breast_stroke', 'swimming_butterfly_stroke', 'swing_dancing', 'swinging_legs',", "None A function that takes data and label and transforms", "1 The length of input video clip. 
Default is a", "split_f: data = split_f.readlines() for line in data: line_info =", "'juggling_soccer_ball', 'jumping_into_pool', 'jumpstyle_dancing', 'kicking_field_goal', 'kicking_soccer_ball', 'kissing', 'kitesurfing', 'knitting', 'krumping', 'laughing',", "'playing_cricket', 'playing_cymbals', 'playing_didgeridoo', 'playing_drums', 'playing_flute', 'playing_guitar', 'playing_harmonica', 'playing_harp', 'playing_ice_hockey', 'playing_keyboard',", "num_segments=1, new_length=1, new_width=340, new_height=256, target_width=224, target_height=224, transform=None): super(Kinetics400, self).__init__() self.root", "disable=line-too-long,too-many-lines,missing-docstring \"\"\"Kinetics400 action classification dataset.\"\"\" import os import random import", "'salsa_dancing', 'sanding_floor', 'scrambling_eggs', 'scuba_diving', 'setting_table', 'shaking_hands', 'shaking_head', 'sharpening_knives', 'sharpening_pencil', 'shaving_head',", "'blowing_glass', 'blowing_leaves', 'blowing_nose', 'blowing_out_candles', 'bobsledding', 'bookbinding', 'bouncing_on_trampoline', 'bowling', 'braiding_hair', 'breading_or_breadcrumbing',", ": str, required Config file of the prepared dataset. train", "width of transformed image to the same 'target_width' for batch", "video into clips. A useful technique to obtain global video-level", "import os import random import numpy as np from mxnet", "clips. A useful technique to obtain global video-level information. <NAME>,", "[] with open(setting) as split_f: data = split_f.readlines() for line", "line_info[0]) duration = int(line_info[1]) target = int(line_info[2]) item = (clip_path,", "'clay_pottery_making', 'clean_and_jerk', 'cleaning_floor', 'cleaning_gutters', 'cleaning_pool', 'cleaning_shoes', 'cleaning_toilet', 'cleaning_windows', 'climbing_a_rope', 'climbing_ladder',", "= int(duration / self.num_segments) offsets = [] for seg_id in", "integer N such that a <= N <= b. offsets.append(offset", "'feeding_goats', 'filling_eyebrows', 'finger_snapping', 'fixing_hair', 'flipping_pancake', 'flying_kite', 'folding_clothes', 'folding_napkins', 'folding_paper', 'front_raises',", "average_duration >= self.new_length: offsets.append(int((average_duration - self.new_length + 1)/2 + seg_id", "* 3 clip_input = np.concatenate(sampled_list, axis=2) return nd.array(clip_input) class Kinetics400Attr(object):", "'singing', 'situp', 'skateboarding', 'ski_jumping', 'skiing_-not_slalom_or_crosscountry-', 'skiing_crosscountry', 'skiing_slalom', 'skipping_rope', 'skydiving', 'slacklining',", "False Whether to perform evaluation on the test set name_pattern", "+ 1)/2 + seg_id * average_duration)) else: offsets.append(0) clip_input =", "'catching_or_throwing_softball', 'celebrating', 'changing_oil', 'changing_wheel', 'checking_tires', 'cheerleading', 'chopping_wood', 'clapping', 'clay_pottery_making', 'clean_and_jerk',", "'belly_dancing', 'bench_pressing', 'bending_back', 'bending_metal', 'biking_through_snow', 'blasting_sand', 'blowing_glass', 'blowing_leaves', 'blowing_nose', 'blowing_out_candles',", "* self.new_length, self.target_height, self.target_width)) return clip_input, target def __len__(self): return", "to 'new_height' for later multiscale cropping and resizing. target_width :", "__init__(self): self.num_class = 400 self.classes = ['abseiling', 'air_drumming', 'answering_questions', 'applauding',", "%s doesn't exist. Check opt.train-list and opt.val-list. 
\" % (setting)))", "new_height, new_width, new_length, is_color, name_pattern): from ...utils.filesystem import try_import_cv2 cv2", "'playing_trumpet', 'playing_ukulele', 'playing_violin', 'playing_volleyball', 'playing_xylophone', 'pole_vault', 'presenting_weather_forecast', 'pull_ups', 'pumping_fist', 'pumping_gas',", "'waxing_legs', 'weaving_basket', 'welding', 'whistling', 'windsurfing', 'wrapping_present', 'wrestling', 'writing', 'yawning', 'yoga',", "'decorating_the_christmas_tree', 'digging', 'dining', 'disc_golfing', 'diving_cliff', 'dodgeball', 'doing_aerobics', 'doing_laundry', 'doing_nails', 'drawing',", ">= self.new_length: offsets.append(int((average_duration - self.new_length + 1)/2 + seg_id *", "only rgb video frames for now. Will add support for", "target) clips.append(item) return clips def _TSN_RGB(self, directory, offsets, new_height, new_width,", "'flying_kite', 'folding_clothes', 'folding_napkins', 'folding_paper', 'front_raises', 'frying_vegetables', 'garbage_collecting', 'gargling', 'getting_a_haircut', 'getting_a_tattoo',", "'shining_shoes', 'shooting_basketball', 'shooting_goal_-soccer-', 'shot_put', 'shoveling_snow', 'shredding_paper', 'shuffling_cards', 'side_kick', 'sign_language_interpreting', 'singing',", "dataset. train : bool, default True Whether to load the", "'skiing_crosscountry', 'skiing_slalom', 'skipping_rope', 'skydiving', 'slacklining', 'slapping', 'sled_dog_racing', 'smoking', 'smoking_hookah', 'snatch_weight_lifting',", "cv_read_flag) if cv_img_origin is None: raise(RuntimeError(\"Could not load file %s.", "'opening_bottle', 'opening_present', 'paragliding', 'parasailing', 'parkour', 'passing_American_football_-in_game-', 'passing_American_football_-not_in_game-', 'peeling_apples', 'peeling_potatoes', 'petting_animal_-not_cat-',", "os.path.exists(setting): raise(RuntimeError(\"Setting file %s doesn't exist. Check opt.train-list and opt.val-list.", "= directory + \"/\" + frame_name cv_img_origin = cv2.imread(frame_path, cv_read_flag)" ]
[ "not isinstance(other, FutureContract): return False return (self.ticker, self.exp_date, self.data) ==", "# # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law", "isinstance(other, FutureContract): return False return (self.ticker, self.exp_date, self.data) == (other.ticker,", "future contract (e.g. BloombergFutureTicker(“CTZ9 Comdty”)), expiration date of the contract", "# # Licensed under the Apache License, Version 2.0 (the", "compliance with the License. # You may obtain a copy", "an \"AS IS\" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF", "2.0 (the \"License\"); # you may not use this file", "– European Organization for Nuclear Research # # Licensed under", "file except in compliance with the License. # You may", "agreed to in writing, software # distributed under the License", "Unless required by applicable law or agreed to in writing,", "on an \"AS IS\" BASIS, # WITHOUT WARRANTIES OR CONDITIONS", "FutureContract(object): \"\"\" Class representing a single future contract. The FutureContract", "parameters: ticker, which is the symbol of the specific future", "Comdty”)), expiration date of the contract and a PricesDataFrame, containing", "chaining possibilities. It requires 3 parameters: ticker, which is the", "one futures contract. The FutureContract objects are used by the", "representing a single future contract. The FutureContract is a simple", "distributed under the License is distributed on an \"AS IS\"", "contract exp_date: datetime expiration date data: PricesDataFrame data frame containing", "data: PricesDataFrame data frame containing dates with price fields values", "in order to provide the contracts chaining possibilities. It requires", "the contracts chaining possibilities. It requires 3 parameters: ticker, which", "the specific language governing permissions and # limitations under the", "permissions and # limitations under the License. from datetime import", "# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express", "self.exp_date) def __eq__(self, other): if self is other: return True", "True if not isinstance(other, FutureContract): return False return (self.ticker, self.exp_date,", "express or implied. # See the License for the specific", "applicable law or agreed to in writing, software # distributed", "self is other: return True if not isinstance(other, FutureContract): return", "except in compliance with the License. # You may obtain", "of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless", "future contract exp_date: datetime expiration date data: PricesDataFrame data frame", "which is the symbol of the specific future contract (e.g.", "dates with price field values. Parameters ---------- ticker: Ticker symbol", "expiration date data: PricesDataFrame data frame containing dates with price", "Copyright 2016-present CERN – European Organization for Nuclear Research #", "Licensed under the Apache License, Version 2.0 (the \"License\"); #", "class FutureContract(object): \"\"\" Class representing a single future contract. The", "and # limitations under the License. from datetime import datetime", "not use this file except in compliance with the License.", "contract. 
The FutureContract is a simple class representing one futures", "symbol of the future contract exp_date: datetime expiration date data:", "of the future contract exp_date: datetime expiration date data: PricesDataFrame", "Nuclear Research # # Licensed under the Apache License, Version", "writing, software # distributed under the License is distributed on", "in writing, software # distributed under the License is distributed", "price fields values \"\"\" def __init__(self, ticker: Ticker, exp_date: datetime,", "you may not use this file except in compliance with", "import Ticker from qf_lib.containers.dataframe.prices_dataframe import PricesDataFrame class FutureContract(object): \"\"\" Class", "\"\"\" def __init__(self, ticker: Ticker, exp_date: datetime, data: PricesDataFrame): self.ticker", "# Licensed under the Apache License, Version 2.0 (the \"License\");", "__str__(self): return 'Contract: ticker: {}, expiration date: {}'.format( self.ticker, self.exp_date)", "expiration date: {}'.format( self.ticker, self.exp_date) def __eq__(self, other): if self", "language governing permissions and # limitations under the License. from", "FutureContract objects are used by the FuturesChain, in order to", "ticker self.exp_date = exp_date self.data = data def __str__(self): return", "Ticker, exp_date: datetime, data: PricesDataFrame): self.ticker = ticker self.exp_date =", "qf_lib.containers.dataframe.prices_dataframe import PricesDataFrame class FutureContract(object): \"\"\" Class representing a single", "exp_date self.data = data def __str__(self): return 'Contract: ticker: {},", "a simple class representing one futures contract. The FutureContract objects", "from qf_lib.common.tickers.tickers import Ticker from qf_lib.containers.dataframe.prices_dataframe import PricesDataFrame class FutureContract(object):", "future contract. The FutureContract is a simple class representing one", "order to provide the contracts chaining possibilities. It requires 3", "date of the contract and a PricesDataFrame, containing dates with", "datetime import datetime from qf_lib.common.tickers.tickers import Ticker from qf_lib.containers.dataframe.prices_dataframe import", "use this file except in compliance with the License. #", "expiration date of the contract and a PricesDataFrame, containing dates", "http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed", "is other: return True if not isinstance(other, FutureContract): return False", "self.exp_date = exp_date self.data = data def __str__(self): return 'Contract:", "self.exp_date, self.data) == (other.ticker, other.exp_date, other.data) def __hash__(self): return hash((self.ticker,", "import datetime from qf_lib.common.tickers.tickers import Ticker from qf_lib.containers.dataframe.prices_dataframe import PricesDataFrame", "self.data) == (other.ticker, other.exp_date, other.data) def __hash__(self): return hash((self.ticker, self.exp_date,", "of the specific future contract (e.g. BloombergFutureTicker(“CTZ9 Comdty”)), expiration date", "Class representing a single future contract. The FutureContract is a", "= exp_date self.data = data def __str__(self): return 'Contract: ticker:", "to provide the contracts chaining possibilities. It requires 3 parameters:", "Organization for Nuclear Research # # Licensed under the Apache", "CONDITIONS OF ANY KIND, either express or implied. 
# See", "---------- ticker: Ticker symbol of the future contract exp_date: datetime", "the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required", "return 'Contract: ticker: {}, expiration date: {}'.format( self.ticker, self.exp_date) def", "requires 3 parameters: ticker, which is the symbol of the", "contract and a PricesDataFrame, containing dates with price field values.", "self.ticker = ticker self.exp_date = exp_date self.data = data def", "or implied. # See the License for the specific language", "License is distributed on an \"AS IS\" BASIS, # WITHOUT", "License. # You may obtain a copy of the License", "is distributed on an \"AS IS\" BASIS, # WITHOUT WARRANTIES", "License, Version 2.0 (the \"License\"); # you may not use", "the License. from datetime import datetime from qf_lib.common.tickers.tickers import Ticker", "with price fields values \"\"\" def __init__(self, ticker: Ticker, exp_date:", "if self is other: return True if not isinstance(other, FutureContract):", "# You may obtain a copy of the License at", "def __eq__(self, other): if self is other: return True if", "KIND, either express or implied. # See the License for", "specific language governing permissions and # limitations under the License.", "PricesDataFrame data frame containing dates with price fields values \"\"\"", "under the License is distributed on an \"AS IS\" BASIS,", "copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # #", "License for the specific language governing permissions and # limitations", "ticker: Ticker symbol of the future contract exp_date: datetime expiration", "License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by", "specific future contract (e.g. BloombergFutureTicker(“CTZ9 Comdty”)), expiration date of the", "contract. The FutureContract objects are used by the FuturesChain, in", "'Contract: ticker: {}, expiration date: {}'.format( self.ticker, self.exp_date) def __eq__(self,", "datetime expiration date data: PricesDataFrame data frame containing dates with", "ticker, which is the symbol of the specific future contract", "the specific future contract (e.g. BloombergFutureTicker(“CTZ9 Comdty”)), expiration date of", "It requires 3 parameters: ticker, which is the symbol of", "symbol of the specific future contract (e.g. BloombergFutureTicker(“CTZ9 Comdty”)), expiration", "a PricesDataFrame, containing dates with price field values. Parameters ----------", "dates with price fields values \"\"\" def __init__(self, ticker: Ticker,", "def __init__(self, ticker: Ticker, exp_date: datetime, data: PricesDataFrame): self.ticker =", "= ticker self.exp_date = exp_date self.data = data def __str__(self):", "fields values \"\"\" def __init__(self, ticker: Ticker, exp_date: datetime, data:", "data: PricesDataFrame): self.ticker = ticker self.exp_date = exp_date self.data =", "the License for the specific language governing permissions and #", "(the \"License\"); # you may not use this file except", "Apache License, Version 2.0 (the \"License\"); # you may not", "import PricesDataFrame class FutureContract(object): \"\"\" Class representing a single future", "# you may not use this file except in compliance", "either express or implied. # See the License for the", "self.data = data def __str__(self): return 'Contract: ticker: {}, expiration", "objects are used by the FuturesChain, in order to provide", "OR CONDITIONS OF ANY KIND, either express or implied. 
#", "of the contract and a PricesDataFrame, containing dates with price", "the future contract exp_date: datetime expiration date data: PricesDataFrame data", "FutureContract is a simple class representing one futures contract. The", "# http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or", "representing one futures contract. The FutureContract objects are used by", "exp_date: datetime, data: PricesDataFrame): self.ticker = ticker self.exp_date = exp_date", "the License is distributed on an \"AS IS\" BASIS, #", "in compliance with the License. # You may obtain a", "date data: PricesDataFrame data frame containing dates with price fields", "software # distributed under the License is distributed on an", "date: {}'.format( self.ticker, self.exp_date) def __eq__(self, other): if self is", "is the symbol of the specific future contract (e.g. BloombergFutureTicker(“CTZ9", "datetime, data: PricesDataFrame): self.ticker = ticker self.exp_date = exp_date self.data", "qf_lib.common.tickers.tickers import Ticker from qf_lib.containers.dataframe.prices_dataframe import PricesDataFrame class FutureContract(object): \"\"\"", "other: return True if not isinstance(other, FutureContract): return False return", "containing dates with price fields values \"\"\" def __init__(self, ticker:", "# # Unless required by applicable law or agreed to", "under the License. from datetime import datetime from qf_lib.common.tickers.tickers import", "{}, expiration date: {}'.format( self.ticker, self.exp_date) def __eq__(self, other): if", "__eq__(self, other): if self is other: return True if not", "governing permissions and # limitations under the License. from datetime", "Parameters ---------- ticker: Ticker symbol of the future contract exp_date:", "values \"\"\" def __init__(self, ticker: Ticker, exp_date: datetime, data: PricesDataFrame):", "a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 #", "CERN – European Organization for Nuclear Research # # Licensed", "obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0", "limitations under the License. from datetime import datetime from qf_lib.common.tickers.tickers", "the symbol of the specific future contract (e.g. BloombergFutureTicker(“CTZ9 Comdty”)),", "return (self.ticker, self.exp_date, self.data) == (other.ticker, other.exp_date, other.data) def __hash__(self):", "the contract and a PricesDataFrame, containing dates with price field", "License. from datetime import datetime from qf_lib.common.tickers.tickers import Ticker from", "single future contract. The FutureContract is a simple class representing", "Version 2.0 (the \"License\"); # you may not use this", "futures contract. The FutureContract objects are used by the FuturesChain,", "(self.ticker, self.exp_date, self.data) == (other.ticker, other.exp_date, other.data) def __hash__(self): return", "PricesDataFrame, containing dates with price field values. Parameters ---------- ticker:", "provide the contracts chaining possibilities. It requires 3 parameters: ticker,", "law or agreed to in writing, software # distributed under", "PricesDataFrame class FutureContract(object): \"\"\" Class representing a single future contract.", "FuturesChain, in order to provide the contracts chaining possibilities. It", "class representing one futures contract. The FutureContract objects are used", "a single future contract. 
The FutureContract is a simple class", "datetime from qf_lib.common.tickers.tickers import Ticker from qf_lib.containers.dataframe.prices_dataframe import PricesDataFrame class", "contracts chaining possibilities. It requires 3 parameters: ticker, which is", "== (other.ticker, other.exp_date, other.data) def __hash__(self): return hash((self.ticker, self.exp_date, self.data))", "implied. # See the License for the specific language governing", "if not isinstance(other, FutureContract): return False return (self.ticker, self.exp_date, self.data)", "field values. Parameters ---------- ticker: Ticker symbol of the future", "under the Apache License, Version 2.0 (the \"License\"); # you", "\"License\"); # you may not use this file except in", "ticker: Ticker, exp_date: datetime, data: PricesDataFrame): self.ticker = ticker self.exp_date", "return True if not isinstance(other, FutureContract): return False return (self.ticker,", "frame containing dates with price fields values \"\"\" def __init__(self,", "used by the FuturesChain, in order to provide the contracts", "2016-present CERN – European Organization for Nuclear Research # #", "distributed on an \"AS IS\" BASIS, # WITHOUT WARRANTIES OR", "# Copyright 2016-present CERN – European Organization for Nuclear Research", "with price field values. Parameters ---------- ticker: Ticker symbol of", "FutureContract): return False return (self.ticker, self.exp_date, self.data) == (other.ticker, other.exp_date,", "exp_date: datetime expiration date data: PricesDataFrame data frame containing dates", "containing dates with price field values. Parameters ---------- ticker: Ticker", "self.ticker, self.exp_date) def __eq__(self, other): if self is other: return", "by applicable law or agreed to in writing, software #", "# distributed under the License is distributed on an \"AS", "OF ANY KIND, either express or implied. # See the", "WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.", "may obtain a copy of the License at # #", "Research # # Licensed under the Apache License, Version 2.0", "# Unless required by applicable law or agreed to in", "ANY KIND, either express or implied. # See the License", "See the License for the specific language governing permissions and", "price field values. Parameters ---------- ticker: Ticker symbol of the", "values. Parameters ---------- ticker: Ticker symbol of the future contract", "def __str__(self): return 'Contract: ticker: {}, expiration date: {}'.format( self.ticker,", "the License. # You may obtain a copy of the", "at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable", "for the specific language governing permissions and # limitations under", "Ticker from qf_lib.containers.dataframe.prices_dataframe import PricesDataFrame class FutureContract(object): \"\"\" Class representing", "The FutureContract objects are used by the FuturesChain, in order", "\"AS IS\" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY", "return False return (self.ticker, self.exp_date, self.data) == (other.ticker, other.exp_date, other.data)", "to in writing, software # distributed under the License is", "Ticker symbol of the future contract exp_date: datetime expiration date", "are used by the FuturesChain, in order to provide the", "IS\" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,", "# See the License for the specific language governing permissions", "simple class representing one futures contract. The FutureContract objects are", "contract (e.g. 
BloombergFutureTicker(“CTZ9 Comdty”)), expiration date of the contract and", "BloombergFutureTicker(“CTZ9 Comdty”)), expiration date of the contract and a PricesDataFrame,", "__init__(self, ticker: Ticker, exp_date: datetime, data: PricesDataFrame): self.ticker = ticker", "and a PricesDataFrame, containing dates with price field values. Parameters", "other): if self is other: return True if not isinstance(other,", "You may obtain a copy of the License at #", "# limitations under the License. from datetime import datetime from", "(e.g. BloombergFutureTicker(“CTZ9 Comdty”)), expiration date of the contract and a", "European Organization for Nuclear Research # # Licensed under the", "may not use this file except in compliance with the", "for Nuclear Research # # Licensed under the Apache License,", "or agreed to in writing, software # distributed under the", "by the FuturesChain, in order to provide the contracts chaining", "3 parameters: ticker, which is the symbol of the specific", "required by applicable law or agreed to in writing, software", "PricesDataFrame): self.ticker = ticker self.exp_date = exp_date self.data = data", "from qf_lib.containers.dataframe.prices_dataframe import PricesDataFrame class FutureContract(object): \"\"\" Class representing a", "False return (self.ticker, self.exp_date, self.data) == (other.ticker, other.exp_date, other.data) def", "BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either", "WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or", "from datetime import datetime from qf_lib.common.tickers.tickers import Ticker from qf_lib.containers.dataframe.prices_dataframe", "data frame containing dates with price fields values \"\"\" def", "= data def __str__(self): return 'Contract: ticker: {}, expiration date:", "with the License. # You may obtain a copy of", "this file except in compliance with the License. # You", "data def __str__(self): return 'Contract: ticker: {}, expiration date: {}'.format(", "the FuturesChain, in order to provide the contracts chaining possibilities.", "the Apache License, Version 2.0 (the \"License\"); # you may", "The FutureContract is a simple class representing one futures contract.", "is a simple class representing one futures contract. The FutureContract", "\"\"\" Class representing a single future contract. The FutureContract is", "{}'.format( self.ticker, self.exp_date) def __eq__(self, other): if self is other:", "ticker: {}, expiration date: {}'.format( self.ticker, self.exp_date) def __eq__(self, other):", "possibilities. It requires 3 parameters: ticker, which is the symbol" ]
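The second list quotes qf-lib's FutureContract: a small value object holding a ticker, an expiration date and a PricesDataFrame of prices, with __eq__ and __hash__ defined over that triple so that contracts can be compared and chained by a FuturesChain. Below is a stripped-down, dependency-free sketch of the same pattern; SimpleFutureContract and its plain-string/tuple fields are stand-ins for qf-lib's Ticker and PricesDataFrame types, not the library's actual API.

from datetime import datetime

class SimpleFutureContract:
    """Illustrative stand-in for qf-lib's FutureContract value object."""

    def __init__(self, ticker, exp_date, data):
        self.ticker = ticker      # e.g. "CTZ9 Comdty" (a Ticker object in qf-lib)
        self.exp_date = exp_date  # contract expiration date
        self.data = data          # price history (a PricesDataFrame in qf-lib)

    def __str__(self):
        return 'Contract: ticker: {}, expiration date: {}'.format(self.ticker, self.exp_date)

    def __eq__(self, other):
        if self is other:
            return True
        if not isinstance(other, SimpleFutureContract):
            return False
        return (self.ticker, self.exp_date, self.data) == (other.ticker, other.exp_date, other.data)

    def __hash__(self):
        return hash((self.ticker, self.exp_date, self.data))

# Equal field triples compare equal and hash alike, so contracts can be
# de-duplicated in sets or used as dictionary keys.
a = SimpleFutureContract("CTZ9 Comdty", datetime(2019, 12, 6), ("close", 100.0))
b = SimpleFutureContract("CTZ9 Comdty", datetime(2019, 12, 6), ("close", 100.0))
assert a == b and hash(a) == hash(b)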
[ "parent = pecan.request.path.split('/')[:-1][-1] if parent != \"action_plans\": raise exception.HTTPNotFound expand", "kwargs.get('strategy_id', wtypes.Unset)) fields.append('strategy_name') setattr(self, 'strategy_name', kwargs.get('strategy_id', wtypes.Unset)) @staticmethod def _convert_with_links(action_plan,", "parent != \"action_plans\": raise exception.HTTPNotFound expand = True resource_url =", "2.0 (the \"License\"); # you may not use this file", "if value and self._strategy_name != value: self._strategy_name = None strategy", "not hasattr(self, field): continue self.fields.append(field) setattr(self, field, kwargs.get(field, wtypes.Unset)) self.fields.append('audit_uuid')", "patch=serialized_patch, reason=msg % dict(state=state_value)) @staticmethod def validate(patch): if patch.path ==", "'action_plan:start', action_plan_to_start, action='action_plan:start') if action_plan_to_start['state'] != \\ objects.action_plan.State.RECOMMENDED: raise exception.StartError(", "link from watcher.api.controllers.v1 import collection from watcher.api.controllers.v1 import efficacy_indicator as", "a.save() if launch_action_plan: self.applier_client.launch_action_plan(pecan.request.context, action_plan.uuid) action_plan_to_update = objects.ActionPlan.get_by_uuid( pecan.request.context, action_plan_uuid)", "if self._efficacy_indicators is None: self._set_efficacy_indicators(wtypes.Unset) return self._efficacy_indicators def _set_efficacy_indicators(self, value):", "wtypes import wsmeext.pecan as wsme_pecan from watcher._i18n import _ from", "= objects.ActionPlan.get_by_uuid( pecan.request.context, marker) filters = {} if audit_uuid: filters['audit_uuid']", "data sets. :param limit: maximum number of resources to return", "_efficacy_indicators = objects.EfficacyIndicator.list( pecan.request.context, filters={\"action_plan_uuid\": self.uuid}) for indicator in _efficacy_indicators:", "strategy_uuid = wtypes.wsproperty( wtypes.text, _get_strategy_uuid, _set_strategy_uuid, mandatory=False) \"\"\"Strategy UUID the", "False \"\"\"A flag to indicate if the requests to this", "Default: asc. :param audit_uuid: Optional UUID of an audit, to", "e: raise exception.PatchError(patch=patch, reason=e) launch_action_plan = False cancel_action_plan = False", "<http://www.omg.org/spec/BPMN/2.0/>`_ or `Unified Modeling Language (UML) <http://www.uml.org/>`_. To see the", "list of efficacy indicators associated to this action plan\"\"\" global_efficacy", "not wtypes.Unset: serialized_patch['value'] = patch.value # todo: use state machines", "action_plan = api_utils.get_resource('ActionPlan', action_plan_uuid) policy.enforce( context, 'action_plan:get', action_plan, action='action_plan:get') return", ":ref:`Action Plan(s) <action_plan_definition>` composed of two types of Action Item(s):", "if not expand: action_plan.unset_fields_except( ['uuid', 'state', 'efficacy_indicators', 'global_efficacy', 'updated_at', 'audit_uuid',", "efficacyindicator from watcher.api.controllers.v1 import types from watcher.api.controllers.v1 import utils as", "class ActionPlan(base.APIBase): \"\"\"API representation of a action plan. 
This class", "**kwargs) return ap_collection @classmethod def sample(cls): sample = cls() sample.action_plans", "= audit.uuid self.audit_id = audit.id except exception.AuditNotFound: self._audit_uuid = None", "Update only the fields that have changed for field in", "mandatory=True) \"\"\"The UUID of the audit this port belongs to\"\"\"", "= False cancel_action_plan = False # transitions that are allowed", "expose. if not hasattr(self, field): continue self.fields.append(field) setattr(self, field, kwargs.get(field,", "implementations to generate and handle more complex :ref:`Action Plan(s) <action_plan_definition>`", "of :ref:`Actions <action_definition>` that should be executed in order to", "License for the specific language governing permissions and # limitations", "'strategy_uuid', 'strategy_name']) action_plan.links = [ link.Link.make_link( 'self', url, 'action_plans', action_plan.uuid),", "!= value: self._strategy_name = None strategy = self._get_strategy(value) if strategy:", "action_plans_collection = ActionPlanCollection.convert_with_links( action_plans, limit, url=resource_url, expand=expand, sort_key=sort_key, sort_dir=sort_dir) if", "field) except AttributeError: # Ignore fields that aren't exposed in", "Reserved. # # Licensed under the Apache License, Version 2.0", "<reponame>ajaytikoo/watcher # -*- encoding: utf-8 -*- # Copyright 2013 Red", "<action_plan_definition>` states, visit :ref:`the Action Plan state machine <action_plan_state_machine>`. \"\"\"", "'strategy_uuid', 'strategy_name'] api_utils.validate_sort_key( sort_key, list(objects.ActionPlan.fields) + additional_fields) limit = api_utils.validate_limit(limit)", "/detail should only work agaist collections parent = pecan.request.path.split('/')[:-1][-1] if", "if value == wtypes.Unset and not self._efficacy_indicators: try: _efficacy_indicators =", "and not self._efficacy_indicators: try: _efficacy_indicators = objects.EfficacyIndicator.list( pecan.request.context, filters={\"action_plan_uuid\": self.uuid})", "None strategy = self._get_strategy(value) if strategy: self._strategy_uuid = strategy.uuid def", "= objects.ActionPlan.get_by_uuid( pecan.request.context, action_plan_uuid) return ActionPlan.convert_with_links(action_plan_to_update) @wsme_pecan.wsexpose(ActionPlan, types.uuid) def start(self,", "oslo_log import log import pecan from pecan import rest import", "global efficacy of this action plan\"\"\" state = wtypes.text \"\"\"This", "state machines to handle state transitions state_value = patch.value if", "name to filter by \"\"\" context = pecan.request.context policy.enforce(context, 'action_plan:get_all',", "actions for that audit. :param strategy: strategy UUID or name", "contains an estimated :ref:`global efficacy <efficacy_definition>` alongside a set of", "action_plans = objects.ActionPlan.list( pecan.request.context, limit, marker_obj, sort_key=sort_db_key, sort_dir=sort_dir, filters=filters) action_plans_collection", "of action_plans with detail. :param marker: pagination marker for large", "expand=True): sample = cls(uuid='9ef4d84c-41e8-4418-9220-ce55be0436af', state='ONGOING', created_at=datetime.datetime.utcnow(), deleted_at=None, updated_at=datetime.datetime.utcnow()) sample._audit_uuid =", "2013 Red Hat, Inc. # All Rights Reserved. 
# #", "if not need_api_sort else None) action_plans = objects.ActionPlan.list( pecan.request.context, limit,", ":ref:`global efficacy <efficacy_definition>` alongside a set of :ref:`efficacy indicators <efficacy_indicator_definition>`.", "audit_uuid: filters['audit_uuid'] = audit_uuid if strategy: if utils.is_uuid_like(strategy): filters['strategy_uuid'] =", "in fields: # Skip fields we do not expose. if", "audit this port belongs to\"\"\" strategy_uuid = wtypes.wsproperty( wtypes.text, _get_strategy_uuid,", "plan state\"\"\" links = wtypes.wsattr([link.Link], readonly=True) \"\"\"A list containing a", "except api_utils.JSONPATCH_EXCEPTIONS as e: raise exception.PatchError(patch=patch, reason=e) launch_action_plan = False", "of the strategy this action plan refers to\"\"\" efficacy_indicators =", "action plan is composed of a list of successive :ref:`Actions", "exposed in the API continue if patch_val == wtypes.Unset: patch_val", "this action plan\"\"\" global_efficacy = wtypes.wsattr(types.jsontype, readonly=True) \"\"\"The global efficacy", "= pecan.request.path.split('/')[:-1][-1] if parent != \"action_plans\": raise exception.HTTPNotFound expand =", "launch_action_plan = True action_plan_to_update.save() # NOTE: if action plan is", "the versions when these fields were introduced. \"\"\" pass class", "wtypes.text, wtypes.text, types.uuid, wtypes.text) def get_all(self, marker=None, limit=None, sort_key='id', sort_dir='asc',", "= ActionPlanCollection() ap_collection.action_plans = [ActionPlan.convert_with_links( p, expand) for p in", "watcher.api.controllers.v1 import collection from watcher.api.controllers.v1 import efficacy_indicator as efficacyindicator from", "for p in rpc_action_plans] ap_collection.next = ap_collection.get_next(limit, url=url, **kwargs) return", "transition not allowed: \" \"(%(initial_state)s -> %(new_state)s)\") raise exception.PatchError( patch=patch,", "cancelled from pending or recommended # state update action state", "plan. :param action_plan_uuid: UUID of a action. 
\"\"\" context =", "value constraints, and converts between the internal object model and", "See the License for the specific language governing permissions and", "field): continue self.fields.append(field) setattr(self, field, kwargs.get(field, wtypes.Unset)) self.fields.append('audit_uuid') self.fields.append('efficacy_indicators') setattr(self,", "\"\"\"REST controller for Actions.\"\"\" def __init__(self): super(ActionPlansController, self).__init__() self.applier_client =", "watcher.api.controllers.v1 import types from watcher.api.controllers.v1 import utils as api_utils from", "to in writing, software # distributed under the License is", "utils.is_int_like(value): strategy = objects.Strategy.get( pecan.request.context, value) else: strategy = objects.Strategy.get_by_name(", "policy.enforce( context, 'action_plan:get', action_plan, action='action_plan:get') return ActionPlan.convert_with_links(action_plan) @wsme_pecan.wsexpose(None, types.uuid, status_code=HTTPStatus.NO_CONTENT)", "that have changed for field in objects.ActionPlan.fields: try: patch_val =", "action_plan.state == ap_objects.State.PENDING: launch_action_plan = True if action_plan.state == ap_objects.State.CANCELLED:", "pecan.request.context, value) else: strategy = objects.Strategy.get_by_name( pecan.request.context, value) except exception.StrategyNotFound:", "limit=None, sort_key='id', sort_dir='asc', audit_uuid=None, strategy=None): \"\"\"Retrieve a list of action_plans", "dict(state=state_value)) @staticmethod def validate(patch): if patch.path == \"/state\": ActionPlanPatchType._validate_state(patch) return", "It also contains an estimated :ref:`global efficacy <efficacy_definition>` alongside a", "return types.JsonPatchType.validate(patch) @staticmethod def internal_attrs(): return types.JsonPatchType.internal_attrs() @staticmethod def mandatory_attrs():", "or agreed to in writing, software # distributed under the", "= None _efficacy_indicators = None def _get_audit_uuid(self): return self._audit_uuid def", "sets. :param limit: maximum number of resources to return in", "if strategy: if utils.is_uuid_like(strategy): filters['strategy_uuid'] = strategy else: filters['strategy_name'] =", "an action plan. \"\"\" _audit_uuid = None _strategy_uuid = None", "in blueprint watcher-api-validation if hasattr(action_plan, 'state'): transition = (action_plan_to_update.state, action_plan.state)", "of an action_plan. \"\"\" action_plan_to_start = api_utils.get_resource( 'ActionPlan', action_plan_uuid, eager=True)", "do not expose. if not hasattr(self, field): continue self.fields.append(field) setattr(self,", "_custom_actions = { 'start': ['POST'], 'detail': ['GET'] } def _get_action_plans_collection(self,", "_set_strategy_name(self, value): if value and self._strategy_name != value: self._strategy_name =", "'updated_at', 'audit_uuid', 'strategy_uuid', 'strategy_name']) action_plan.links = [ link.Link.make_link( 'self', url,", "try: _efficacy_indicators = objects.EfficacyIndicator.list( pecan.request.context, filters={\"action_plan_uuid\": self.uuid}) for indicator in", "= True action_plan_to_update.save() # NOTE: if action plan is cancelled", "action_plans = [ActionPlan] \"\"\"A list containing action_plans objects\"\"\" def __init__(self,", "action_plan_to_start, action='action_plan:start') if action_plan_to_start['state'] != \\ objects.action_plan.State.RECOMMENDED: raise exception.StartError( state=action_plan_to_start.state)", ":param sort_key: column to sort results by. Default: id. 
:param", "_get_audit_uuid(self): return self._audit_uuid def _set_audit_uuid(self, value): if value == wtypes.Unset:", "compliance with the License. # You may obtain a copy", "All Rights Reserved. # # Licensed under the Apache License,", "strategy: strategy UUID or name to filter by \"\"\" context", "watcher-api-validation if hasattr(action_plan, 'state'): transition = (action_plan_to_update.state, action_plan.state) if transition", "\"\"\" if self.from_actionsPlans: raise exception.OperationNotPermitted context = pecan.request.context action_plan_to_update =", "state here only if cancel_action_plan: filters = {'action_plan_uuid': action_plan.uuid} actions", "or # implied. # See the License for the specific", "\"\"\"The name of the strategy this action plan refers to\"\"\"", "from pecan import rest import wsme from wsme import types", "list containing action_plans objects\"\"\" def __init__(self, **kwargs): self._type = 'action_plans'", "<action_plan_state_machine>`. \"\"\" import datetime from http import HTTPStatus from oslo_log", "Language (UML) <http://www.uml.org/>`_. To see the life-cycle and description of", "action_plan_uuid) policy.enforce( context, 'action_plan:get', action_plan, action='action_plan:get') return ActionPlan.convert_with_links(action_plan) @wsme_pecan.wsexpose(None, types.uuid,", "action='action_plan:get_all') return self._get_action_plans_collection( marker, limit, sort_key, sort_dir, audit_uuid=audit_uuid, strategy=strategy) @wsme_pecan.wsexpose(ActionPlanCollection,", "api_utils.make_api_sort(action_plans_collection.action_plans, sort_key, sort_dir) return action_plans_collection @wsme_pecan.wsexpose(ActionPlanCollection, types.uuid, int, wtypes.text, wtypes.text,", "value: self._efficacy_indicators = value def _get_strategy(self, value): if value ==", "name to filter by \"\"\" context = pecan.request.context policy.enforce(context, 'action_plan:detail',", "not use this file except in compliance with the License.", "action_plan_to_update = api_utils.get_resource( 'ActionPlan', action_plan_uuid, eager=True) policy.enforce(context, 'action_plan:update', action_plan_to_update, action='action_plan:update')", "express or # implied. # See the License for the", "NOTE: if action plan is cancelled from pending or recommended", "this :ref:`Audit <audit_definition>`. In the default implementation of Watcher, an", ":ref:`Action Plan <action_plan_definition>` is generated by Watcher when an :ref:`Audit", "you may not use this file except in compliance with", "import action_plan as ap_objects LOG = log.getLogger(__name__) def hide_fields_in_newer_versions(obj): \"\"\"This", "parallel flows. An :ref:`Action Plan <action_plan_definition>` may be described using", "objects.EfficacyIndicator.list( pecan.request.context, filters={\"action_plan_uuid\": self.uuid}) for indicator in _efficacy_indicators: efficacy_indicator =", "sort_key, sort_dir) return action_plans_collection @wsme_pecan.wsexpose(ActionPlanCollection, types.uuid, int, wtypes.text, wtypes.text, types.uuid,", "_strategy_uuid = None _strategy_name = None _efficacy_indicators = None def", "ap_objects.State.CANCELLED), ] # todo: improve this in blueprint watcher-api-validation if", "return None strategy = None try: if utils.is_uuid_like(value) or utils.is_int_like(value):", "wtypes.Unset)) fields.append('strategy_uuid') setattr(self, 'strategy_uuid', kwargs.get('strategy_id', wtypes.Unset)) fields.append('strategy_name') setattr(self, 'strategy_name', kwargs.get('strategy_id',", "\"\"\"Update an existing action plan. 
:param action_plan_uuid: UUID of a", "plan. :param action_plan_uuid: UUID of a action plan. :param patch:", "as e: raise exception.PatchError(patch=patch, reason=e) launch_action_plan = False cancel_action_plan =", "def _convert_with_links(action_plan, url, expand=True): if not expand: action_plan.unset_fields_except( ['uuid', 'state',", "is distributed on an \"AS IS\" BASIS, # WITHOUT WARRANTIES", "utf-8 -*- # Copyright 2013 Red Hat, Inc. # All", "plan\"\"\" state = wtypes.text \"\"\"This action plan state\"\"\" links =", "Actions.\"\"\" def __init__(self): super(ActionPlansController, self).__init__() self.applier_client = rpcapi.ApplierAPI() from_actionsPlans =", "= list(objects.ActionPlan.fields) for field in fields: # Skip fields we", "used has found a :ref:`Solution <solution_definition>` to achieve the :ref:`Goal", "objects.Audit.get(pecan.request.context, value) self._audit_uuid = audit.uuid self.audit_id = audit.id except exception.AuditNotFound:", ":ref:`efficacy indicators <efficacy_indicator_definition>`. An :ref:`Action Plan <action_plan_definition>` is generated by", "in sequential and/or parallel flows. An :ref:`Action Plan <action_plan_definition>` may", "\"\"\" import datetime from http import HTTPStatus from oslo_log import", "the request's API version matches or exceeds the versions when", "__init__(self, **kwargs): super(ActionPlan, self).__init__() self.fields = [] fields = list(objects.ActionPlan.fields)", "exception.StartError( state=action_plan_to_start.state) action_plan_to_start['state'] = objects.action_plan.State.PENDING action_plan_to_start.save() self.applier_client.launch_action_plan(pecan.request.context, action_plan_uuid) action_plan_to_start =", "is None: self._set_efficacy_indicators(wtypes.Unset) return self._efficacy_indicators def _set_efficacy_indicators(self, value): efficacy_indicators =", "readonly=True) \"\"\"The global efficacy of this action plan\"\"\" state =", "tasks, which means it can not be split into smaller", "= {'description': 'Global efficacy', 'name': 'test_global_efficacy', 'unit': '%'} return cls._convert_with_links(sample,", "and handle more complex :ref:`Action Plan(s) <action_plan_definition>` composed of two", "which implies that the :ref:`Strategy <strategy_definition>` which was used has", "mandatory_attrs(): return [\"audit_id\", \"state\"] class ActionPlan(base.APIBase): \"\"\"API representation of a", "efficacy_indicator as efficacyindicator from watcher.api.controllers.v1 import types from watcher.api.controllers.v1 import", "sort_db_key = (sort_key if not need_api_sort else None) action_plans =", "def _get_audit_uuid(self): return self._audit_uuid def _set_audit_uuid(self, value): if value ==", "from watcher.api.controllers.v1 import types from watcher.api.controllers.v1 import utils as api_utils", "_validate_state(patch): serialized_patch = {'path': patch.path, 'op': patch.op} if patch.value is", "by \"\"\" context = pecan.request.context policy.enforce(context, 'action_plan:detail', action='action_plan:detail') # NOTE(lucasagomes):", "action plan\"\"\" state = wtypes.text \"\"\"This action plan state\"\"\" links", "the :ref:`Strategy <strategy_definition>` which was used has found a :ref:`Solution", "transition = (action_plan_to_update.state, action_plan.state) if transition not in allowed_patch_transitions: error_message", "'action_plan:detail', action='action_plan:detail') # NOTE(lucasagomes): /detail should only work agaist collections", "action_plan, action='action_plan:delete') allowed_states = 
(ap_objects.State.SUCCEEDED, ap_objects.State.RECOMMENDED, ap_objects.State.FAILED, ap_objects.State.SUPERSEDED, ap_objects.State.CANCELLED) if", "wtypes.Unset: return None strategy = None try: if utils.is_uuid_like(value) or", "an action plan. :param action_plan_uuid: UUID of a action. \"\"\"", "to filter by \"\"\" context = pecan.request.context policy.enforce(context, 'action_plan:get_all', action='action_plan:get_all')", "if action_plan.state not in allowed_states: raise exception.DeleteError( state=action_plan.state) action_plan.soft_delete() @wsme.validate(types.uuid,", "plan refers to\"\"\" strategy_name = wtypes.wsproperty( wtypes.text, _get_strategy_name, _set_strategy_name, mandatory=False)", "Item(s): - simple :ref:`Actions <action_definition>`: atomic tasks, which means it", "and not hasattr(ap_objects.State, state_value): msg = _(\"Invalid state: %(state)s\") raise", "_get_strategy_uuid(self): return self._strategy_uuid def _set_strategy_uuid(self, value): if value and self._strategy_uuid", "state transitions state_value = patch.value if state_value and not hasattr(ap_objects.State,", "= api_utils.validate_limit(limit) api_utils.validate_sort_dir(sort_dir) marker_obj = None if marker: marker_obj =", "audit_uuid = wtypes.wsproperty(types.uuid, _get_audit_uuid, _set_audit_uuid, mandatory=True) \"\"\"The UUID of the", "may be described using standard workflow model description formats such", "body=[ActionPlanPatchType]) def patch(self, action_plan_uuid, patch): \"\"\"Update an existing action plan.", "in allowed_states: raise exception.DeleteError( state=action_plan.state) action_plan.soft_delete() @wsme.validate(types.uuid, [ActionPlanPatchType]) @wsme_pecan.wsexpose(ActionPlan, types.uuid,", "action_plan_uuid) return ActionPlan.convert_with_links(action_plan_to_update) @wsme_pecan.wsexpose(ActionPlan, types.uuid) def start(self, action_plan_uuid, **kwargs): \"\"\"Start", "@wsme.validate(types.uuid, [ActionPlanPatchType]) @wsme_pecan.wsexpose(ActionPlan, types.uuid, body=[ActionPlanPatchType]) def patch(self, action_plan_uuid, patch): \"\"\"Update", "= { 'start': ['POST'], 'detail': ['GET'] } def _get_action_plans_collection(self, marker,", "License. \"\"\" An :ref:`Action Plan <action_plan_definition>` specifies a flow of", "to this controller are coming from the top-level resource ActionPlan.\"\"\"", "limit: maximum number of resources to return in a single", "'action_plan:update', action_plan_to_update, action='action_plan:update') try: action_plan_dict = action_plan_to_update.as_dict() action_plan = ActionPlan(**api_utils.apply_jsonpatch(", "'%'}] sample._global_efficacy = {'description': 'Global efficacy', 'name': 'test_global_efficacy', 'unit': '%'}", "IS\" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,", "'audit_uuid', 'strategy_uuid', 'strategy_name']) action_plan.links = [ link.Link.make_link( 'self', url, 'action_plans',", "sort_dir=sort_dir, filters=filters) action_plans_collection = ActionPlanCollection.convert_with_links( action_plans, limit, url=resource_url, expand=expand, sort_key=sort_key,", "_get_efficacy_indicators(self): if self._efficacy_indicators is None: self._set_efficacy_indicators(wtypes.Unset) return self._efficacy_indicators def _set_efficacy_indicators(self,", "strategy=None): \"\"\"Retrieve a list of action plans. :param marker: pagination", "\"\"\"Retrieve a list of action_plans with detail. :param marker: pagination", "a action plan. 
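# A minimal sketch of the HTTP surface served by the controller defined
# below, assuming the conventional /v1 mount point; treat the paths as
# illustrative since the exact prefix depends on how the API is wired up:
#
#   GET    /v1/action_plans               -> ActionPlansController.get_all
#   GET    /v1/action_plans/detail        -> ActionPlansController.detail
#   GET    /v1/action_plans/<uuid>        -> ActionPlansController.get_one
#   PATCH  /v1/action_plans/<uuid>        -> ActionPlansController.patch
#   DELETE /v1/action_plans/<uuid>        -> ActionPlansController.delete
#   POST   /v1/action_plans/<uuid>/start  -> ActionPlansController.start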
def hide_fields_in_newer_versions(obj):
    """This method hides fields that were added in newer API versions.

    Certain node fields were introduced at certain API versions.
    These fields are only made available when the request's API version
    matches or exceeds the versions when these fields were introduced.
    """
    pass


class ActionPlanPatchType(types.JsonPatchType):

    @staticmethod
    def _validate_state(patch):
        serialized_patch = {'path': patch.path, 'op': patch.op}
        if patch.value is not wtypes.Unset:
            serialized_patch['value'] = patch.value
        # todo: use state machines to handle state transitions
        state_value = patch.value
        if state_value and not hasattr(ap_objects.State, state_value):
            msg = _("Invalid state: %(state)s")
            raise exception.PatchError(
                patch=serialized_patch, reason=msg % dict(state=state_value))

    @staticmethod
    def validate(patch):
        if patch.path == "/state":
            ActionPlanPatchType._validate_state(patch)
        return types.JsonPatchType.validate(patch)

    @staticmethod
    def internal_attrs():
        return types.JsonPatchType.internal_attrs()

    @staticmethod
    def mandatory_attrs():
        return ["audit_id", "state"]
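# A minimal sketch of a JSON PATCH document that passes the validation
# above: the target value must be an attribute of ap_objects.State (for
# example PENDING), otherwise _validate_state() raises a PatchError. The
# body shape follows RFC 6902; the value here is illustrative:
#
#   [{"op": "replace", "path": "/state", "value": "PENDING"}]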
\"\"\"", "'state', 'efficacy_indicators', 'global_efficacy', 'updated_at', 'audit_uuid', 'strategy_uuid', 'strategy_name']) action_plan.links = [", "allowed_states = (ap_objects.State.SUCCEEDED, ap_objects.State.RECOMMENDED, ap_objects.State.FAILED, ap_objects.State.SUPERSEDED, ap_objects.State.CANCELLED) if action_plan.state not", "['POST'], 'detail': ['GET'] } def _get_action_plans_collection(self, marker, limit, sort_key, sort_dir,", "# # Unless required by applicable law or agreed to", "launch_action_plan = False cancel_action_plan = False # transitions that are", "can not be split into smaller tasks or commands from", "return [\"audit_id\", \"state\"] class ActionPlan(base.APIBase): \"\"\"API representation of a action", "self.strategy_id = strategy.id return strategy def _get_strategy_uuid(self): return self._strategy_uuid def", "reason=e) launch_action_plan = False cancel_action_plan = False # transitions that", "its components, allowing other implementations to generate and handle more", "should be executed in order to satisfy a given :ref:`Goal", "UUID or name to filter by \"\"\" context = pecan.request.context", "and self._strategy_name != value: self._strategy_name = None strategy = self._get_strategy(value)", "action_plan_dict, patch)) except api_utils.JSONPATCH_EXCEPTIONS as e: raise exception.PatchError(patch=patch, reason=e) launch_action_plan", "\"\"\" context = pecan.request.context policy.enforce(context, 'action_plan:detail', action='action_plan:detail') # NOTE(lucasagomes): /detail", "implied. # See the License for the specific language governing", "means it can not be split into smaller tasks or", "= objects.action_plan.State.PENDING action_plan_to_start.save() self.applier_client.launch_action_plan(pecan.request.context, action_plan_uuid) action_plan_to_start = objects.ActionPlan.get_by_uuid( pecan.request.context, action_plan_uuid)", "commands from an OpenStack point of view. - composite Actions:", "['GET'] } def _get_action_plans_collection(self, marker, limit, sort_key, sort_dir, expand=False, resource_url=None,", "context, 'action_plan:get', action_plan, action='action_plan:get') return ActionPlan.convert_with_links(action_plan) @wsme_pecan.wsexpose(None, types.uuid, status_code=HTTPStatus.NO_CONTENT) def", "LOG = log.getLogger(__name__) def hide_fields_in_newer_versions(obj): \"\"\"This method hides fields that", "**kwargs): ap_collection = ActionPlanCollection() ap_collection.action_plans = [ActionPlan.convert_with_links( p, expand) for", "'strategy_uuid', kwargs.get('strategy_id', wtypes.Unset)) fields.append('strategy_name') setattr(self, 'strategy_name', kwargs.get('strategy_id', wtypes.Unset)) @staticmethod def", "'Test indicator', 'name': 'test_indicator', 'unit': '%'}] sample._global_efficacy = {'description': 'Global", "return in a single result. 
:param sort_key: column to sort", "mandatory=True) \"\"\"The list of efficacy indicators associated to this action", "expand) class ActionPlanCollection(collection.Collection): \"\"\"API representation of a collection of action_plans.\"\"\"", "if (field == 'state' and patch_val == objects.action_plan.State.PENDING): launch_action_plan =", "if action plan is cancelled from pending or recommended #", "ActionPlanPatchType(types.JsonPatchType): @staticmethod def _validate_state(patch): serialized_patch = {'path': patch.path, 'op': patch.op}", "expand=True): action_plan = ActionPlan(**rpc_action_plan.as_dict()) hide_fields_in_newer_versions(action_plan) return cls._convert_with_links(action_plan, pecan.request.host_url, expand) @classmethod", "@staticmethod def convert_with_links(rpc_action_plans, limit, url=None, expand=False, **kwargs): ap_collection = ActionPlanCollection()", "point of view. - composite Actions: which are composed of", "_audit_uuid = None _strategy_uuid = None _strategy_name = None _efficacy_indicators", "we do not expose. if not hasattr(self, field): continue self.fields.append(field)", "try: if utils.is_uuid_like(value) or utils.is_int_like(value): strategy = objects.Strategy.get( pecan.request.context, value)", "!= value: try: audit = objects.Audit.get(pecan.request.context, value) self._audit_uuid = audit.uuid", "strategy this action plan refers to\"\"\" efficacy_indicators = wtypes.wsproperty( types.jsontype,", "= objects.Strategy.get_by_name( pecan.request.context, value) except exception.StrategyNotFound: pass if strategy: self.strategy_id", "import efficacy_indicator as efficacyindicator from watcher.api.controllers.v1 import types from watcher.api.controllers.v1", "utils.is_uuid_like(strategy): filters['strategy_uuid'] = strategy else: filters['strategy_name'] = strategy need_api_sort =", "hides fields that were added in newer API versions. Certain", "import policy from watcher.common import utils from watcher import objects", "ap_objects LOG = log.getLogger(__name__) def hide_fields_in_newer_versions(obj): \"\"\"This method hides fields", "strategy: self._strategy_name = strategy.name uuid = wtypes.wsattr(types.uuid, readonly=True) \"\"\"Unique UUID", "description formats such as `Business Process Model and Notation 2.0", "\" \"(%(initial_state)s -> %(new_state)s)\") raise exception.PatchError( patch=patch, reason=error_message % dict(", ":ref:`Actions <action_definition>`: atomic tasks, which means it can not be", "filter by \"\"\" context = pecan.request.context policy.enforce(context, 'action_plan:detail', action='action_plan:detail') #", "\"\"\" context = pecan.request.context action_plan = api_utils.get_resource( 'ActionPlan', action_plan_uuid, eager=True)", "value def _get_strategy(self, value): if value == wtypes.Unset: return None", "number of resources to return in a single result. 
:param", "if hasattr(action_plan, 'state'): transition = (action_plan_to_update.state, action_plan.state) if transition not", "setattr(self, 'audit_uuid', kwargs.get('audit_id', wtypes.Unset)) fields.append('strategy_uuid') setattr(self, 'strategy_uuid', kwargs.get('strategy_id', wtypes.Unset)) fields.append('strategy_name')", "Unless required by applicable law or agreed to in writing,", "!= \"action_plans\": raise exception.HTTPNotFound expand = True resource_url = '/'.join(['action_plans',", "[\"audit_id\", \"state\"] class ActionPlan(base.APIBase): \"\"\"API representation of a action plan.", "the specific language governing permissions and # limitations under the", "action plan. \"\"\" if self.from_actionsPlans: raise exception.OperationNotPermitted context = pecan.request.context", "<goal_definition>` of this :ref:`Audit <audit_definition>`. In the default implementation of", "value: try: audit = objects.Audit.get(pecan.request.context, value) self._audit_uuid = audit.uuid self.audit_id", "'http://localhost:9322', expand) class ActionPlanCollection(collection.Collection): \"\"\"API representation of a collection of", "wtypes.wsattr(types.jsontype, readonly=True) \"\"\"The global efficacy of this action plan\"\"\" state", "expand=False, **kwargs): ap_collection = ActionPlanCollection() ap_collection.action_plans = [ActionPlan.convert_with_links( p, expand)", "the API continue if patch_val == wtypes.Unset: patch_val = None", "refers to\"\"\" strategy_name = wtypes.wsproperty( wtypes.text, _get_strategy_name, _set_strategy_name, mandatory=False) \"\"\"The", "to indicate if the requests to this controller are coming", "versions. These fields are only made available when the request's", "additional_fields) limit = api_utils.validate_limit(limit) api_utils.validate_sort_dir(sort_dir) marker_obj = None if marker:", "action='action_plan:update') try: action_plan_dict = action_plan_to_update.as_dict() action_plan = ActionPlan(**api_utils.apply_jsonpatch( action_plan_dict, patch))", "satisfy a given :ref:`Goal <goal_definition>`. It also contains an estimated", "<action_definition>` ordered in sequential and/or parallel flows. An :ref:`Action Plan", "An :ref:`Action Plan <action_plan_definition>` is generated by Watcher when an", "machines to handle state transitions state_value = patch.value if state_value", "as wsme_pecan from watcher._i18n import _ from watcher.api.controllers import base", "update action state here only if cancel_action_plan: filters = {'action_plan_uuid':", "[ (ap_objects.State.RECOMMENDED, ap_objects.State.PENDING), (ap_objects.State.RECOMMENDED, ap_objects.State.CANCELLED), (ap_objects.State.ONGOING, ap_objects.State.CANCELLING), (ap_objects.State.PENDING, ap_objects.State.CANCELLED), ]", "Default: id. :param sort_dir: direction to sort. \"asc\" or \"desc\".", "limitations under the License. 
\"\"\" An :ref:`Action Plan <action_plan_definition>` specifies", "= value def _get_strategy(self, value): if value == wtypes.Unset: return", "associated action links\"\"\" hostname = wtypes.wsattr(wtypes.text, mandatory=False) \"\"\"Hostname the actionplan", "wtypes.text) def get_all(self, marker=None, limit=None, sort_key='id', sort_dir='asc', audit_uuid=None, strategy=None): \"\"\"Retrieve", "strategy = self._get_strategy(value) if strategy: self._strategy_name = strategy.name uuid =", "= pecan.request.context policy.enforce(context, 'action_plan:get_all', action='action_plan:get_all') return self._get_action_plans_collection( marker, limit, sort_key,", "exception.PatchError(patch=patch, reason=e) launch_action_plan = False cancel_action_plan = False # transitions", "p in rpc_action_plans] ap_collection.next = ap_collection.get_next(limit, url=url, **kwargs) return ap_collection", "pagination marker for large data sets. :param limit: maximum number", "marker_obj = objects.ActionPlan.get_by_uuid( pecan.request.context, marker) filters = {} if audit_uuid:", "strategy=None): \"\"\"Retrieve a list of action_plans with detail. :param marker:", "!= value: self._strategy_uuid = None strategy = self._get_strategy(value) if strategy:", "pecan.request.context, limit, marker_obj, sort_key=sort_db_key, sort_dir=sort_dir, filters=filters) action_plans_collection = ActionPlanCollection.convert_with_links( action_plans,", "strategy.uuid def _get_strategy_name(self): return self._strategy_name def _set_strategy_name(self, value): if value", "= self._get_strategy(value) if strategy: self._strategy_uuid = strategy.uuid def _get_strategy_name(self): return", "governing permissions and # limitations under the License. \"\"\" An", "getattr(action_plan, field) except AttributeError: # Ignore fields that aren't exposed", "generated by Watcher when an :ref:`Audit <audit_definition>` is successful which", "API versions. These fields are only made available when the", "indicate if the requests to this controller are coming from", "'action_plans' @staticmethod def convert_with_links(rpc_action_plans, limit, url=None, expand=False, **kwargs): ap_collection =", "\"action_plans\": raise exception.HTTPNotFound expand = True resource_url = '/'.join(['action_plans', 'detail'])", "def _get_strategy(self, value): if value == wtypes.Unset: return None strategy", "global_efficacy = wtypes.wsattr(types.jsontype, readonly=True) \"\"\"The global efficacy of this action", "None strategy = self._get_strategy(value) if strategy: self._strategy_name = strategy.name uuid", "\"/state\": ActionPlanPatchType._validate_state(patch) return types.JsonPatchType.validate(patch) @staticmethod def internal_attrs(): return types.JsonPatchType.internal_attrs() @staticmethod", "UUID of an action_plan. 
\"\"\" action_plan_to_start = api_utils.get_resource( 'ActionPlan', action_plan_uuid,", "'ActionPlan', action_plan_uuid, eager=True) context = pecan.request.context policy.enforce(context, 'action_plan:start', action_plan_to_start, action='action_plan:start')", "checking and value constraints, and converts between the internal object", "if launch_action_plan: self.applier_client.launch_action_plan(pecan.request.context, action_plan.uuid) action_plan_to_update = objects.ActionPlan.get_by_uuid( pecan.request.context, action_plan_uuid) return", "state\"\"\" links = wtypes.wsattr([link.Link], readonly=True) \"\"\"A list containing a self", "and associated action links\"\"\" hostname = wtypes.wsattr(wtypes.text, mandatory=False) \"\"\"Hostname the", "exception.StrategyNotFound: pass if strategy: self.strategy_id = strategy.id return strategy def", "API versions. Certain node fields were introduced at certain API", "resource ActionPlan.\"\"\" _custom_actions = { 'start': ['POST'], 'detail': ['GET'] }", "wtypes.text) def detail(self, marker=None, limit=None, sort_key='id', sort_dir='asc', audit_uuid=None, strategy=None): \"\"\"Retrieve", "of action plans. :param marker: pagination marker for large data", "plan is composed of a list of successive :ref:`Actions <action_definition>`", "have changed for field in objects.ActionPlan.fields: try: patch_val = getattr(action_plan,", "wtypes.wsattr([link.Link], readonly=True) \"\"\"A list containing a self link and associated", "self._strategy_uuid = strategy.uuid def _get_strategy_name(self): return self._strategy_name def _set_strategy_name(self, value):", "self._strategy_name = None strategy = self._get_strategy(value) if strategy: self._strategy_name =", "UUID of a action plan. :param patch: a json PATCH", "audit. :param strategy: strategy UUID or name to filter by", "using standard workflow model description formats such as `Business Process", "resource_url=None, audit_uuid=None, strategy=None): additional_fields = ['audit_uuid', 'strategy_uuid', 'strategy_name'] api_utils.validate_sort_key( sort_key,", "= [] if value == wtypes.Unset and not self._efficacy_indicators: try:", "None if action_plan_to_update[field] != patch_val: action_plan_to_update[field] = patch_val if (field", "if action_plan.state == ap_objects.State.CANCELLED: cancel_action_plan = True # Update only", "return types.JsonPatchType.internal_attrs() @staticmethod def mandatory_attrs(): return [\"audit_id\", \"state\"] class ActionPlan(base.APIBase):", "wtypes.wsproperty( types.jsontype, _get_efficacy_indicators, _set_efficacy_indicators, mandatory=True) \"\"\"The list of efficacy indicators", "of this :ref:`Audit <audit_definition>`. In the default implementation of Watcher,", "patch.value is not wtypes.Unset: serialized_patch['value'] = patch.value # todo: use", "import exception from watcher.common import policy from watcher.common import utils", "= pecan.request.context action_plan = api_utils.get_resource('ActionPlan', action_plan_uuid) policy.enforce( context, 'action_plan:get', action_plan,", "int, wtypes.text, wtypes.text, types.uuid, wtypes.text) def get_all(self, marker=None, limit=None, sort_key='id',", "action_plans with detail. 
class ActionPlanCollection(collection.Collection):
    """API representation of a collection of action_plans."""

    action_plans = [ActionPlan]
    """A list containing action_plans objects"""

    def __init__(self, **kwargs):
        self._type = 'action_plans'

    @staticmethod
    def convert_with_links(rpc_action_plans, limit, url=None, expand=False,
                           **kwargs):
        ap_collection = ActionPlanCollection()
        ap_collection.action_plans = [ActionPlan.convert_with_links(
            p, expand) for p in rpc_action_plans]
        ap_collection.next = ap_collection.get_next(limit, url=url, **kwargs)
        return ap_collection

    @classmethod
    def sample(cls):
        sample = cls()
        sample.action_plans = [ActionPlan.sample(expand=False)]
        return sample


class ActionPlansController(rest.RestController):
    """REST controller for Actions."""

    def __init__(self):
        super(ActionPlansController, self).__init__()
        self.applier_client = rpcapi.ApplierAPI()

    from_actionsPlans = False
    """A flag to indicate if the requests to this controller are coming
    from the top-level resource ActionPlan."""

    _custom_actions = {
        'start': ['POST'],
        'detail': ['GET']
    }

    def _get_action_plans_collection(self, marker, limit,
                                     sort_key, sort_dir, expand=False,
                                     resource_url=None, audit_uuid=None,
                                     strategy=None):
        additional_fields = ['audit_uuid', 'strategy_uuid', 'strategy_name']

        api_utils.validate_sort_key(
            sort_key, list(objects.ActionPlan.fields) + additional_fields)
        limit = api_utils.validate_limit(limit)
        api_utils.validate_sort_dir(sort_dir)
        marker_obj = None
        if marker:
            marker_obj = objects.ActionPlan.get_by_uuid(
                pecan.request.context, marker)

        filters = {}
        if audit_uuid:
            filters['audit_uuid'] = audit_uuid

        if strategy:
            if utils.is_uuid_like(strategy):
                filters['strategy_uuid'] = strategy
            else:
                filters['strategy_name'] = strategy

        need_api_sort = api_utils.check_need_api_sort(sort_key,
                                                      additional_fields)
        sort_db_key = (sort_key if not need_api_sort
                       else None)

        action_plans = objects.ActionPlan.list(
            pecan.request.context,
            limit,
            marker_obj, sort_key=sort_db_key,
            sort_dir=sort_dir, filters=filters)

        action_plans_collection = ActionPlanCollection.convert_with_links(
            action_plans, limit, url=resource_url, expand=expand,
            sort_key=sort_key, sort_dir=sort_dir)

        if need_api_sort:
            api_utils.make_api_sort(action_plans_collection.action_plans,
                                    sort_key, sort_dir)

        return action_plans_collection
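    # A sketch of how the listing parameters combine in practice; the host
    # and UUIDs are placeholders:
    #
    #   GET /v1/action_plans?audit_uuid=<uuid>&sort_key=strategy_name
    #       &sort_dir=desc&limit=50&marker=<last-seen-uuid>
    #
    # Sort keys that only exist in the API layer (audit_uuid, strategy_uuid,
    # strategy_name) cannot be pushed down to the database, so
    # check_need_api_sort() detects them above and make_api_sort() re-sorts
    # the returned page in memory after the DB query.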
    @wsme_pecan.wsexpose(ActionPlanCollection, types.uuid, int,
                         wtypes.text, wtypes.text, types.uuid,
                         wtypes.text)
    def get_all(self, marker=None, limit=None,
                sort_key='id', sort_dir='asc', audit_uuid=None,
                strategy=None):
        """Retrieve a list of action plans.

        :param marker: pagination marker for large data sets.
        :param limit: maximum number of resources to return in a single
                      result.
        :param sort_key: column to sort results by. Default: id.
        :param sort_dir: direction to sort. "asc" or "desc". Default: asc.
        :param audit_uuid: Optional UUID of an audit, to get only action
                           plans for that audit.
        :param strategy: strategy UUID or name to filter by.
        """
        context = pecan.request.context
        policy.enforce(context, 'action_plan:get_all',
                       action='action_plan:get_all')

        return self._get_action_plans_collection(
            marker, limit, sort_key, sort_dir,
            audit_uuid=audit_uuid, strategy=strategy)

    @wsme_pecan.wsexpose(ActionPlanCollection, types.uuid, int,
                         wtypes.text, wtypes.text, types.uuid,
                         wtypes.text)
    def detail(self, marker=None, limit=None,
               sort_key='id', sort_dir='asc', audit_uuid=None,
               strategy=None):
        """Retrieve a list of action plans with detail.

        :param marker: pagination marker for large data sets.
        :param limit: maximum number of resources to return in a single
                      result.
        :param sort_key: column to sort results by. Default: id.
        :param sort_dir: direction to sort. "asc" or "desc". Default: asc.
        :param audit_uuid: Optional UUID of an audit, to get only action
                           plans for that audit.
        :param strategy: strategy UUID or name to filter by.
        """
        context = pecan.request.context
        policy.enforce(context, 'action_plan:detail',
                       action='action_plan:detail')

        # NOTE(lucasagomes): /detail should only work against collections
        parent = pecan.request.path.split('/')[:-1][-1]
        if parent != "action_plans":
            raise exception.HTTPNotFound
        expand = True
        resource_url = '/'.join(['action_plans', 'detail'])
        return self._get_action_plans_collection(
            marker, limit, sort_key, sort_dir, expand,
            resource_url, audit_uuid=audit_uuid, strategy=strategy)

    @wsme_pecan.wsexpose(ActionPlan, types.uuid)
    def get_one(self, action_plan_uuid):
        """Retrieve information about the given action plan.

        :param action_plan_uuid: UUID of an action plan.
        """
        if self.from_actionsPlans:
            raise exception.OperationNotPermitted

        context = pecan.request.context
        action_plan = api_utils.get_resource('ActionPlan', action_plan_uuid)
        policy.enforce(
            context, 'action_plan:get', action_plan,
            action='action_plan:get')

        return ActionPlan.convert_with_links(action_plan)
    @wsme_pecan.wsexpose(None, types.uuid, status_code=HTTPStatus.NO_CONTENT)
    def delete(self, action_plan_uuid):
        """Delete an action plan.

        :param action_plan_uuid: UUID of an action plan.
        """
        context = pecan.request.context
        action_plan = api_utils.get_resource(
            'ActionPlan', action_plan_uuid, eager=True)
        policy.enforce(context, 'action_plan:delete', action_plan,
                       action='action_plan:delete')

        allowed_states = (ap_objects.State.SUCCEEDED,
                          ap_objects.State.RECOMMENDED,
                          ap_objects.State.FAILED,
                          ap_objects.State.SUPERSEDED,
                          ap_objects.State.CANCELLED)
        if action_plan.state not in allowed_states:
            raise exception.DeleteError(
                state=action_plan.state)

        action_plan.soft_delete()

    @wsme.validate(types.uuid, [ActionPlanPatchType])
    @wsme_pecan.wsexpose(ActionPlan, types.uuid,
                         body=[ActionPlanPatchType])
    def patch(self, action_plan_uuid, patch):
        """Update an existing action plan.

        :param action_plan_uuid: UUID of an action plan.
        :param patch: a json PATCH document to apply to this action plan.
        """
        context = pecan.request.context
        action_plan_to_update = api_utils.get_resource(
            'ActionPlan', action_plan_uuid, eager=True)
        policy.enforce(context, 'action_plan:update', action_plan_to_update,
                       action='action_plan:update')

        try:
            action_plan_dict = action_plan_to_update.as_dict()
            action_plan = ActionPlan(**api_utils.apply_jsonpatch(
                action_plan_dict, patch))
        except api_utils.JSONPATCH_EXCEPTIONS as e:
            raise exception.PatchError(patch=patch, reason=e)

        launch_action_plan = False
        cancel_action_plan = False

        # transitions that are allowed via PATCH
        allowed_patch_transitions = [
            (ap_objects.State.RECOMMENDED, ap_objects.State.PENDING),
            (ap_objects.State.RECOMMENDED, ap_objects.State.CANCELLED),
            (ap_objects.State.ONGOING, ap_objects.State.CANCELLING),
            (ap_objects.State.PENDING, ap_objects.State.CANCELLED),
        ]

        # todo: improve this in blueprint watcher-api-validation
        if hasattr(action_plan, 'state'):
            transition = (action_plan_to_update.state, action_plan.state)
            if transition not in allowed_patch_transitions:
                error_message = _("State transition not allowed: "
                                  "(%(initial_state)s -> %(new_state)s)")
                raise exception.PatchError(
                    patch=patch,
                    reason=error_message % dict(
                        initial_state=action_plan_to_update.state,
                        new_state=action_plan.state))

            if action_plan.state == ap_objects.State.PENDING:
                launch_action_plan = True
            if action_plan.state == ap_objects.State.CANCELLED:
                cancel_action_plan = True

        # Update only the fields that have changed
        for field in objects.ActionPlan.fields:
            try:
                patch_val = getattr(action_plan, field)
            except AttributeError:
                # Ignore fields that aren't exposed in the API
                continue
            if patch_val == wtypes.Unset:
                patch_val = None
            if action_plan_to_update[field] != patch_val:
                action_plan_to_update[field] = patch_val

            if (field == 'state'
                    and patch_val == objects.action_plan.State.PENDING):
                launch_action_plan = True

        action_plan_to_update.save()

        # NOTE: if the action plan is cancelled from the PENDING or
        # RECOMMENDED state, only the states of its actions are updated here
        if cancel_action_plan:
            filters = {'action_plan_uuid': action_plan.uuid}
            actions = objects.Action.list(pecan.request.context,
                                          filters=filters, eager=True)
            for a in actions:
                a.state = objects.action.State.CANCELLED
                a.save()

        if launch_action_plan:
            self.applier_client.launch_action_plan(pecan.request.context,
                                                   action_plan.uuid)

        action_plan_to_update = objects.ActionPlan.get_by_uuid(
            pecan.request.context, action_plan_uuid)
        return ActionPlan.convert_with_links(action_plan_to_update)
These fields are", "def __init__(self, **kwargs): super(ActionPlan, self).__init__() self.fields = [] fields =", "== wtypes.Unset: self._audit_uuid = wtypes.Unset elif value and self._audit_uuid !=", "types.uuid, body=[ActionPlanPatchType]) def patch(self, action_plan_uuid, patch): \"\"\"Update an existing action", "blueprint watcher-api-validation if hasattr(action_plan, 'state'): transition = (action_plan_to_update.state, action_plan.state) if", "a json PATCH document to apply to this action plan.", "def get_all(self, marker=None, limit=None, sort_key='id', sort_dir='asc', audit_uuid=None, strategy=None): \"\"\"Retrieve a", ":param action_plan_uuid: UUID of a action. \"\"\" context = pecan.request.context", "running on\"\"\" def __init__(self, **kwargs): super(ActionPlan, self).__init__() self.fields = []", "action_plan_to_update[field] != patch_val: action_plan_to_update[field] = patch_val if (field == 'state'", "= _(\"Invalid state: %(state)s\") raise exception.PatchError( patch=serialized_patch, reason=msg % dict(state=state_value))", "url=resource_url, expand=expand, sort_key=sort_key, sort_dir=sort_dir) if need_api_sort: api_utils.make_api_sort(action_plans_collection.action_plans, sort_key, sort_dir) return", "marker: marker_obj = objects.ActionPlan.get_by_uuid( pecan.request.context, marker) filters = {} if", "newer API versions. Certain node fields were introduced at certain", "strategy = objects.Strategy.get( pecan.request.context, value) else: strategy = objects.Strategy.get_by_name( pecan.request.context,", "readonly=True) \"\"\"A list containing a self link and associated action", "of a action plan. \"\"\" if self.from_actionsPlans: raise exception.OperationNotPermitted context", "'strategy_name'] api_utils.validate_sort_key( sort_key, list(objects.ActionPlan.fields) + additional_fields) limit = api_utils.validate_limit(limit) api_utils.validate_sort_dir(sort_dir)", "elif value and self._audit_uuid != value: try: audit = objects.Audit.get(pecan.request.context,", ":ref:`Strategy <strategy_definition>` which was used has found a :ref:`Solution <solution_definition>`", "types.jsontype, _get_efficacy_indicators, _set_efficacy_indicators, mandatory=True) \"\"\"The list of efficacy indicators associated", "limit, sort_key, sort_dir, expand=False, resource_url=None, audit_uuid=None, strategy=None): additional_fields = ['audit_uuid',", "if patch_val == wtypes.Unset: patch_val = None if action_plan_to_update[field] !=", "= pecan.request.context policy.enforce(context, 'action_plan:detail', action='action_plan:detail') # NOTE(lucasagomes): /detail should only", "In the default implementation of Watcher, an action plan is", "that the :ref:`Strategy <strategy_definition>` which was used has found a", "2.0) <http://www.omg.org/spec/BPMN/2.0/>`_ or `Unified Modeling Language (UML) <http://www.uml.org/>`_. To see", "order to satisfy a given :ref:`Goal <goal_definition>`. It also contains", "patch(self, action_plan_uuid, patch): \"\"\"Update an existing action plan. :param action_plan_uuid:", "_set_strategy_uuid, mandatory=False) \"\"\"Strategy UUID the action plan refers to\"\"\" strategy_name", "action plans. 
:param marker: pagination marker for large data sets.", "ap_objects.State.CANCELLED: cancel_action_plan = True # Update only the fields that", "except AttributeError: # Ignore fields that aren't exposed in the", "url, expand=True): if not expand: action_plan.unset_fields_except( ['uuid', 'state', 'efficacy_indicators', 'global_efficacy',", "handle state transitions state_value = patch.value if state_value and not", "standard workflow model description formats such as `Business Process Model", "list of action plans. :param marker: pagination marker for large", "True resource_url = '/'.join(['action_plans', 'detail']) return self._get_action_plans_collection( marker, limit, sort_key,", "== \"/state\": ActionPlanPatchType._validate_state(patch) return types.JsonPatchType.validate(patch) @staticmethod def internal_attrs(): return types.JsonPatchType.internal_attrs()", "@classmethod def convert_with_links(cls, rpc_action_plan, expand=True): action_plan = ActionPlan(**rpc_action_plan.as_dict()) hide_fields_in_newer_versions(action_plan) return", "set of :ref:`efficacy indicators <efficacy_indicator_definition>`. An :ref:`Action Plan <action_plan_definition>` is", "context=pecan.request.context, name=indicator.name, description=indicator.description, unit=indicator.unit, value=float(indicator.value), ) efficacy_indicators.append(efficacy_indicator.as_dict()) self._efficacy_indicators = efficacy_indicators", "-*- encoding: utf-8 -*- # Copyright 2013 Red Hat, Inc.", "of the audit this port belongs to\"\"\" strategy_uuid = wtypes.wsproperty(", "wtypes.text, _get_strategy_name, _set_strategy_name, mandatory=False) \"\"\"The name of the strategy this", "if action_plan_to_update[field] != patch_val: action_plan_to_update[field] = patch_val if (field ==", "collections parent = pecan.request.path.split('/')[:-1][-1] if parent != \"action_plans\": raise exception.HTTPNotFound", "need_api_sort = api_utils.check_need_api_sort(sort_key, additional_fields) sort_db_key = (sort_key if not need_api_sort", "an OpenStack point of view. - composite Actions: which are", "ordered in sequential and/or parallel flows. An :ref:`Action Plan <action_plan_definition>`", "'unit': '%'}] sample._global_efficacy = {'description': 'Global efficacy', 'name': 'test_global_efficacy', 'unit':", "marker=None, limit=None, sort_key='id', sort_dir='asc', audit_uuid=None, strategy=None): \"\"\"Retrieve a list of", "'abcee106-14d3-4515-b744-5a26885cf6f6' sample._efficacy_indicators = [{'description': 'Test indicator', 'name': 'test_indicator', 'unit': '%'}]", "== objects.action_plan.State.PENDING): launch_action_plan = True action_plan_to_update.save() # NOTE: if action", "else None) action_plans = objects.ActionPlan.list( pecan.request.context, limit, marker_obj, sort_key=sort_db_key, sort_dir=sort_dir,", "in writing, software # distributed under the License is distributed", "types.uuid) def get_one(self, action_plan_uuid): \"\"\"Retrieve information about the given action", "apply to this action plan. 
\"\"\" if self.from_actionsPlans: raise exception.OperationNotPermitted", "recommended # state update action state here only if cancel_action_plan:", "a flow of :ref:`Actions <action_definition>` that should be executed in", "types.uuid) def start(self, action_plan_uuid, **kwargs): \"\"\"Start an action_plan :param action_plan_uuid:", "self link and associated action links\"\"\" hostname = wtypes.wsattr(wtypes.text, mandatory=False)", "<audit_definition>` is successful which implies that the :ref:`Strategy <strategy_definition>` which", "composite Actions: which are composed of several simple :ref:`Actions <action_definition>`", "strategy.name uuid = wtypes.wsattr(types.uuid, readonly=True) \"\"\"Unique UUID for this action", "watcher.api.controllers import link from watcher.api.controllers.v1 import collection from watcher.api.controllers.v1 import", "many of its components, allowing other implementations to generate and", "filters={\"action_plan_uuid\": self.uuid}) for indicator in _efficacy_indicators: efficacy_indicator = efficacyindicator.EfficacyIndicator( context=pecan.request.context,", "ap_collection.action_plans = [ActionPlan.convert_with_links( p, expand) for p in rpc_action_plans] ap_collection.next", "# Copyright 2013 Red Hat, Inc. # All Rights Reserved.", "if utils.is_uuid_like(strategy): filters['strategy_uuid'] = strategy else: filters['strategy_name'] = strategy need_api_sort", "from oslo_log import log import pecan from pecan import rest", "self._audit_uuid != value: try: audit = objects.Audit.get(pecan.request.context, value) self._audit_uuid =", "# state update action state here only if cancel_action_plan: filters", "only actions for that audit. :param strategy: strategy UUID or", "License is distributed on an \"AS IS\" BASIS, # WITHOUT", "OF ANY KIND, either express or # implied. 
# See", "links = wtypes.wsattr([link.Link], readonly=True) \"\"\"A list containing a self link", "License, Version 2.0 (the \"License\"); # you may not use", "enforces type checking and value constraints, and converts between the", "types.uuid, int, wtypes.text, wtypes.text, types.uuid, wtypes.text) def detail(self, marker=None, limit=None,", "changed for field in objects.ActionPlan.fields: try: patch_val = getattr(action_plan, field)", "raise exception.HTTPNotFound expand = True resource_url = '/'.join(['action_plans', 'detail']) return", "self._get_action_plans_collection( marker, limit, sort_key, sort_dir, audit_uuid=audit_uuid, strategy=strategy) @wsme_pecan.wsexpose(ActionPlanCollection, types.uuid, int,", "value == wtypes.Unset and not self._efficacy_indicators: try: _efficacy_indicators = objects.EfficacyIndicator.list(", "from watcher.api.controllers.v1 import utils as api_utils from watcher.applier import rpcapi", "'self', url, 'action_plans', action_plan.uuid), link.Link.make_link( 'bookmark', url, 'action_plans', action_plan.uuid, bookmark=True)]", "rpc_action_plan, expand=True): action_plan = ActionPlan(**rpc_action_plan.as_dict()) hide_fields_in_newer_versions(action_plan) return cls._convert_with_links(action_plan, pecan.request.host_url, expand)", "# transitions that are allowed via PATCH allowed_patch_transitions = [", "types.JsonPatchType.validate(patch) @staticmethod def internal_attrs(): return types.JsonPatchType.internal_attrs() @staticmethod def mandatory_attrs(): return", "request's API version matches or exceeds the versions when these", "version matches or exceeds the versions when these fields were", "def convert_with_links(cls, rpc_action_plan, expand=True): action_plan = ActionPlan(**rpc_action_plan.as_dict()) hide_fields_in_newer_versions(action_plan) return cls._convert_with_links(action_plan,", "other implementations to generate and handle more complex :ref:`Action Plan(s)", "as ap_objects LOG = log.getLogger(__name__) def hide_fields_in_newer_versions(obj): \"\"\"This method hides", "return strategy def _get_strategy_uuid(self): return self._strategy_uuid def _set_strategy_uuid(self, value): if", "ap_collection = ActionPlanCollection() ap_collection.action_plans = [ActionPlan.convert_with_links( p, expand) for p", "= [ (ap_objects.State.RECOMMENDED, ap_objects.State.PENDING), (ap_objects.State.RECOMMENDED, ap_objects.State.CANCELLED), (ap_objects.State.ONGOING, ap_objects.State.CANCELLING), (ap_objects.State.PENDING, ap_objects.State.CANCELLED),", "the License for the specific language governing permissions and #", "types as wtypes import wsmeext.pecan as wsme_pecan from watcher._i18n import", "\"\"\"Retrieve information about the given action plan. 
:param action_plan_uuid: UUID", "== 'state' and patch_val == objects.action_plan.State.PENDING): launch_action_plan = True action_plan_to_update.save()", "# http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or", "should only work agaist collections parent = pecan.request.path.split('/')[:-1][-1] if parent", "a :ref:`Solution <solution_definition>` to achieve the :ref:`Goal <goal_definition>` of this", "action plan\"\"\" global_efficacy = wtypes.wsattr(types.jsontype, readonly=True) \"\"\"The global efficacy of", "of successive :ref:`Actions <action_definition>` (i.e., a Workflow of :ref:`Actions <action_definition>`", "todo: improve this in blueprint watcher-api-validation if hasattr(action_plan, 'state'): transition", "see the life-cycle and description of :ref:`Action Plan <action_plan_definition>` states,", "types.uuid, int, wtypes.text, wtypes.text, types.uuid, wtypes.text) def get_all(self, marker=None, limit=None,", "a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 #", "this action plan. \"\"\" if self.from_actionsPlans: raise exception.OperationNotPermitted context =", "to return in a single result. :param sort_key: column to", "return self._audit_uuid def _set_audit_uuid(self, value): if value == wtypes.Unset: self._audit_uuid", "import wsme from wsme import types as wtypes import wsmeext.pecan", "aren't exposed in the API continue if patch_val == wtypes.Unset:", "fields are only made available when the request's API version", "this action plan refers to\"\"\" efficacy_indicators = wtypes.wsproperty( types.jsontype, _get_efficacy_indicators,", "\"\"\"This method hides fields that were added in newer API", "objects.action_plan.State.PENDING action_plan_to_start.save() self.applier_client.launch_action_plan(pecan.request.context, action_plan_uuid) action_plan_to_start = objects.ActionPlan.get_by_uuid( pecan.request.context, action_plan_uuid) return", "@staticmethod def internal_attrs(): return types.JsonPatchType.internal_attrs() @staticmethod def mandatory_attrs(): return [\"audit_id\",", "and self._audit_uuid != value: try: audit = objects.Audit.get(pecan.request.context, value) self._audit_uuid", "# distributed under the License is distributed on an \"AS", "cls._convert_with_links(sample, 'http://localhost:9322', expand) class ActionPlanCollection(collection.Collection): \"\"\"API representation of a collection", "name of the strategy this action plan refers to\"\"\" efficacy_indicators", "# Unless required by applicable law or agreed to in", "setattr(self, 'strategy_name', kwargs.get('strategy_id', wtypes.Unset)) @staticmethod def _convert_with_links(action_plan, url, expand=True): if", "patch_val = getattr(action_plan, field) except AttributeError: # Ignore fields that", "and # limitations under the License. \"\"\" An :ref:`Action Plan", "between the internal object model and the API representation of", "\"AS IS\" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY", "import rpcapi from watcher.common import exception from watcher.common import policy", "[ActionPlan.convert_with_links( p, expand) for p in rpc_action_plans] ap_collection.next = ap_collection.get_next(limit,", "transitions state_value = patch.value if state_value and not hasattr(ap_objects.State, state_value):", "flows. An :ref:`Action Plan <action_plan_definition>` may be described using standard", "API representation of an action plan. 
\"\"\" _audit_uuid = None", "self._set_efficacy_indicators(wtypes.Unset) return self._efficacy_indicators def _set_efficacy_indicators(self, value): efficacy_indicators = [] if", "ActionPlan(**rpc_action_plan.as_dict()) hide_fields_in_newer_versions(action_plan) return cls._convert_with_links(action_plan, pecan.request.host_url, expand) @classmethod def sample(cls, expand=True):", "<action_definition>` belonging to a unique branch). However, Watcher provides abstract", "api_utils.get_resource( 'ActionPlan', action_plan_uuid, eager=True) policy.enforce(context, 'action_plan:delete', action_plan, action='action_plan:delete') allowed_states =", "an existing action plan. :param action_plan_uuid: UUID of a action", "given :ref:`Goal <goal_definition>`. It also contains an estimated :ref:`global efficacy", "Action Item(s): - simple :ref:`Actions <action_definition>`: atomic tasks, which means", "if need_api_sort: api_utils.make_api_sort(action_plans_collection.action_plans, sort_key, sort_dir) return action_plans_collection @wsme_pecan.wsexpose(ActionPlanCollection, types.uuid, int,", "patch)) except api_utils.JSONPATCH_EXCEPTIONS as e: raise exception.PatchError(patch=patch, reason=e) launch_action_plan =", "value and self._strategy_name != value: self._strategy_name = None strategy =", "== ap_objects.State.PENDING: launch_action_plan = True if action_plan.state == ap_objects.State.CANCELLED: cancel_action_plan", "these fields were introduced. \"\"\" pass class ActionPlanPatchType(types.JsonPatchType): @staticmethod def", "the Apache License, Version 2.0 (the \"License\"); # you may", "of a action plan. This class enforces type checking and", "the default implementation of Watcher, an action plan is composed", "\"\"\"Start an action_plan :param action_plan_uuid: UUID of an action_plan. \"\"\"", "= objects.Action.list(pecan.request.context, filters=filters, eager=True) for a in actions: a.state =", "if marker: marker_obj = objects.ActionPlan.get_by_uuid( pecan.request.context, marker) filters = {}", "simple :ref:`Actions <action_definition>`: atomic tasks, which means it can not", "ActionPlan.convert_with_links(action_plan_to_update) @wsme_pecan.wsexpose(ActionPlan, types.uuid) def start(self, action_plan_uuid, **kwargs): \"\"\"Start an action_plan", "a unique branch). However, Watcher provides abstract interfaces for many", "view. 
- composite Actions: which are composed of several simple", "several simple :ref:`Actions <action_definition>` ordered in sequential and/or parallel flows.", "specifies a flow of :ref:`Actions <action_definition>` that should be executed", "context = pecan.request.context action_plan = api_utils.get_resource( 'ActionPlan', action_plan_uuid, eager=True) policy.enforce(context,", "this in blueprint watcher-api-validation if hasattr(action_plan, 'state'): transition = (action_plan_to_update.state,", "def _get_strategy_uuid(self): return self._strategy_uuid def _set_strategy_uuid(self, value): if value and", "formats such as `Business Process Model and Notation 2.0 (BPMN", "ActionPlan.convert_with_links(action_plan) @wsme_pecan.wsexpose(None, types.uuid, status_code=HTTPStatus.NO_CONTENT) def delete(self, action_plan_uuid): \"\"\"Delete an action", "!= \\ objects.action_plan.State.RECOMMENDED: raise exception.StartError( state=action_plan_to_start.state) action_plan_to_start['state'] = objects.action_plan.State.PENDING action_plan_to_start.save()", "{'path': patch.path, 'op': patch.op} if patch.value is not wtypes.Unset: serialized_patch['value']", "value): if value and self._strategy_name != value: self._strategy_name = None", "%(state)s\") raise exception.PatchError( patch=serialized_patch, reason=msg % dict(state=state_value)) @staticmethod def validate(patch):", "watcher.common import exception from watcher.common import policy from watcher.common import", "= wtypes.wsattr(wtypes.text, mandatory=False) \"\"\"Hostname the actionplan is running on\"\"\" def", "objects.action_plan.State.RECOMMENDED: raise exception.StartError( state=action_plan_to_start.state) action_plan_to_start['state'] = objects.action_plan.State.PENDING action_plan_to_start.save() self.applier_client.launch_action_plan(pecan.request.context, action_plan_uuid)", "= ActionPlan(**api_utils.apply_jsonpatch( action_plan_dict, patch)) except api_utils.JSONPATCH_EXCEPTIONS as e: raise exception.PatchError(patch=patch,", "sort_key, sort_dir, expand=False, resource_url=None, audit_uuid=None, strategy=None): additional_fields = ['audit_uuid', 'strategy_uuid',", "None if marker: marker_obj = objects.ActionPlan.get_by_uuid( pecan.request.context, marker) filters =", "fields that aren't exposed in the API continue if patch_val", "ANY KIND, either express or # implied. # See the", "a list of action_plans with detail. :param marker: pagination marker", "(i.e., a Workflow of :ref:`Actions <action_definition>` belonging to a unique", "if audit_uuid: filters['audit_uuid'] = audit_uuid if strategy: if utils.is_uuid_like(strategy): filters['strategy_uuid']", ":param marker: pagination marker for large data sets. 
:param limit:", "api_utils.get_resource( 'ActionPlan', action_plan_uuid, eager=True) policy.enforce(context, 'action_plan:update', action_plan_to_update, action='action_plan:update') try: action_plan_dict", "log.getLogger(__name__) def hide_fields_in_newer_versions(obj): \"\"\"This method hides fields that were added", "= ap_collection.get_next(limit, url=url, **kwargs) return ap_collection @classmethod def sample(cls): sample", "LOG.exception(exc) elif value and self._efficacy_indicators != value: self._efficacy_indicators = value", "None def _get_efficacy_indicators(self): if self._efficacy_indicators is None: self._set_efficacy_indicators(wtypes.Unset) return self._efficacy_indicators", "action_plan_to_update, action='action_plan:update') try: action_plan_dict = action_plan_to_update.as_dict() action_plan = ActionPlan(**api_utils.apply_jsonpatch( action_plan_dict,", "pecan.request.path.split('/')[:-1][-1] if parent != \"action_plans\": raise exception.HTTPNotFound expand = True", "= cls() sample.action_plans = [ActionPlan.sample(expand=False)] return sample class ActionPlansController(rest.RestController): \"\"\"REST", "to\"\"\" strategy_uuid = wtypes.wsproperty( wtypes.text, _get_strategy_uuid, _set_strategy_uuid, mandatory=False) \"\"\"Strategy UUID", "wtypes.wsattr(wtypes.text, mandatory=False) \"\"\"Hostname the actionplan is running on\"\"\" def __init__(self,", "import rest import wsme from wsme import types as wtypes", "= patch_val if (field == 'state' and patch_val == objects.action_plan.State.PENDING):", "['audit_uuid', 'strategy_uuid', 'strategy_name'] api_utils.validate_sort_key( sort_key, list(objects.ActionPlan.fields) + additional_fields) limit =", "action_plans objects\"\"\" def __init__(self, **kwargs): self._type = 'action_plans' @staticmethod def", "were introduced at certain API versions. These fields are only", "ActionPlanPatchType._validate_state(patch) return types.JsonPatchType.validate(patch) @staticmethod def internal_attrs(): return types.JsonPatchType.internal_attrs() @staticmethod def", "self._get_strategy(value) if strategy: self._strategy_uuid = strategy.uuid def _get_strategy_name(self): return self._strategy_name", "under the License is distributed on an \"AS IS\" BASIS,", "collection from watcher.api.controllers.v1 import efficacy_indicator as efficacyindicator from watcher.api.controllers.v1 import", "strategy = objects.Strategy.get_by_name( pecan.request.context, value) except exception.StrategyNotFound: pass if strategy:", "unique branch). 
However, Watcher provides abstract interfaces for many of", "url=url, **kwargs) return ap_collection @classmethod def sample(cls): sample = cls()", "- simple :ref:`Actions <action_definition>`: atomic tasks, which means it can", "rpcapi from watcher.common import exception from watcher.common import policy from", "if patch.path == \"/state\": ActionPlanPatchType._validate_state(patch) return types.JsonPatchType.validate(patch) @staticmethod def internal_attrs():", "ActionPlanCollection.convert_with_links( action_plans, limit, url=resource_url, expand=expand, sort_key=sort_key, sort_dir=sort_dir) if need_api_sort: api_utils.make_api_sort(action_plans_collection.action_plans,", "= strategy.name uuid = wtypes.wsattr(types.uuid, readonly=True) \"\"\"Unique UUID for this", "when an :ref:`Audit <audit_definition>` is successful which implies that the", "state=action_plan.state) action_plan.soft_delete() @wsme.validate(types.uuid, [ActionPlanPatchType]) @wsme_pecan.wsexpose(ActionPlan, types.uuid, body=[ActionPlanPatchType]) def patch(self, action_plan_uuid,", "result. :param sort_key: column to sort results by. Default: id.", "import objects from watcher.objects import action_plan as ap_objects LOG =", "if state_value and not hasattr(ap_objects.State, state_value): msg = _(\"Invalid state:", "if the requests to this controller are coming from the", "if self.from_actionsPlans: raise exception.OperationNotPermitted context = pecan.request.context action_plan_to_update = api_utils.get_resource(", "action_plan.links = [ link.Link.make_link( 'self', url, 'action_plans', action_plan.uuid), link.Link.make_link( 'bookmark',", "pecan.request.context action_plan = api_utils.get_resource('ActionPlan', action_plan_uuid) policy.enforce( context, 'action_plan:get', action_plan, action='action_plan:get')", "as exc: LOG.exception(exc) elif value and self._efficacy_indicators != value: self._efficacy_indicators", "<efficacy_definition>` alongside a set of :ref:`efficacy indicators <efficacy_indicator_definition>`. An :ref:`Action", "self).__init__() self.applier_client = rpcapi.ApplierAPI() from_actionsPlans = False \"\"\"A flag to", "representation of a action plan. This class enforces type checking", "# -*- encoding: utf-8 -*- # Copyright 2013 Red Hat,", "a single result. :param sort_key: column to sort results by.", "_(\"Invalid state: %(state)s\") raise exception.PatchError( patch=serialized_patch, reason=msg % dict(state=state_value)) @staticmethod", "ap_objects.State.CANCELLING), (ap_objects.State.PENDING, ap_objects.State.CANCELLED), ] # todo: improve this in blueprint", "objects.Strategy.get_by_name( pecan.request.context, value) except exception.StrategyNotFound: pass if strategy: self.strategy_id =", "hasattr(action_plan, 'state'): transition = (action_plan_to_update.state, action_plan.state) if transition not in", "value): if value == wtypes.Unset: return None strategy = None", "sort_key: column to sort results by. Default: id. 
:param sort_dir:", "from watcher.api.controllers import base from watcher.api.controllers import link from watcher.api.controllers.v1", "sort_key='id', sort_dir='asc', audit_uuid=None, strategy=None): \"\"\"Retrieve a list of action_plans with", "rpcapi.ApplierAPI() from_actionsPlans = False \"\"\"A flag to indicate if the", "_get_strategy_name(self): return self._strategy_name def _set_strategy_name(self, value): if value and self._strategy_name", "= [ link.Link.make_link( 'self', url, 'action_plans', action_plan.uuid), link.Link.make_link( 'bookmark', url,", "Model and Notation 2.0 (BPMN 2.0) <http://www.omg.org/spec/BPMN/2.0/>`_ or `Unified Modeling", "`Business Process Model and Notation 2.0 (BPMN 2.0) <http://www.omg.org/spec/BPMN/2.0/>`_ or", "from the top-level resource ActionPlan.\"\"\" _custom_actions = { 'start': ['POST'],", "patch.path, 'op': patch.op} if patch.value is not wtypes.Unset: serialized_patch['value'] =", "pecan from pecan import rest import wsme from wsme import", "_get_strategy_name, _set_strategy_name, mandatory=False) \"\"\"The name of the strategy this action", "] # todo: improve this in blueprint watcher-api-validation if hasattr(action_plan,", "described using standard workflow model description formats such as `Business", "not need_api_sort else None) action_plans = objects.ActionPlan.list( pecan.request.context, limit, marker_obj,", "delete(self, action_plan_uuid): \"\"\"Delete an action plan. :param action_plan_uuid: UUID of", "on\"\"\" def __init__(self, **kwargs): super(ActionPlan, self).__init__() self.fields = [] fields", "strategy: self.strategy_id = strategy.id return strategy def _get_strategy_uuid(self): return self._strategy_uuid", "def get_one(self, action_plan_uuid): \"\"\"Retrieve information about the given action plan.", "watcher.api.controllers import base from watcher.api.controllers import link from watcher.api.controllers.v1 import", "the License. # You may obtain a copy of the", "value): efficacy_indicators = [] if value == wtypes.Unset and not", "# See the License for the specific language governing permissions", "= patch.value # todo: use state machines to handle state", "detail. :param marker: pagination marker for large data sets. :param", "= None if action_plan_to_update[field] != patch_val: action_plan_to_update[field] = patch_val if", "plan. \"\"\" if self.from_actionsPlans: raise exception.OperationNotPermitted context = pecan.request.context action_plan_to_update", "to sort results by. Default: id. 
:param sort_dir: direction to", "allowed_patch_transitions: error_message = _(\"State transition not allowed: \" \"(%(initial_state)s ->", "if strategy: self._strategy_uuid = strategy.uuid def _get_strategy_name(self): return self._strategy_name def", "watcher.common import policy from watcher.common import utils from watcher import", "(ap_objects.State.PENDING, ap_objects.State.CANCELLED), ] # todo: improve this in blueprint watcher-api-validation", "\"\"\"A list containing action_plans objects\"\"\" def __init__(self, **kwargs): self._type =", "ap_objects.State.PENDING: launch_action_plan = True if action_plan.state == ap_objects.State.CANCELLED: cancel_action_plan =", "ap_objects.State.SUPERSEDED, ap_objects.State.CANCELLED) if action_plan.state not in allowed_states: raise exception.DeleteError( state=action_plan.state)", "of this action plan\"\"\" state = wtypes.text \"\"\"This action plan", "self.fields = [] fields = list(objects.ActionPlan.fields) for field in fields:", "into smaller tasks or commands from an OpenStack point of", "if strategy: self._strategy_name = strategy.name uuid = wtypes.wsattr(types.uuid, readonly=True) \"\"\"Unique", "class ActionPlanPatchType(types.JsonPatchType): @staticmethod def _validate_state(patch): serialized_patch = {'path': patch.path, 'op':", "self._strategy_uuid def _set_strategy_uuid(self, value): if value and self._strategy_uuid != value:", "action_plans, limit, url=resource_url, expand=expand, sort_key=sort_key, sort_dir=sort_dir) if need_api_sort: api_utils.make_api_sort(action_plans_collection.action_plans, sort_key,", "@staticmethod def validate(patch): if patch.path == \"/state\": ActionPlanPatchType._validate_state(patch) return types.JsonPatchType.validate(patch)", "None strategy = None try: if utils.is_uuid_like(value) or utils.is_int_like(value): strategy", "cancel_action_plan = False # transitions that are allowed via PATCH", "action_plan_to_update[field] = patch_val if (field == 'state' and patch_val ==", "OpenStack point of view. - composite Actions: which are composed", "self._efficacy_indicators: try: _efficacy_indicators = objects.EfficacyIndicator.list( pecan.request.context, filters={\"action_plan_uuid\": self.uuid}) for indicator", "from watcher.common import policy from watcher.common import utils from watcher", "Licensed under the Apache License, Version 2.0 (the \"License\"); #", "class enforces type checking and value constraints, and converts between", "self._strategy_uuid != value: self._strategy_uuid = None strategy = self._get_strategy(value) if", "@staticmethod def _convert_with_links(action_plan, url, expand=True): if not expand: action_plan.unset_fields_except( ['uuid',", "ap_collection.next = ap_collection.get_next(limit, url=url, **kwargs) return ap_collection @classmethod def sample(cls):", "self._strategy_uuid = None strategy = self._get_strategy(value) if strategy: self._strategy_uuid =", "return ap_collection @classmethod def sample(cls): sample = cls() sample.action_plans =", "writing, software # distributed under the License is distributed on", "or name to filter by \"\"\" context = pecan.request.context policy.enforce(context,", "column to sort results by. Default: id. 
:param sort_dir: direction", "validate(patch): if patch.path == \"/state\": ActionPlanPatchType._validate_state(patch) return types.JsonPatchType.validate(patch) @staticmethod def", "hasattr(ap_objects.State, state_value): msg = _(\"Invalid state: %(state)s\") raise exception.PatchError( patch=serialized_patch,", "except exception.EfficacyIndicatorNotFound as exc: LOG.exception(exc) elif value and self._efficacy_indicators !=", "utils as api_utils from watcher.applier import rpcapi from watcher.common import", "filters['audit_uuid'] = audit_uuid if strategy: if utils.is_uuid_like(strategy): filters['strategy_uuid'] = strategy", "= {'path': patch.path, 'op': patch.op} if patch.value is not wtypes.Unset:", "\"\"\" action_plan_to_start = api_utils.get_resource( 'ActionPlan', action_plan_uuid, eager=True) context = pecan.request.context", "\"\"\" _audit_uuid = None _strategy_uuid = None _strategy_name = None", "to get only actions for that audit. :param strategy: strategy", "method hides fields that were added in newer API versions.", "that audit. :param strategy: strategy UUID or name to filter", "**kwargs): self._type = 'action_plans' @staticmethod def convert_with_links(rpc_action_plans, limit, url=None, expand=False,", "value) else: strategy = objects.Strategy.get_by_name( pecan.request.context, value) except exception.StrategyNotFound: pass", "serialized_patch['value'] = patch.value # todo: use state machines to handle", "@wsme_pecan.wsexpose(ActionPlanCollection, types.uuid, int, wtypes.text, wtypes.text, types.uuid, wtypes.text) def get_all(self, marker=None,", ":ref:`the Action Plan state machine <action_plan_state_machine>`. \"\"\" import datetime from", "of an action plan. \"\"\" _audit_uuid = None _strategy_uuid =", "via PATCH allowed_patch_transitions = [ (ap_objects.State.RECOMMENDED, ap_objects.State.PENDING), (ap_objects.State.RECOMMENDED, ap_objects.State.CANCELLED), (ap_objects.State.ONGOING,", "description=indicator.description, unit=indicator.unit, value=float(indicator.value), ) efficacy_indicators.append(efficacy_indicator.as_dict()) self._efficacy_indicators = efficacy_indicators except exception.EfficacyIndicatorNotFound", "fields = list(objects.ActionPlan.fields) for field in fields: # Skip fields", "certain API versions. These fields are only made available when", "composed of two types of Action Item(s): - simple :ref:`Actions", "the fields that have changed for field in objects.ActionPlan.fields: try:", "%(new_state)s)\") raise exception.PatchError( patch=patch, reason=error_message % dict( initial_state=action_plan_to_update.state, new_state=action_plan.state)) if", "Watcher, an action plan is composed of a list of", "efficacy_indicators.append(efficacy_indicator.as_dict()) self._efficacy_indicators = efficacy_indicators except exception.EfficacyIndicatorNotFound as exc: LOG.exception(exc) elif", "and/or parallel flows. 
An :ref:`Action Plan <action_plan_definition>` may be described", "super(ActionPlan, self).__init__() self.fields = [] fields = list(objects.ActionPlan.fields) for field", "@staticmethod def mandatory_attrs(): return [\"audit_id\", \"state\"] class ActionPlan(base.APIBase): \"\"\"API representation", "UUID the action plan refers to\"\"\" strategy_name = wtypes.wsproperty( wtypes.text,", "controller are coming from the top-level resource ActionPlan.\"\"\" _custom_actions =", "== wtypes.Unset: return None strategy = None try: if utils.is_uuid_like(value)", "'name': 'test_indicator', 'unit': '%'}] sample._global_efficacy = {'description': 'Global efficacy', 'name':", "action_plan = ActionPlan(**api_utils.apply_jsonpatch( action_plan_dict, patch)) except api_utils.JSONPATCH_EXCEPTIONS as e: raise", "is not wtypes.Unset: serialized_patch['value'] = patch.value # todo: use state", "if cancel_action_plan: filters = {'action_plan_uuid': action_plan.uuid} actions = objects.Action.list(pecan.request.context, filters=filters,", "context = pecan.request.context action_plan = api_utils.get_resource('ActionPlan', action_plan_uuid) policy.enforce( context, 'action_plan:get',", "pass class ActionPlanPatchType(types.JsonPatchType): @staticmethod def _validate_state(patch): serialized_patch = {'path': patch.path,", "to this action plan. \"\"\" if self.from_actionsPlans: raise exception.OperationNotPermitted context", "list(objects.ActionPlan.fields) + additional_fields) limit = api_utils.validate_limit(limit) api_utils.validate_sort_dir(sort_dir) marker_obj = None", "efficacy_indicators = wtypes.wsproperty( types.jsontype, _get_efficacy_indicators, _set_efficacy_indicators, mandatory=True) \"\"\"The list of", "= wtypes.wsproperty( wtypes.text, _get_strategy_uuid, _set_strategy_uuid, mandatory=False) \"\"\"Strategy UUID the action", "and patch_val == objects.action_plan.State.PENDING): launch_action_plan = True action_plan_to_update.save() # NOTE:", "efficacy indicators associated to this action plan\"\"\" global_efficacy = wtypes.wsattr(types.jsontype,", "language governing permissions and # limitations under the License. \"\"\"", "!= patch_val: action_plan_to_update[field] = patch_val if (field == 'state' and", "import types as wtypes import wsmeext.pecan as wsme_pecan from watcher._i18n", "filters=filters) action_plans_collection = ActionPlanCollection.convert_with_links( action_plans, limit, url=resource_url, expand=expand, sort_key=sort_key, sort_dir=sort_dir)", "list of action_plans with detail. :param marker: pagination marker for", "def sample(cls): sample = cls() sample.action_plans = [ActionPlan.sample(expand=False)] return sample", "_get_audit_uuid, _set_audit_uuid, mandatory=True) \"\"\"The UUID of the audit this port", "an action_plan. \"\"\" action_plan_to_start = api_utils.get_resource( 'ActionPlan', action_plan_uuid, eager=True) context", "audit_uuid=None, strategy=None): \"\"\"Retrieve a list of action_plans with detail. :param", "by \"\"\" context = pecan.request.context policy.enforce(context, 'action_plan:get_all', action='action_plan:get_all') return self._get_action_plans_collection(", "'ActionPlan', action_plan_uuid, eager=True) policy.enforce(context, 'action_plan:delete', action_plan, action='action_plan:delete') allowed_states = (ap_objects.State.SUCCEEDED,", "(UML) <http://www.uml.org/>`_. 
To see the life-cycle and description of :ref:`Action", "complex :ref:`Action Plan(s) <action_plan_definition>` composed of two types of Action", "Actions: which are composed of several simple :ref:`Actions <action_definition>` ordered", "wtypes.text \"\"\"This action plan state\"\"\" links = wtypes.wsattr([link.Link], readonly=True) \"\"\"A", "objects.ActionPlan.get_by_uuid( pecan.request.context, action_plan_uuid) return ActionPlan.convert_with_links(action_plan_to_update) @wsme_pecan.wsexpose(ActionPlan, types.uuid) def start(self, action_plan_uuid,", "the requests to this controller are coming from the top-level", "sort_dir, audit_uuid=audit_uuid, strategy=strategy) @wsme_pecan.wsexpose(ActionPlanCollection, types.uuid, int, wtypes.text, wtypes.text, types.uuid, wtypes.text)", "marker, limit, sort_key, sort_dir, audit_uuid=audit_uuid, strategy=strategy) @wsme_pecan.wsexpose(ActionPlanCollection, types.uuid, int, wtypes.text,", "initial_state=action_plan_to_update.state, new_state=action_plan.state)) if action_plan.state == ap_objects.State.PENDING: launch_action_plan = True if", "patch_val == objects.action_plan.State.PENDING): launch_action_plan = True action_plan_to_update.save() # NOTE: if", "audit_uuid=None, strategy=None): additional_fields = ['audit_uuid', 'strategy_uuid', 'strategy_name'] api_utils.validate_sort_key( sort_key, list(objects.ActionPlan.fields)", "AttributeError: # Ignore fields that aren't exposed in the API", "sort_key, sort_dir, expand, resource_url, audit_uuid=audit_uuid, strategy=strategy) @wsme_pecan.wsexpose(ActionPlan, types.uuid) def get_one(self,", "= pecan.request.context policy.enforce(context, 'action_plan:start', action_plan_to_start, action='action_plan:start') if action_plan_to_start['state'] != \\", "import _ from watcher.api.controllers import base from watcher.api.controllers import link", "self.applier_client = rpcapi.ApplierAPI() from_actionsPlans = False \"\"\"A flag to indicate", "(ap_objects.State.ONGOING, ap_objects.State.CANCELLING), (ap_objects.State.PENDING, ap_objects.State.CANCELLED), ] # todo: improve this in", "def _get_strategy_name(self): return self._strategy_name def _set_strategy_name(self, value): if value and", "API continue if patch_val == wtypes.Unset: patch_val = None if", "WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or", "\"\"\"A list containing a self link and associated action links\"\"\"", "[{'description': 'Test indicator', 'name': 'test_indicator', 'unit': '%'}] sample._global_efficacy = {'description':", "Plan <action_plan_definition>` states, visit :ref:`the Action Plan state machine <action_plan_state_machine>`.", ":param audit_uuid: Optional UUID of an audit, to get only", "= getattr(action_plan, field) except AttributeError: # Ignore fields that aren't", "log import pecan from pecan import rest import wsme from", "url, 'action_plans', action_plan.uuid), link.Link.make_link( 'bookmark', url, 'action_plans', action_plan.uuid, bookmark=True)] return", "@wsme_pecan.wsexpose(ActionPlanCollection, types.uuid, int, wtypes.text, wtypes.text, types.uuid, wtypes.text) def detail(self, marker=None,", "json PATCH document to apply to this action plan. \"\"\"", "# # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law", "Skip fields we do not expose. if not hasattr(self, field):", "'%'} return cls._convert_with_links(sample, 'http://localhost:9322', expand) class ActionPlanCollection(collection.Collection): \"\"\"API representation of", "# limitations under the License. 
\"\"\" An :ref:`Action Plan <action_plan_definition>`", "to handle state transitions state_value = patch.value if state_value and", "collection of action_plans.\"\"\" action_plans = [ActionPlan] \"\"\"A list containing action_plans", "if patch.value is not wtypes.Unset: serialized_patch['value'] = patch.value # todo:", "action_plan_uuid: UUID of an action_plan. \"\"\" action_plan_to_start = api_utils.get_resource( 'ActionPlan',", "+ additional_fields) limit = api_utils.validate_limit(limit) api_utils.validate_sort_dir(sort_dir) marker_obj = None if", "api_utils.JSONPATCH_EXCEPTIONS as e: raise exception.PatchError(patch=patch, reason=e) launch_action_plan = False cancel_action_plan", "as api_utils from watcher.applier import rpcapi from watcher.common import exception", "# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express", "cancel_action_plan = True # Update only the fields that have", "introduced at certain API versions. These fields are only made", "= patch.value if state_value and not hasattr(ap_objects.State, state_value): msg =", "Watcher when an :ref:`Audit <audit_definition>` is successful which implies that", "wtypes.Unset elif value and self._audit_uuid != value: try: audit =", "actionplan is running on\"\"\" def __init__(self, **kwargs): super(ActionPlan, self).__init__() self.fields", "# Ignore fields that aren't exposed in the API continue", "Plan <action_plan_definition>` may be described using standard workflow model description", "_set_strategy_uuid(self, value): if value and self._strategy_uuid != value: self._strategy_uuid =", "{ 'start': ['POST'], 'detail': ['GET'] } def _get_action_plans_collection(self, marker, limit,", "estimated :ref:`global efficacy <efficacy_definition>` alongside a set of :ref:`efficacy indicators", "patch=patch, reason=error_message % dict( initial_state=action_plan_to_update.state, new_state=action_plan.state)) if action_plan.state == ap_objects.State.PENDING:", "policy.enforce(context, 'action_plan:get_all', action='action_plan:get_all') return self._get_action_plans_collection( marker, limit, sort_key, sort_dir, audit_uuid=audit_uuid,", "Certain node fields were introduced at certain API versions. 
These", "return self._strategy_uuid def _set_strategy_uuid(self, value): if value and self._strategy_uuid !=", "pecan.request.context policy.enforce(context, 'action_plan:start', action_plan_to_start, action='action_plan:start') if action_plan_to_start['state'] != \\ objects.action_plan.State.RECOMMENDED:", "= None def _get_efficacy_indicators(self): if self._efficacy_indicators is None: self._set_efficacy_indicators(wtypes.Unset) return", "_set_efficacy_indicators(self, value): efficacy_indicators = [] if value == wtypes.Unset and", "None _efficacy_indicators = None def _get_audit_uuid(self): return self._audit_uuid def _set_audit_uuid(self,", "are composed of several simple :ref:`Actions <action_definition>` ordered in sequential", "wsme_pecan from watcher._i18n import _ from watcher.api.controllers import base from", "self.uuid}) for indicator in _efficacy_indicators: efficacy_indicator = efficacyindicator.EfficacyIndicator( context=pecan.request.context, name=indicator.name,", "marker, limit, sort_key, sort_dir, expand=False, resource_url=None, audit_uuid=None, strategy=None): additional_fields =", "of efficacy indicators associated to this action plan\"\"\" global_efficacy =", "http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed", "action_plan as ap_objects LOG = log.getLogger(__name__) def hide_fields_in_newer_versions(obj): \"\"\"This method", "to this action plan\"\"\" global_efficacy = wtypes.wsattr(types.jsontype, readonly=True) \"\"\"The global", "_(\"State transition not allowed: \" \"(%(initial_state)s -> %(new_state)s)\") raise exception.PatchError(", "<action_definition>` (i.e., a Workflow of :ref:`Actions <action_definition>` belonging to a", "<strategy_definition>` which was used has found a :ref:`Solution <solution_definition>` to", "Rights Reserved. 
# # Licensed under the Apache License, Version", "'test_global_efficacy', 'unit': '%'} return cls._convert_with_links(sample, 'http://localhost:9322', expand) class ActionPlanCollection(collection.Collection): \"\"\"API", "specific language governing permissions and # limitations under the License.", "= cls(uuid='9ef4d84c-41e8-4418-9220-ce55be0436af', state='ONGOING', created_at=datetime.datetime.utcnow(), deleted_at=None, updated_at=datetime.datetime.utcnow()) sample._audit_uuid = 'abcee106-14d3-4515-b744-5a26885cf6f6' sample._efficacy_indicators", "return cls._convert_with_links(sample, 'http://localhost:9322', expand) class ActionPlanCollection(collection.Collection): \"\"\"API representation of a", "which was used has found a :ref:`Solution <solution_definition>` to achieve", "in the API continue if patch_val == wtypes.Unset: patch_val =", "= strategy need_api_sort = api_utils.check_need_api_sort(sort_key, additional_fields) sort_db_key = (sort_key if", "value == wtypes.Unset: return None strategy = None try: if", "pecan.request.context, marker) filters = {} if audit_uuid: filters['audit_uuid'] = audit_uuid", "marker_obj = None if marker: marker_obj = objects.ActionPlan.get_by_uuid( pecan.request.context, marker)", "if strategy: self.strategy_id = strategy.id return strategy def _get_strategy_uuid(self): return", "= [ActionPlan.sample(expand=False)] return sample class ActionPlansController(rest.RestController): \"\"\"REST controller for Actions.\"\"\"", "# you may not use this file except in compliance", "expand) @classmethod def sample(cls, expand=True): sample = cls(uuid='9ef4d84c-41e8-4418-9220-ce55be0436af', state='ONGOING', created_at=datetime.datetime.utcnow(),", "return self._efficacy_indicators def _set_efficacy_indicators(self, value): efficacy_indicators = [] if value", "rest import wsme from wsme import types as wtypes import", "@classmethod def sample(cls, expand=True): sample = cls(uuid='9ef4d84c-41e8-4418-9220-ce55be0436af', state='ONGOING', created_at=datetime.datetime.utcnow(), deleted_at=None,", "resource_url = '/'.join(['action_plans', 'detail']) return self._get_action_plans_collection( marker, limit, sort_key, sort_dir,", "branch). However, Watcher provides abstract interfaces for many of its", "from watcher import objects from watcher.objects import action_plan as ap_objects", "allowed_patch_transitions = [ (ap_objects.State.RECOMMENDED, ap_objects.State.PENDING), (ap_objects.State.RECOMMENDED, ap_objects.State.CANCELLED), (ap_objects.State.ONGOING, ap_objects.State.CANCELLING), (ap_objects.State.PENDING,", "objects.Action.list(pecan.request.context, filters=filters, eager=True) for a in actions: a.state = objects.action.State.CANCELLED", "or utils.is_int_like(value): strategy = objects.Strategy.get( pecan.request.context, value) else: strategy =", "msg = _(\"Invalid state: %(state)s\") raise exception.PatchError( patch=serialized_patch, reason=msg %", "for indicator in _efficacy_indicators: efficacy_indicator = efficacyindicator.EfficacyIndicator( context=pecan.request.context, name=indicator.name, description=indicator.description,", "model description formats such as `Business Process Model and Notation", "wtypes.text, types.uuid, wtypes.text) def detail(self, marker=None, limit=None, sort_key='id', sort_dir='asc', audit_uuid=None,", "when the request's API version matches or exceeds the versions", "# implied. 
# See the License for the specific language", "api_utils.get_resource('ActionPlan', action_plan_uuid) policy.enforce( context, 'action_plan:get', action_plan, action='action_plan:get') return ActionPlan.convert_with_links(action_plan) @wsme_pecan.wsexpose(None,", "sample._efficacy_indicators = [{'description': 'Test indicator', 'name': 'test_indicator', 'unit': '%'}] sample._global_efficacy", "internal object model and the API representation of an action", "= None _strategy_uuid = None _strategy_name = None _efficacy_indicators =", "obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0", "expand, resource_url, audit_uuid=audit_uuid, strategy=strategy) @wsme_pecan.wsexpose(ActionPlan, types.uuid) def get_one(self, action_plan_uuid): \"\"\"Retrieve", "exception from watcher.common import policy from watcher.common import utils from", "to generate and handle more complex :ref:`Action Plan(s) <action_plan_definition>` composed", "not expand: action_plan.unset_fields_except( ['uuid', 'state', 'efficacy_indicators', 'global_efficacy', 'updated_at', 'audit_uuid', 'strategy_uuid',", "strategy = self._get_strategy(value) if strategy: self._strategy_uuid = strategy.uuid def _get_strategy_name(self):", "be split into smaller tasks or commands from an OpenStack", "try: action_plan_dict = action_plan_to_update.as_dict() action_plan = ActionPlan(**api_utils.apply_jsonpatch( action_plan_dict, patch)) except", "`Unified Modeling Language (UML) <http://www.uml.org/>`_. To see the life-cycle and", "under the Apache License, Version 2.0 (the \"License\"); # you", "coming from the top-level resource ActionPlan.\"\"\" _custom_actions = { 'start':", "policy.enforce(context, 'action_plan:delete', action_plan, action='action_plan:delete') allowed_states = (ap_objects.State.SUCCEEDED, ap_objects.State.RECOMMENDED, ap_objects.State.FAILED, ap_objects.State.SUPERSEDED,", "provides abstract interfaces for many of its components, allowing other", "if utils.is_uuid_like(value) or utils.is_int_like(value): strategy = objects.Strategy.get( pecan.request.context, value) else:", "wtypes.Unset: serialized_patch['value'] = patch.value # todo: use state machines to", "the actionplan is running on\"\"\" def __init__(self, **kwargs): super(ActionPlan, self).__init__()", "False cancel_action_plan = False # transitions that are allowed via", "= wtypes.Unset elif value and self._audit_uuid != value: try: audit", "def convert_with_links(rpc_action_plans, limit, url=None, expand=False, **kwargs): ap_collection = ActionPlanCollection() ap_collection.action_plans", "sort_key='id', sort_dir='asc', audit_uuid=None, strategy=None): \"\"\"Retrieve a list of action plans.", "= strategy.uuid def _get_strategy_name(self): return self._strategy_name def _set_strategy_name(self, value): if", "'audit_uuid', kwargs.get('audit_id', wtypes.Unset)) fields.append('strategy_uuid') setattr(self, 'strategy_uuid', kwargs.get('strategy_id', wtypes.Unset)) fields.append('strategy_name') setattr(self,", "workflow model description formats such as `Business Process Model and", "eager=True) policy.enforce(context, 'action_plan:update', action_plan_to_update, action='action_plan:update') try: action_plan_dict = action_plan_to_update.as_dict() action_plan", "-*- # Copyright 2013 Red Hat, Inc. 
[Fragment row: overlapping n-gram excerpts of OpenStack Watcher's action-plan REST API controller, `watcher/api/controllers/v1/action_plan.py` (Apache License 2.0, Copyright 2013 Red Hat, Inc.). The recoverable module docstring and overall structure follow.]

An :ref:`Action Plan <action_plan_definition>` specifies a flow of :ref:`Actions <action_definition>` that should be executed in order to satisfy a given :ref:`Goal <goal_definition>`. It also contains an estimated :ref:`global efficacy <efficacy_definition>` alongside a set of :ref:`efficacy indicators <efficacy_indicator_definition>`.

An :ref:`Action Plan <action_plan_definition>` is generated by Watcher when an :ref:`Audit <audit_definition>` is successful, which implies that the :ref:`Strategy <strategy_definition>` which was used has found a :ref:`Solution <solution_definition>` to achieve the :ref:`Goal <goal_definition>` of this :ref:`Audit <audit_definition>`.

In the default implementation of Watcher, an action plan is composed of a list of successive :ref:`Actions <action_definition>` (i.e., a workflow of :ref:`Actions <action_definition>` belonging to a unique branch). However, Watcher provides abstract interfaces for many of its components, allowing other implementations to generate and handle more complex :ref:`Action Plan(s) <action_plan_definition>` composed of two types of Action Items:

- simple :ref:`Actions <action_definition>`: atomic tasks that cannot be split into smaller tasks or commands from an OpenStack point of view.
- composite Actions: composed of several simple :ref:`Actions <action_definition>` ordered in sequential and/or parallel flows.

An :ref:`Action Plan <action_plan_definition>` may be described using standard workflow model description formats such as `Business Process Model and Notation 2.0 (BPMN 2.0) <http://www.omg.org/spec/BPMN/2.0/>`_ or `Unified Modeling Language (UML) <http://www.uml.org/>`_. To see the life-cycle and description of :ref:`Action Plan <action_plan_definition>` states, visit :ref:`the Action Plan state machine <action_plan_state_machine>`.

Beyond the docstring, the fragments cover: a `hide_fields_in_newer_versions` helper that hides fields introduced in newer API microversions; the WSME types `ActionPlanPatchType` (a JSON-patch type whose `_validate_state` check rejects unknown states), `ActionPlan` (the API representation with `uuid`, `audit_uuid`, `strategy_uuid`/`strategy_name`, `efficacy_indicators`, `global_efficacy`, `state`, `hostname`, and `links` fields, plus a `sample()` built around `http://localhost:9322`), and `ActionPlanCollection`; and an `ActionPlansController` Pecan `RestController` exposing `get_all`, `detail`, `get_one`, `delete`, `patch`, and `start`. `patch` only allows the state transitions RECOMMENDED->PENDING, RECOMMENDED->CANCELLED, ONGOING->CANCELLING, and PENDING->CANCELLED; moving a plan to PENDING (or calling `start` on a RECOMMENDED plan) launches it through the applier RPC API, while cancelling a PENDING or RECOMMENDED plan also marks its actions CANCELLED. `delete` soft-deletes a plan only from the SUCCEEDED, RECOMMENDED, FAILED, SUPERSEDED, or CANCELLED states.
[ "**options) @albums.route('/albums') def albums_route(): options = { \"edit\": False }", "albums = Blueprint('albums', __name__, template_folder='templates') @albums.route('/albums/edit') def albums_edit_route(): options =", "* albums = Blueprint('albums', __name__, template_folder='templates') @albums.route('/albums/edit') def albums_edit_route(): options", "template_folder='templates') @albums.route('/albums/edit') def albums_edit_route(): options = { \"edit\": True }", "def albums_edit_route(): options = { \"edit\": True } return render_template(\"albums.html\",", "render_template(\"albums.html\", **options) @albums.route('/albums') def albums_route(): options = { \"edit\": False", "@albums.route('/albums/edit') def albums_edit_route(): options = { \"edit\": True } return", "import * albums = Blueprint('albums', __name__, template_folder='templates') @albums.route('/albums/edit') def albums_edit_route():", "def albums_route(): options = { \"edit\": False } return render_template(\"albums.html\",", "= { \"edit\": True } return render_template(\"albums.html\", **options) @albums.route('/albums') def", "flask import * albums = Blueprint('albums', __name__, template_folder='templates') @albums.route('/albums/edit') def", "True } return render_template(\"albums.html\", **options) @albums.route('/albums') def albums_route(): options =", "return render_template(\"albums.html\", **options) @albums.route('/albums') def albums_route(): options = { \"edit\":", "options = { \"edit\": True } return render_template(\"albums.html\", **options) @albums.route('/albums')", "{ \"edit\": True } return render_template(\"albums.html\", **options) @albums.route('/albums') def albums_route():", "} return render_template(\"albums.html\", **options) @albums.route('/albums') def albums_route(): options = {", "@albums.route('/albums') def albums_route(): options = { \"edit\": False } return", "albums_route(): options = { \"edit\": False } return render_template(\"albums.html\", **options)", "= Blueprint('albums', __name__, template_folder='templates') @albums.route('/albums/edit') def albums_edit_route(): options = {", "from flask import * albums = Blueprint('albums', __name__, template_folder='templates') @albums.route('/albums/edit')", "\"edit\": True } return render_template(\"albums.html\", **options) @albums.route('/albums') def albums_route(): options", "Blueprint('albums', __name__, template_folder='templates') @albums.route('/albums/edit') def albums_edit_route(): options = { \"edit\":", "albums_edit_route(): options = { \"edit\": True } return render_template(\"albums.html\", **options)", "<reponame>jeonginlee/groove_scheduler from flask import * albums = Blueprint('albums', __name__, template_folder='templates')", "__name__, template_folder='templates') @albums.route('/albums/edit') def albums_edit_route(): options = { \"edit\": True" ]
[ "from dfirtrack_main.models import Division class DivisionList(LoginRequiredMixin, ListView): login_url = '/login'", "division = self.get_object() form = self.form_class(request.POST, instance=division) if form.is_valid(): division", "return render(request, self.template_name, {'form': form}) def post(self, request, *args, **kwargs):", "class DivisionList(LoginRequiredMixin, ListView): login_url = '/login' model = Division template_name", "context class DivisionCreate(LoginRequiredMixin, CreateView): login_url = '/login' model = Division", "DivisionDetail(LoginRequiredMixin, DetailView): login_url = '/login' model = Division template_name =", "import CreateView, UpdateView from dfirtrack_main.forms import DivisionForm from dfirtrack_main.logger.default_logger import", "ListView from django.views.generic.edit import CreateView, UpdateView from dfirtrack_main.forms import DivisionForm", "model = Division form_class = DivisionForm template_name = 'dfirtrack_main/division/division_edit.html' def", "request, *args, **kwargs): form = self.form_class(request.POST) if form.is_valid(): division =", "= '/login' model = Division template_name = 'dfirtrack_main/division/division_list.html' context_object_name =", "get(self, request, *args, **kwargs): form = self.form_class() debug_logger(str(request.user), \" DIVISION_ADD_ENTERED\")", "from django.contrib.auth.mixins import LoginRequiredMixin from django.shortcuts import redirect, render from", "Division class DivisionList(LoginRequiredMixin, ListView): login_url = '/login' model = Division", "= 'division_list' def get_queryset(self): debug_logger(str(self.request.user), \" DIVISION_LIST_ENTERED\") return Division.objects.order_by('division_name') class", "import Division class DivisionList(LoginRequiredMixin, ListView): login_url = '/login' model =", "template_name = 'dfirtrack_main/division/division_list.html' context_object_name = 'division_list' def get_queryset(self): debug_logger(str(self.request.user), \"", "redirect(reverse('division_detail', args=(division.division_id,))) else: return render(request, self.template_name, {'form': form}) class DivisionUpdate(LoginRequiredMixin,", "\" DIVISION_DETAIL_ENTERED\") return context class DivisionCreate(LoginRequiredMixin, CreateView): login_url = '/login'", "division = form.save(commit=False) division.save() division.logger(str(request.user), \" DIVISION_EDIT_EXECUTED\") messages.success(request, 'Division edited')", "'/login' model = Division template_name = 'dfirtrack_main/division/division_list.html' context_object_name = 'division_list'", "= Division template_name = 'dfirtrack_main/division/division_detail.html' def get_context_data(self, **kwargs): context =", "def get_queryset(self): debug_logger(str(self.request.user), \" DIVISION_LIST_ENTERED\") return Division.objects.order_by('division_name') class DivisionDetail(LoginRequiredMixin, DetailView):", "self.get_object() form = self.form_class(request.POST, instance=division) if form.is_valid(): division = form.save(commit=False)", "UpdateView from dfirtrack_main.forms import DivisionForm from dfirtrack_main.logger.default_logger import debug_logger from", "class DivisionDetail(LoginRequiredMixin, DetailView): login_url = '/login' model = Division template_name", "post(self, request, *args, **kwargs): form = self.form_class(request.POST) if form.is_valid(): division", "**kwargs): division = self.get_object() form = self.form_class(instance=division) division.logger(str(request.user), \" DIVISION_EDIT_ENTERED\")", "login_url = '/login' model = 
Division template_name = 'dfirtrack_main/division/division_list.html' context_object_name", "DivisionForm template_name = 'dfirtrack_main/division/division_edit.html' def get(self, request, *args, **kwargs): division", "\" DIVISION_LIST_ENTERED\") return Division.objects.order_by('division_name') class DivisionDetail(LoginRequiredMixin, DetailView): login_url = '/login'", "= form.save(commit=False) division.save() division.logger(str(request.user), \" DIVISION_ADD_EXECUTED\") messages.success(request, 'Division added') return", "self.form_class(request.POST, instance=division) if form.is_valid(): division = form.save(commit=False) division.save() division.logger(str(request.user), \"", "\" DIVISION_ADD_ENTERED\") return render(request, self.template_name, {'form': form}) def post(self, request,", "from django.views.generic import DetailView, ListView from django.views.generic.edit import CreateView, UpdateView", "dfirtrack_main.forms import DivisionForm from dfirtrack_main.logger.default_logger import debug_logger from dfirtrack_main.models import", "import debug_logger from dfirtrack_main.models import Division class DivisionList(LoginRequiredMixin, ListView): login_url", "from django.urls import reverse from django.views.generic import DetailView, ListView from", "= DivisionForm template_name = 'dfirtrack_main/division/division_add.html' def get(self, request, *args, **kwargs):", "debug_logger from dfirtrack_main.models import Division class DivisionList(LoginRequiredMixin, ListView): login_url =", "self.form_class() debug_logger(str(request.user), \" DIVISION_ADD_ENTERED\") return render(request, self.template_name, {'form': form}) def", "**kwargs): form = self.form_class(request.POST) if form.is_valid(): division = form.save(commit=False) division.save()", "ListView): login_url = '/login' model = Division template_name = 'dfirtrack_main/division/division_list.html'", "reverse from django.views.generic import DetailView, ListView from django.views.generic.edit import CreateView,", "template_name = 'dfirtrack_main/division/division_detail.html' def get_context_data(self, **kwargs): context = super().get_context_data(**kwargs) division", "self.template_name, {'form': form}) class DivisionUpdate(LoginRequiredMixin, UpdateView): login_url = '/login' model", "get(self, request, *args, **kwargs): division = self.get_object() form = self.form_class(instance=division)", "edited') return redirect(reverse('division_detail', args=(division.division_id,))) else: return render(request, self.template_name, {'form': form})", "DIVISION_EDIT_EXECUTED\") messages.success(request, 'Division edited') return redirect(reverse('division_detail', args=(division.division_id,))) else: return render(request,", "django.shortcuts import redirect, render from django.urls import reverse from django.views.generic", "form = self.form_class(request.POST, instance=division) if form.is_valid(): division = form.save(commit=False) division.save()", "**kwargs): form = self.form_class() debug_logger(str(request.user), \" DIVISION_ADD_ENTERED\") return render(request, self.template_name,", "{'form': form}) def post(self, request, *args, **kwargs): division = self.get_object()", "division = self.get_object() form = self.form_class(instance=division) division.logger(str(request.user), \" DIVISION_EDIT_ENTERED\") return", "Division form_class = DivisionForm template_name = 'dfirtrack_main/division/division_add.html' def get(self, request,", "form.save(commit=False) division.save() division.logger(str(request.user), \" 
DIVISION_ADD_EXECUTED\") messages.success(request, 'Division added') return redirect(reverse('division_detail',", "import reverse from django.views.generic import DetailView, ListView from django.views.generic.edit import", "else: return render(request, self.template_name, {'form': form}) class DivisionUpdate(LoginRequiredMixin, UpdateView): login_url", "= 'dfirtrack_main/division/division_list.html' context_object_name = 'division_list' def get_queryset(self): debug_logger(str(self.request.user), \" DIVISION_LIST_ENTERED\")", "form}) def post(self, request, *args, **kwargs): division = self.get_object() form", "Division.objects.order_by('division_name') class DivisionDetail(LoginRequiredMixin, DetailView): login_url = '/login' model = Division", "UpdateView): login_url = '/login' model = Division form_class = DivisionForm", "get_queryset(self): debug_logger(str(self.request.user), \" DIVISION_LIST_ENTERED\") return Division.objects.order_by('division_name') class DivisionDetail(LoginRequiredMixin, DetailView): login_url", "self.template_name, {'form': form}) def post(self, request, *args, **kwargs): division =", "'dfirtrack_main/division/division_detail.html' def get_context_data(self, **kwargs): context = super().get_context_data(**kwargs) division = self.object", "= self.get_object() form = self.form_class(request.POST, instance=division) if form.is_valid(): division =", "*args, **kwargs): form = self.form_class() debug_logger(str(request.user), \" DIVISION_ADD_ENTERED\") return render(request,", "= DivisionForm template_name = 'dfirtrack_main/division/division_edit.html' def get(self, request, *args, **kwargs):", "= Division template_name = 'dfirtrack_main/division/division_list.html' context_object_name = 'division_list' def get_queryset(self):", "django.urls import reverse from django.views.generic import DetailView, ListView from django.views.generic.edit", "= 'dfirtrack_main/division/division_edit.html' def get(self, request, *args, **kwargs): division = self.get_object()", "Division template_name = 'dfirtrack_main/division/division_list.html' context_object_name = 'division_list' def get_queryset(self): debug_logger(str(self.request.user),", "django.views.generic import DetailView, ListView from django.views.generic.edit import CreateView, UpdateView from", "args=(division.division_id,))) else: return render(request, self.template_name, {'form': form}) class DivisionUpdate(LoginRequiredMixin, UpdateView):", "form = self.form_class(instance=division) division.logger(str(request.user), \" DIVISION_EDIT_ENTERED\") return render(request, self.template_name, {'form':", "= self.form_class(instance=division) division.logger(str(request.user), \" DIVISION_EDIT_ENTERED\") return render(request, self.template_name, {'form': form})", "**kwargs): context = super().get_context_data(**kwargs) division = self.object division.logger(str(self.request.user), \" DIVISION_DETAIL_ENTERED\")", "'Division added') return redirect(reverse('division_detail', args=(division.division_id,))) else: return render(request, self.template_name, {'form':", "dfirtrack_main.models import Division class DivisionList(LoginRequiredMixin, ListView): login_url = '/login' model", "Division form_class = DivisionForm template_name = 'dfirtrack_main/division/division_edit.html' def get(self, request,", "division.save() division.logger(str(request.user), \" DIVISION_EDIT_EXECUTED\") messages.success(request, 'Division edited') return redirect(reverse('division_detail', args=(division.division_id,)))", "DIVISION_ADD_EXECUTED\") 
messages.success(request, 'Division added') return redirect(reverse('division_detail', args=(division.division_id,))) else: return render(request,", "login_url = '/login' model = Division template_name = 'dfirtrack_main/division/division_detail.html' def", "CreateView): login_url = '/login' model = Division form_class = DivisionForm", "DIVISION_DETAIL_ENTERED\") return context class DivisionCreate(LoginRequiredMixin, CreateView): login_url = '/login' model", "= self.form_class(request.POST) if form.is_valid(): division = form.save(commit=False) division.save() division.logger(str(request.user), \"", "division = self.object division.logger(str(self.request.user), \" DIVISION_DETAIL_ENTERED\") return context class DivisionCreate(LoginRequiredMixin,", "DivisionForm template_name = 'dfirtrack_main/division/division_add.html' def get(self, request, *args, **kwargs): form", "def post(self, request, *args, **kwargs): division = self.get_object() form =", "model = Division form_class = DivisionForm template_name = 'dfirtrack_main/division/division_add.html' def", "DIVISION_EDIT_ENTERED\") return render(request, self.template_name, {'form': form}) def post(self, request, *args,", "'/login' model = Division form_class = DivisionForm template_name = 'dfirtrack_main/division/division_add.html'", "def get(self, request, *args, **kwargs): form = self.form_class() debug_logger(str(request.user), \"", "render from django.urls import reverse from django.views.generic import DetailView, ListView", "self.form_class(request.POST) if form.is_valid(): division = form.save(commit=False) division.save() division.logger(str(request.user), \" DIVISION_ADD_EXECUTED\")", "django.contrib import messages from django.contrib.auth.mixins import LoginRequiredMixin from django.shortcuts import", "'/login' model = Division form_class = DivisionForm template_name = 'dfirtrack_main/division/division_edit.html'", "\" DIVISION_EDIT_ENTERED\") return render(request, self.template_name, {'form': form}) def post(self, request,", "model = Division template_name = 'dfirtrack_main/division/division_list.html' context_object_name = 'division_list' def", "self.template_name, {'form': form}) def post(self, request, *args, **kwargs): form =", "render(request, self.template_name, {'form': form}) def post(self, request, *args, **kwargs): division", "debug_logger(str(request.user), \" DIVISION_ADD_ENTERED\") return render(request, self.template_name, {'form': form}) def post(self,", "import redirect, render from django.urls import reverse from django.views.generic import", "DivisionCreate(LoginRequiredMixin, CreateView): login_url = '/login' model = Division form_class =", "def get_context_data(self, **kwargs): context = super().get_context_data(**kwargs) division = self.object division.logger(str(self.request.user),", "= Division form_class = DivisionForm template_name = 'dfirtrack_main/division/division_add.html' def get(self,", "= 'dfirtrack_main/division/division_detail.html' def get_context_data(self, **kwargs): context = super().get_context_data(**kwargs) division =", "added') return redirect(reverse('division_detail', args=(division.division_id,))) else: return render(request, self.template_name, {'form': form})", "from django.contrib import messages from django.contrib.auth.mixins import LoginRequiredMixin from django.shortcuts", "= super().get_context_data(**kwargs) division = self.object division.logger(str(self.request.user), \" DIVISION_DETAIL_ENTERED\") return context", "if form.is_valid(): division = form.save(commit=False) 
division.save() division.logger(str(request.user), \" DIVISION_EDIT_EXECUTED\") messages.success(request,", "get_context_data(self, **kwargs): context = super().get_context_data(**kwargs) division = self.object division.logger(str(self.request.user), \"", "division.logger(str(request.user), \" DIVISION_ADD_EXECUTED\") messages.success(request, 'Division added') return redirect(reverse('division_detail', args=(division.division_id,))) else:", "class DivisionUpdate(LoginRequiredMixin, UpdateView): login_url = '/login' model = Division form_class", "request, *args, **kwargs): division = self.get_object() form = self.form_class(instance=division) division.logger(str(request.user),", "= self.get_object() form = self.form_class(instance=division) division.logger(str(request.user), \" DIVISION_EDIT_ENTERED\") return render(request,", "DetailView, ListView from django.views.generic.edit import CreateView, UpdateView from dfirtrack_main.forms import", "<filename>Incident-Response/Tools/dfirtrack/dfirtrack_main/views/division_views.py from django.contrib import messages from django.contrib.auth.mixins import LoginRequiredMixin from", "*args, **kwargs): form = self.form_class(request.POST) if form.is_valid(): division = form.save(commit=False)", "= Division form_class = DivisionForm template_name = 'dfirtrack_main/division/division_edit.html' def get(self,", "division = form.save(commit=False) division.save() division.logger(str(request.user), \" DIVISION_ADD_EXECUTED\") messages.success(request, 'Division added')", "DIVISION_LIST_ENTERED\") return Division.objects.order_by('division_name') class DivisionDetail(LoginRequiredMixin, DetailView): login_url = '/login' model", "= self.object division.logger(str(self.request.user), \" DIVISION_DETAIL_ENTERED\") return context class DivisionCreate(LoginRequiredMixin, CreateView):", "return context class DivisionCreate(LoginRequiredMixin, CreateView): login_url = '/login' model =", "form.save(commit=False) division.save() division.logger(str(request.user), \" DIVISION_EDIT_EXECUTED\") messages.success(request, 'Division edited') return redirect(reverse('division_detail',", "'dfirtrack_main/division/division_list.html' context_object_name = 'division_list' def get_queryset(self): debug_logger(str(self.request.user), \" DIVISION_LIST_ENTERED\") return", "dfirtrack_main.logger.default_logger import debug_logger from dfirtrack_main.models import Division class DivisionList(LoginRequiredMixin, ListView):", "DivisionForm from dfirtrack_main.logger.default_logger import debug_logger from dfirtrack_main.models import Division class", "template_name = 'dfirtrack_main/division/division_edit.html' def get(self, request, *args, **kwargs): division =", "login_url = '/login' model = Division form_class = DivisionForm template_name", "form}) def post(self, request, *args, **kwargs): form = self.form_class(request.POST) if", "render(request, self.template_name, {'form': form}) class DivisionUpdate(LoginRequiredMixin, UpdateView): login_url = '/login'", "from dfirtrack_main.logger.default_logger import debug_logger from dfirtrack_main.models import Division class DivisionList(LoginRequiredMixin,", "template_name = 'dfirtrack_main/division/division_add.html' def get(self, request, *args, **kwargs): form =", "'dfirtrack_main/division/division_edit.html' def get(self, request, *args, **kwargs): division = self.get_object() form", "import DetailView, ListView from django.views.generic.edit import CreateView, UpdateView from dfirtrack_main.forms", "CreateView, UpdateView from 
dfirtrack_main.forms import DivisionForm from dfirtrack_main.logger.default_logger import debug_logger", "*args, **kwargs): division = self.get_object() form = self.form_class(instance=division) division.logger(str(request.user), \"", "class DivisionCreate(LoginRequiredMixin, CreateView): login_url = '/login' model = Division form_class", "messages.success(request, 'Division added') return redirect(reverse('division_detail', args=(division.division_id,))) else: return render(request, self.template_name,", "form.is_valid(): division = form.save(commit=False) division.save() division.logger(str(request.user), \" DIVISION_ADD_EXECUTED\") messages.success(request, 'Division", "return redirect(reverse('division_detail', args=(division.division_id,))) else: return render(request, self.template_name, {'form': form}) class", "DetailView): login_url = '/login' model = Division template_name = 'dfirtrack_main/division/division_detail.html'", "redirect, render from django.urls import reverse from django.views.generic import DetailView,", "post(self, request, *args, **kwargs): division = self.get_object() form = self.form_class(request.POST,", "*args, **kwargs): division = self.get_object() form = self.form_class(request.POST, instance=division) if", "self.get_object() form = self.form_class(instance=division) division.logger(str(request.user), \" DIVISION_EDIT_ENTERED\") return render(request, self.template_name,", "messages from django.contrib.auth.mixins import LoginRequiredMixin from django.shortcuts import redirect, render", "'dfirtrack_main/division/division_add.html' def get(self, request, *args, **kwargs): form = self.form_class() debug_logger(str(request.user),", "context_object_name = 'division_list' def get_queryset(self): debug_logger(str(self.request.user), \" DIVISION_LIST_ENTERED\") return Division.objects.order_by('division_name')", "DivisionUpdate(LoginRequiredMixin, UpdateView): login_url = '/login' model = Division form_class =", "import messages from django.contrib.auth.mixins import LoginRequiredMixin from django.shortcuts import redirect,", "super().get_context_data(**kwargs) division = self.object division.logger(str(self.request.user), \" DIVISION_DETAIL_ENTERED\") return context class", "'/login' model = Division template_name = 'dfirtrack_main/division/division_detail.html' def get_context_data(self, **kwargs):", "form.is_valid(): division = form.save(commit=False) division.save() division.logger(str(request.user), \" DIVISION_EDIT_EXECUTED\") messages.success(request, 'Division", "def post(self, request, *args, **kwargs): form = self.form_class(request.POST) if form.is_valid():", "model = Division template_name = 'dfirtrack_main/division/division_detail.html' def get_context_data(self, **kwargs): context", "Division template_name = 'dfirtrack_main/division/division_detail.html' def get_context_data(self, **kwargs): context = super().get_context_data(**kwargs)", "'Division edited') return redirect(reverse('division_detail', args=(division.division_id,))) else: return render(request, self.template_name, {'form':", "context = super().get_context_data(**kwargs) division = self.object division.logger(str(self.request.user), \" DIVISION_DETAIL_ENTERED\") return", "request, *args, **kwargs): form = self.form_class() debug_logger(str(request.user), \" DIVISION_ADD_ENTERED\") return", "form_class = DivisionForm template_name = 'dfirtrack_main/division/division_add.html' def get(self, request, *args,", "return render(request, self.template_name, {'form': form}) class 
DivisionUpdate(LoginRequiredMixin, UpdateView): login_url =", "def get(self, request, *args, **kwargs): division = self.get_object() form =", "django.views.generic.edit import CreateView, UpdateView from dfirtrack_main.forms import DivisionForm from dfirtrack_main.logger.default_logger", "import DivisionForm from dfirtrack_main.logger.default_logger import debug_logger from dfirtrack_main.models import Division", "= '/login' model = Division form_class = DivisionForm template_name =", "instance=division) if form.is_valid(): division = form.save(commit=False) division.save() division.logger(str(request.user), \" DIVISION_EDIT_EXECUTED\")", "form_class = DivisionForm template_name = 'dfirtrack_main/division/division_edit.html' def get(self, request, *args,", "form}) class DivisionUpdate(LoginRequiredMixin, UpdateView): login_url = '/login' model = Division", "if form.is_valid(): division = form.save(commit=False) division.save() division.logger(str(request.user), \" DIVISION_ADD_EXECUTED\") messages.success(request,", "render(request, self.template_name, {'form': form}) def post(self, request, *args, **kwargs): form", "LoginRequiredMixin from django.shortcuts import redirect, render from django.urls import reverse", "= '/login' model = Division template_name = 'dfirtrack_main/division/division_detail.html' def get_context_data(self,", "request, *args, **kwargs): division = self.get_object() form = self.form_class(request.POST, instance=division)", "from dfirtrack_main.forms import DivisionForm from dfirtrack_main.logger.default_logger import debug_logger from dfirtrack_main.models", "= self.form_class() debug_logger(str(request.user), \" DIVISION_ADD_ENTERED\") return render(request, self.template_name, {'form': form})", "messages.success(request, 'Division edited') return redirect(reverse('division_detail', args=(division.division_id,))) else: return render(request, self.template_name,", "form = self.form_class(request.POST) if form.is_valid(): division = form.save(commit=False) division.save() division.logger(str(request.user),", "return Division.objects.order_by('division_name') class DivisionDetail(LoginRequiredMixin, DetailView): login_url = '/login' model =", "from django.shortcuts import redirect, render from django.urls import reverse from", "division.logger(str(request.user), \" DIVISION_EDIT_ENTERED\") return render(request, self.template_name, {'form': form}) def post(self,", "import LoginRequiredMixin from django.shortcuts import redirect, render from django.urls import", "debug_logger(str(self.request.user), \" DIVISION_LIST_ENTERED\") return Division.objects.order_by('division_name') class DivisionDetail(LoginRequiredMixin, DetailView): login_url =", "from django.views.generic.edit import CreateView, UpdateView from dfirtrack_main.forms import DivisionForm from", "{'form': form}) class DivisionUpdate(LoginRequiredMixin, UpdateView): login_url = '/login' model =", "= self.form_class(request.POST, instance=division) if form.is_valid(): division = form.save(commit=False) division.save() division.logger(str(request.user),", "division.logger(str(self.request.user), \" DIVISION_DETAIL_ENTERED\") return context class DivisionCreate(LoginRequiredMixin, CreateView): login_url =", "'division_list' def get_queryset(self): debug_logger(str(self.request.user), \" DIVISION_LIST_ENTERED\") return Division.objects.order_by('division_name') class DivisionDetail(LoginRequiredMixin,", "DIVISION_ADD_ENTERED\") return render(request, self.template_name, {'form': form}) def post(self, request, 
*args,", "\" DIVISION_EDIT_EXECUTED\") messages.success(request, 'Division edited') return redirect(reverse('division_detail', args=(division.division_id,))) else: return", "DivisionList(LoginRequiredMixin, ListView): login_url = '/login' model = Division template_name =", "\" DIVISION_ADD_EXECUTED\") messages.success(request, 'Division added') return redirect(reverse('division_detail', args=(division.division_id,))) else: return", "self.form_class(instance=division) division.logger(str(request.user), \" DIVISION_EDIT_ENTERED\") return render(request, self.template_name, {'form': form}) def", "= 'dfirtrack_main/division/division_add.html' def get(self, request, *args, **kwargs): form = self.form_class()", "division.save() division.logger(str(request.user), \" DIVISION_ADD_EXECUTED\") messages.success(request, 'Division added') return redirect(reverse('division_detail', args=(division.division_id,)))", "**kwargs): division = self.get_object() form = self.form_class(request.POST, instance=division) if form.is_valid():", "division.logger(str(request.user), \" DIVISION_EDIT_EXECUTED\") messages.success(request, 'Division edited') return redirect(reverse('division_detail', args=(division.division_id,))) else:", "form = self.form_class() debug_logger(str(request.user), \" DIVISION_ADD_ENTERED\") return render(request, self.template_name, {'form':", "{'form': form}) def post(self, request, *args, **kwargs): form = self.form_class(request.POST)", "django.contrib.auth.mixins import LoginRequiredMixin from django.shortcuts import redirect, render from django.urls", "self.object division.logger(str(self.request.user), \" DIVISION_DETAIL_ENTERED\") return context class DivisionCreate(LoginRequiredMixin, CreateView): login_url", "= form.save(commit=False) division.save() division.logger(str(request.user), \" DIVISION_EDIT_EXECUTED\") messages.success(request, 'Division edited') return" ]
[ "help='choose a model: TextCNN') parser.add_argument('--embedding', default='pre_trained', type=str, help='random or pre_trained')", "# train config.n_vocab = len(vocab) model = x.Model().to(config.device) init_network(model) print(model.parameters)", "np.random.seed(1) torch.manual_seed(1) torch.cuda.manual_seed_all(1) torch.backends.cudnn.deterministic = True # 保证每次结果一样 start_time =", "腾讯:embedding_Tencent.npz, 随机初始化:random # embedding = 'random' model_name = args.model #", "build_iterator, get_time_dif x = import_module('models.' + model_name) from config import", "import import_module import argparse parser = argparse.ArgumentParser(description='Chinese Text Classification') parser.add_argument('--model',", "= build_iterator(dev_data, config) test_iter = build_iterator(test_data, config) time_dif = get_time_dif(start_time)", "utils import build_dataset, build_iterator, get_time_dif x = import_module('models.' + model_name)", "+ model_name) from config import Config config = Config(dataset) np.random.seed(1)", "train, init_network from importlib import import_module import argparse parser =", "train_data, dev_data, test_data = build_dataset(config, args.word) train_iter = build_iterator(train_data, config)", "= build_iterator(test_data, config) time_dif = get_time_dif(start_time) print(\"Time usage:\", time_dif) #", "build_dataset, build_iterator, get_time_dif x = import_module('models.' + model_name) from config", "dataset = 'THUCNews' # 数据集 # 搜狗新闻:embedding_SougouNews.npz, 腾讯:embedding_Tencent.npz, 随机初始化:random #", "# 保证每次结果一样 start_time = time.time() print(\"Loading data...\") vocab, train_data, dev_data,", "get_time_dif(start_time) print(\"Time usage:\", time_dif) # train config.n_vocab = len(vocab) model", "'THUCNews' # 数据集 # 搜狗新闻:embedding_SougouNews.npz, 腾讯:embedding_Tencent.npz, 随机初始化:random # embedding =", "train_eval import train, init_network from importlib import import_module import argparse", "default=False, type=bool, help='True for word, False for char') args =", "__name__ == '__main__': dataset = 'THUCNews' # 数据集 # 搜狗新闻:embedding_SougouNews.npz,", "print(\"Time usage:\", time_dif) # train config.n_vocab = len(vocab) model =", "= import_module('models.' 
+ model_name) from config import Config config =", "= argparse.ArgumentParser(description='Chinese Text Classification') parser.add_argument('--model', type=str, required=True, help='choose a model:", "随机初始化:random # embedding = 'random' model_name = args.model # TextCNN", "config = Config(dataset) np.random.seed(1) torch.manual_seed(1) torch.cuda.manual_seed_all(1) torch.backends.cudnn.deterministic = True #", "parser.parse_args() if __name__ == '__main__': dataset = 'THUCNews' # 数据集", "= time.time() print(\"Loading data...\") vocab, train_data, dev_data, test_data = build_dataset(config,", "= 'random' model_name = args.model # TextCNN from utils import", "config) test_iter = build_iterator(test_data, config) time_dif = get_time_dif(start_time) print(\"Time usage:\",", "importlib import import_module import argparse parser = argparse.ArgumentParser(description='Chinese Text Classification')", "build_dataset(config, args.word) train_iter = build_iterator(train_data, config) dev_iter = build_iterator(dev_data, config)", "import time import torch import numpy as np from train_eval", "# embedding = 'random' model_name = args.model # TextCNN from", "parser.add_argument('--model', type=str, required=True, help='choose a model: TextCNN') parser.add_argument('--embedding', default='pre_trained', type=str,", "model_name) from config import Config config = Config(dataset) np.random.seed(1) torch.manual_seed(1)", "len(vocab) model = x.Model().to(config.device) init_network(model) print(model.parameters) train(config, model, train_iter, dev_iter,", "torch import numpy as np from train_eval import train, init_network", "# TextCNN from utils import build_dataset, build_iterator, get_time_dif x =", "args.word) train_iter = build_iterator(train_data, config) dev_iter = build_iterator(dev_data, config) test_iter", "usage:\", time_dif) # train config.n_vocab = len(vocab) model = x.Model().to(config.device)", "help='random or pre_trained') parser.add_argument('--word', default=False, type=bool, help='True for word, False", "True # 保证每次结果一样 start_time = time.time() print(\"Loading data...\") vocab, train_data,", "for word, False for char') args = parser.parse_args() if __name__", "torch.backends.cudnn.deterministic = True # 保证每次结果一样 start_time = time.time() print(\"Loading data...\")", "torch.cuda.manual_seed_all(1) torch.backends.cudnn.deterministic = True # 保证每次结果一样 start_time = time.time() print(\"Loading", "test_iter = build_iterator(test_data, config) time_dif = get_time_dif(start_time) print(\"Time usage:\", time_dif)", "get_time_dif x = import_module('models.' 
+ model_name) from config import Config", "TextCNN') parser.add_argument('--embedding', default='pre_trained', type=str, help='random or pre_trained') parser.add_argument('--word', default=False, type=bool,", "= parser.parse_args() if __name__ == '__main__': dataset = 'THUCNews' #", "torch.manual_seed(1) torch.cuda.manual_seed_all(1) torch.backends.cudnn.deterministic = True # 保证每次结果一样 start_time = time.time()", "data...\") vocab, train_data, dev_data, test_data = build_dataset(config, args.word) train_iter =", "coding: UTF-8 import time import torch import numpy as np", "np from train_eval import train, init_network from importlib import import_module", "dev_iter = build_iterator(dev_data, config) test_iter = build_iterator(test_data, config) time_dif =", "config) dev_iter = build_iterator(dev_data, config) test_iter = build_iterator(test_data, config) time_dif", "model: TextCNN') parser.add_argument('--embedding', default='pre_trained', type=str, help='random or pre_trained') parser.add_argument('--word', default=False,", "= build_iterator(train_data, config) dev_iter = build_iterator(dev_data, config) test_iter = build_iterator(test_data,", "import_module('models.' + model_name) from config import Config config = Config(dataset)", "type=str, help='random or pre_trained') parser.add_argument('--word', default=False, type=bool, help='True for word,", "import argparse parser = argparse.ArgumentParser(description='Chinese Text Classification') parser.add_argument('--model', type=str, required=True,", "from config import Config config = Config(dataset) np.random.seed(1) torch.manual_seed(1) torch.cuda.manual_seed_all(1)", "args = parser.parse_args() if __name__ == '__main__': dataset = 'THUCNews'", "default='pre_trained', type=str, help='random or pre_trained') parser.add_argument('--word', default=False, type=bool, help='True for", "False for char') args = parser.parse_args() if __name__ == '__main__':", "== '__main__': dataset = 'THUCNews' # 数据集 # 搜狗新闻:embedding_SougouNews.npz, 腾讯:embedding_Tencent.npz,", "dev_data, test_data = build_dataset(config, args.word) train_iter = build_iterator(train_data, config) dev_iter", "model_name = args.model # TextCNN from utils import build_dataset, build_iterator,", "time.time() print(\"Loading data...\") vocab, train_data, dev_data, test_data = build_dataset(config, args.word)", "保证每次结果一样 start_time = time.time() print(\"Loading data...\") vocab, train_data, dev_data, test_data", "# coding: UTF-8 import time import torch import numpy as", "import Config config = Config(dataset) np.random.seed(1) torch.manual_seed(1) torch.cuda.manual_seed_all(1) torch.backends.cudnn.deterministic =", "import_module import argparse parser = argparse.ArgumentParser(description='Chinese Text Classification') parser.add_argument('--model', type=str,", "parser.add_argument('--word', default=False, type=bool, help='True for word, False for char') args", "config) time_dif = get_time_dif(start_time) print(\"Time usage:\", time_dif) # train config.n_vocab", "import numpy as np from train_eval import train, init_network from", "a model: TextCNN') parser.add_argument('--embedding', default='pre_trained', type=str, help='random or pre_trained') parser.add_argument('--word',", "= get_time_dif(start_time) print(\"Time usage:\", time_dif) # train config.n_vocab = len(vocab)", "import train, init_network from importlib import import_module import argparse parser", "argparse.ArgumentParser(description='Chinese Text Classification') parser.add_argument('--model', type=str, 
required=True, help='choose a model: TextCNN')", "from train_eval import train, init_network from importlib import import_module import", "build_iterator(train_data, config) dev_iter = build_iterator(dev_data, config) test_iter = build_iterator(test_data, config)", "build_iterator(test_data, config) time_dif = get_time_dif(start_time) print(\"Time usage:\", time_dif) # train", "numpy as np from train_eval import train, init_network from importlib", "for char') args = parser.parse_args() if __name__ == '__main__': dataset", "print(\"Loading data...\") vocab, train_data, dev_data, test_data = build_dataset(config, args.word) train_iter", "Text Classification') parser.add_argument('--model', type=str, required=True, help='choose a model: TextCNN') parser.add_argument('--embedding',", "UTF-8 import time import torch import numpy as np from", "init_network from importlib import import_module import argparse parser = argparse.ArgumentParser(description='Chinese", "if __name__ == '__main__': dataset = 'THUCNews' # 数据集 #", "start_time = time.time() print(\"Loading data...\") vocab, train_data, dev_data, test_data =", "vocab, train_data, dev_data, test_data = build_dataset(config, args.word) train_iter = build_iterator(train_data,", "import build_dataset, build_iterator, get_time_dif x = import_module('models.' + model_name) from", "parser = argparse.ArgumentParser(description='Chinese Text Classification') parser.add_argument('--model', type=str, required=True, help='choose a", "'random' model_name = args.model # TextCNN from utils import build_dataset,", "搜狗新闻:embedding_SougouNews.npz, 腾讯:embedding_Tencent.npz, 随机初始化:random # embedding = 'random' model_name = args.model", "train_iter = build_iterator(train_data, config) dev_iter = build_iterator(dev_data, config) test_iter =", "'__main__': dataset = 'THUCNews' # 数据集 # 搜狗新闻:embedding_SougouNews.npz, 腾讯:embedding_Tencent.npz, 随机初始化:random", "import torch import numpy as np from train_eval import train,", "as np from train_eval import train, init_network from importlib import", "help='True for word, False for char') args = parser.parse_args() if", "type=str, required=True, help='choose a model: TextCNN') parser.add_argument('--embedding', default='pre_trained', type=str, help='random", "required=True, help='choose a model: TextCNN') parser.add_argument('--embedding', default='pre_trained', type=str, help='random or", "train config.n_vocab = len(vocab) model = x.Model().to(config.device) init_network(model) print(model.parameters) train(config,", "parser.add_argument('--embedding', default='pre_trained', type=str, help='random or pre_trained') parser.add_argument('--word', default=False, type=bool, help='True", "args.model # TextCNN from utils import build_dataset, build_iterator, get_time_dif x", "time import torch import numpy as np from train_eval import", "# 数据集 # 搜狗新闻:embedding_SougouNews.npz, 腾讯:embedding_Tencent.npz, 随机初始化:random # embedding = 'random'", "embedding = 'random' model_name = args.model # TextCNN from utils", "= args.model # TextCNN from utils import build_dataset, build_iterator, get_time_dif", "argparse parser = argparse.ArgumentParser(description='Chinese Text Classification') parser.add_argument('--model', type=str, required=True, help='choose", "# 搜狗新闻:embedding_SougouNews.npz, 腾讯:embedding_Tencent.npz, 随机初始化:random # embedding = 'random' model_name =", "Config(dataset) np.random.seed(1) torch.manual_seed(1) torch.cuda.manual_seed_all(1) torch.backends.cudnn.deterministic = True # 保证每次结果一样 start_time", "from utils import 
build_dataset, build_iterator, get_time_dif x = import_module('models.' +", "or pre_trained') parser.add_argument('--word', default=False, type=bool, help='True for word, False for", "build_iterator(dev_data, config) test_iter = build_iterator(test_data, config) time_dif = get_time_dif(start_time) print(\"Time", "test_data = build_dataset(config, args.word) train_iter = build_iterator(train_data, config) dev_iter =", "char') args = parser.parse_args() if __name__ == '__main__': dataset =", "= 'THUCNews' # 数据集 # 搜狗新闻:embedding_SougouNews.npz, 腾讯:embedding_Tencent.npz, 随机初始化:random # embedding", "= True # 保证每次结果一样 start_time = time.time() print(\"Loading data...\") vocab,", "model = x.Model().to(config.device) init_network(model) print(model.parameters) train(config, model, train_iter, dev_iter, test_iter)", "= len(vocab) model = x.Model().to(config.device) init_network(model) print(model.parameters) train(config, model, train_iter,", "Classification') parser.add_argument('--model', type=str, required=True, help='choose a model: TextCNN') parser.add_argument('--embedding', default='pre_trained',", "Config config = Config(dataset) np.random.seed(1) torch.manual_seed(1) torch.cuda.manual_seed_all(1) torch.backends.cudnn.deterministic = True", "= Config(dataset) np.random.seed(1) torch.manual_seed(1) torch.cuda.manual_seed_all(1) torch.backends.cudnn.deterministic = True # 保证每次结果一样", "pre_trained') parser.add_argument('--word', default=False, type=bool, help='True for word, False for char')", "config.n_vocab = len(vocab) model = x.Model().to(config.device) init_network(model) print(model.parameters) train(config, model,", "= build_dataset(config, args.word) train_iter = build_iterator(train_data, config) dev_iter = build_iterator(dev_data,", "type=bool, help='True for word, False for char') args = parser.parse_args()", "from importlib import import_module import argparse parser = argparse.ArgumentParser(description='Chinese Text", "time_dif = get_time_dif(start_time) print(\"Time usage:\", time_dif) # train config.n_vocab =", "数据集 # 搜狗新闻:embedding_SougouNews.npz, 腾讯:embedding_Tencent.npz, 随机初始化:random # embedding = 'random' model_name", "x = import_module('models.' + model_name) from config import Config config", "time_dif) # train config.n_vocab = len(vocab) model = x.Model().to(config.device) init_network(model)", "config import Config config = Config(dataset) np.random.seed(1) torch.manual_seed(1) torch.cuda.manual_seed_all(1) torch.backends.cudnn.deterministic", "TextCNN from utils import build_dataset, build_iterator, get_time_dif x = import_module('models.'", "word, False for char') args = parser.parse_args() if __name__ ==" ]
[ "def test_can_confirm_submission(speaker_client, accepted_submission): response = speaker_client.get(accepted_submission.urls.confirm, follow=True) accepted_submission.refresh_from_db() assert response.status_code", "response = other_speaker_client.post( rejected_submission.urls.user_base, follow=True, data=data ) assert response.status_code ==", "f = SimpleUploadedFile('testfile.txt', b'file_content') response = speaker_client.post( event.urls.user, data={ f'question_{speaker_question.id}':", "with scope(event=submission.event): assert submission.resources.count() == 2 submission.refresh_from_db() resource_one.refresh_from_db() new_resource =", "== 13 @pytest.mark.django_db def test_cannot_edit_confirmed_slot_count(speaker_client, confirmed_submission): submission = confirmed_submission submission.event.settings.present_multiple_times", "= SimpleUploadedFile('testfile.txt', b'file_content') response = speaker_client.post( event.urls.user, data={ f'question_{speaker_question.id}': 'black", "== 200 with scope(event=event): answer = speaker.answers.get(question_id=speaker_question.pk) assert answer.answer ==", "the sky' @pytest.mark.django_db def test_cannot_delete_profile_on_first_try(speaker, event, speaker_client): with scope(event=event): assert", "client.get(multilingual_event.cfp.urls.public, follow=True) assert 'submission' in first_response.content.decode() assert 'Einreichung' not in", "data=data ) assert response.status_code == 200 rejected_submission.refresh_from_db() assert rejected_submission.title ==", "djmail.outbox[0].to == ['<EMAIL>'] @pytest.mark.django_db def test_can_accept_invitation(orga_client, submission): assert submission.speakers.count() ==", "speaker_client.post(accepted_submission.urls.confirm, follow=True) accepted_submission.refresh_from_db() assert response.status_code == 200 assert accepted_submission.state ==", "1 @pytest.mark.django_db def test_submission_withdraw_if_confirmed(speaker_client, submission): with scope(event=submission.event): submission.accept() submission.confirm() response", "} response = speaker_client.post(submission.urls.invite, follow=True, data=data) assert response.status_code == 200", "submission.state != SubmissionStates.WITHDRAWN @pytest.mark.django_db def test_submission_withdraw_if_rejected(speaker_client, submission): with scope(event=submission.event): submission.reject()", "assert new_resource.description == 'new resource' assert new_resource.resource.read() == b'file_content' assert", "== 200 rejected_submission.refresh_from_db() assert rejected_submission.title == title @pytest.mark.django_db def test_can_edit_submission_type(speaker_client,", "file_answer.answer_file.name).exists() response = speaker_client.post( event.urls.user, data={ f'question_{speaker_question.id}': 'green as the", "def test_can_edit_submission_type(speaker_client, submission, event): with scope(event=submission.event): new_type = event.submission_types.create(name='Other', default_duration=13)", "djmail.outbox = [] response = speaker_client.get( submission.urls.invite, follow=True, data={'email': 'invalidemail'}", "@pytest.mark.django_db def test_submission_withdraw_if_confirmed(speaker_client, submission): with scope(event=submission.event): submission.accept() submission.confirm() response =", "def test_can_reconfirm_submission(speaker_client, accepted_submission): accepted_submission.state = SubmissionStates.CONFIRMED accepted_submission.save() response = speaker_client.get(accepted_submission.urls.confirm,", "follow=True, ) assert 
response.status_code == 200 with scope(event=event): answer.refresh_from_db() assert", "night' assert speaker.answers.get(question_id=speaker_boolean_question.pk).answer == 'True' assert ( speaker.answers.get(question_id=speaker_text_question.pk).answer == 'Green", "SubmissionStates.CONFIRMED @pytest.mark.django_db def test_can_reconfirm_submission(speaker_client, accepted_submission): accepted_submission.state = SubmissionStates.CONFIRMED accepted_submission.save() response", "assert submission.state == SubmissionStates.ACCEPTED @pytest.mark.django_db def test_submission_accept_wrong_code(client, submission): submission.state =", "'profile', 'availabilities': '{\"availabilities\": []}', }, follow=True, ) assert response.status_code ==", "submission.content_locale, 'description': submission.description, 'abstract': submission.abstract, 'notes': submission.notes, 'slot_count': submission.slot_count, 'resource-0-id':", "other_submission): response = speaker_client.get(other_submission.urls.user_base, follow=True) assert response.status_code == 404 @pytest.mark.django_db", "= request_availability submission.state = SubmissionStates.ACCEPTED submission.save() response = speaker_client.post(submission.urls.confirm, follow=True)", "SubmissionStates.ACCEPTED @pytest.mark.django_db def test_submission_accept_wrong_code(client, submission): submission.state = SubmissionStates.ACCEPTED submission.save() assert", "scope(event=event): answer = speaker.answers.get(question_id=speaker_question.pk) assert answer.answer == 'black as the", "settings from django.core import mail as djmail from django.core.files.uploadedfile import", "== 404 @pytest.mark.django_db def test_can_confirm_submission(speaker_client, accepted_submission): response = speaker_client.get(accepted_submission.urls.confirm, follow=True)", "'resource-1-description': resource_two.description, 'resource-1-resource': resource_two.resource, 'resource-2-id': '', 'resource-2-description': 'new resource', 'resource-2-resource':", "submission, event): with scope(event=submission.event): new_type = event.submission_types.create(name='Other', default_duration=13) data =", "= speaker_client.post( event.urls.user_delete, data={'really': True}, follow=True ) assert response.status_code ==", "'resource-MAX_NUM_FORMS': 1000, } response = speaker_client.post(submission.urls.user_base, follow=True, data=data) assert response.status_code", "totally the best color.', 'form': 'questions', }, follow=True, ) assert", "submission): response = speaker_client.get(submission.urls.withdraw, follow=True) submission.refresh_from_db() assert response.status_code == 200", "first_response.content.decode() assert 'Einreichung' not in first_response.content.decode() second_response = client.get( reverse('cfp:locale.set',", "response.status_code == 200 with scope(event=submission.event): submission.refresh_from_db() assert submission.slot_count == 13", "'notes': submission.notes, 'slot_count': 13, 'resource-TOTAL_FORMS': 0, 'resource-INITIAL_FORMS': 0, 'resource-MIN_NUM_FORMS': 0,", "in submission.urls.confirm response = client.post( submission.urls.confirm.replace(submission.code, \"foo\"), follow=True ) assert", "color.' 
@pytest.mark.django_db
def test_can_confirm_submission(speaker_client, accepted_submission):
    response = speaker_client.get(accepted_submission.urls.confirm, follow=True)
    accepted_submission.refresh_from_db()
    assert response.status_code == 200
    assert accepted_submission.state == SubmissionStates.ACCEPTED
    response = speaker_client.post(accepted_submission.urls.confirm, follow=True)
    accepted_submission.refresh_from_db()
    assert response.status_code == 200
    assert accepted_submission.state == SubmissionStates.CONFIRMED


@pytest.mark.django_db
def test_can_reconfirm_submission(speaker_client, accepted_submission):
    accepted_submission.state = SubmissionStates.CONFIRMED
    accepted_submission.save()
    response = speaker_client.get(accepted_submission.urls.confirm, follow=True)
    accepted_submission.refresh_from_db()
    assert response.status_code == 200
    assert accepted_submission.state == SubmissionStates.CONFIRMED


@pytest.mark.django_db
def test_cannot_confirm_rejected_submission(other_speaker_client, rejected_submission):
    rejected_submission.state = SubmissionStates.REJECTED
    rejected_submission.save()
    response = other_speaker_client.get(rejected_submission.urls.confirm, follow=True)
    assert response.status_code == 200
    assert rejected_submission.state == SubmissionStates.REJECTED


@pytest.mark.django_db
def test_can_withdraw_submission(speaker_client, submission):
    response = speaker_client.get(submission.urls.withdraw, follow=True)
    submission.refresh_from_db()
    assert response.status_code == 200
    assert submission.state == SubmissionStates.SUBMITTED
    response = speaker_client.post(submission.urls.withdraw, follow=True)
    submission.refresh_from_db()
    assert response.status_code == 200
    assert submission.state == SubmissionStates.WITHDRAWN


@pytest.mark.django_db
def test_cannot_withdraw_accepted_submission(speaker_client, accepted_submission):
    response = speaker_client.get(accepted_submission.urls.withdraw, follow=True)
    accepted_submission.refresh_from_db()
    assert response.status_code == 200
    assert accepted_submission.state == SubmissionStates.ACCEPTED
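
# The edit tests below post the submission form together with its resource
# formset. The resource-TOTAL_FORMS / resource-INITIAL_FORMS /
# resource-MIN_NUM_FORMS / resource-MAX_NUM_FORMS keys are Django's standard
# formset management-form fields; without them the formset would not validate.
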
@pytest.mark.django_db
def test_can_edit_submission(speaker_client, submission, resource, other_resource):
    with scope(event=submission.event):
        assert submission.resources.count() == 2
        resource_one = submission.resources.first()
        resource_two = submission.resources.last()
        assert submission.title in str(resource_one)
        f = SimpleUploadedFile('testfile.txt', b'file_content')
        data = {
            'title': 'Ein ganz neuer Titel',
            'submission_type': submission.submission_type.pk,
            'content_locale': submission.content_locale,
            'description': submission.description,
            'abstract': submission.abstract,
            'notes': submission.notes,
            'slot_count': submission.slot_count,
            'resource-0-id': resource_one.id,
            'resource-0-description': 'new resource name',
            'resource-0-resource': resource_one.resource,
            'resource-1-id': resource_two.id,
            'resource-1-DELETE': True,
            'resource-1-description': resource_two.description,
            'resource-1-resource': resource_two.resource,
            'resource-2-id': '',
            'resource-2-description': 'new resource',
            'resource-2-resource': f,
            'resource-TOTAL_FORMS': 3,
            'resource-INITIAL_FORMS': 2,
            'resource-MIN_NUM_FORMS': 0,
            'resource-MAX_NUM_FORMS': 1000,
        }
    response = speaker_client.post(submission.urls.user_base, follow=True, data=data)
    assert response.status_code == 200
    with scope(event=submission.event):
        assert submission.resources.count() == 2
        submission.refresh_from_db()
        resource_one.refresh_from_db()
        new_resource = submission.resources.exclude(pk=resource_one.pk).first()
        assert submission.title == 'Ein ganz neuer Titel', response.content.decode()
        assert submission.resources.count() == 2
        assert new_resource.description == 'new resource'
        assert new_resource.resource.read() == b'file_content'
        assert not submission.resources.filter(pk=resource_two.pk).exists()


@pytest.mark.django_db
def test_can_edit_slot_count(speaker_client, submission):
    with scope(event=submission.event):
        submission.event.settings.present_multiple_times = True
        data = {
            'title': 'Ein ganz neuer Titel',
            'submission_type': submission.submission_type.pk,
            'content_locale': submission.content_locale,
            'description': submission.description,
            'abstract': submission.abstract,
            'notes': submission.notes,
            'slot_count': 13,
            'resource-TOTAL_FORMS': 0,
            'resource-INITIAL_FORMS': 0,
            'resource-MIN_NUM_FORMS': 0,
            'resource-MAX_NUM_FORMS': 1000,
        }
    response = speaker_client.post(submission.urls.user_base, follow=True, data=data)
    assert response.status_code == 200
    with scope(event=submission.event):
        submission.refresh_from_db()
        assert submission.slot_count == 13


@pytest.mark.django_db
def test_cannot_edit_confirmed_slot_count(speaker_client, confirmed_submission):
    submission = confirmed_submission
    submission.event.settings.present_multiple_times = True
    with scope(event=submission.event):
        data = {
            'title': 'Ein ganz neuer Titel',
            'submission_type': submission.submission_type.pk,
            'content_locale': submission.content_locale,
            'description': submission.description,
            'abstract': submission.abstract,
            'notes': submission.notes,
            'slot_count': 13,
            'resource-TOTAL_FORMS': 0,
            'resource-INITIAL_FORMS': 0,
            'resource-MIN_NUM_FORMS': 0,
            'resource-MAX_NUM_FORMS': 1000,
        }
    response = speaker_client.post(submission.urls.user_base, follow=True, data=data)
    assert response.status_code == 200
    with scope(event=submission.event):
        submission.refresh_from_db()
        assert submission.slot_count != 13


@pytest.mark.django_db
def test_cannot_edit_rejected_submission(other_speaker_client, rejected_submission):
    title = rejected_submission.title
    data = {
        'title': 'Ein ganz neuer Titel',
        'submission_type': rejected_submission.submission_type.pk,
        'content_locale': rejected_submission.content_locale,
        'description': rejected_submission.description,
        'abstract': rejected_submission.abstract,
        'notes': rejected_submission.notes,
        'resource-TOTAL_FORMS': 0,
        'resource-INITIAL_FORMS': 0,
        'resource-MIN_NUM_FORMS': 0,
        'resource-MAX_NUM_FORMS': 1000,
    }
    response = other_speaker_client.post(
        rejected_submission.urls.user_base, follow=True, data=data
    )
    assert response.status_code == 200
    rejected_submission.refresh_from_db()
    assert rejected_submission.title == title
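
# The submission type can still be changed while a proposal is pending; once
# the proposal has been accepted, the posted type is ignored, as the next two
# tests demonstrate.
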
@pytest.mark.django_db
def test_can_edit_submission_type(speaker_client, submission, event):
    with scope(event=submission.event):
        new_type = event.submission_types.create(name='Other', default_duration=13)
        data = {
            'title': 'Ein ganz neuer Titel',
            'submission_type': new_type.pk,
            'content_locale': submission.content_locale,
            'description': submission.description,
            'abstract': submission.abstract,
            'notes': submission.notes,
            'resource-TOTAL_FORMS': 0,
            'resource-INITIAL_FORMS': 0,
            'resource-MIN_NUM_FORMS': 0,
            'resource-MAX_NUM_FORMS': 1000,
        }
    response = speaker_client.post(submission.urls.user_base, follow=True, data=data)
    assert response.status_code == 200
    with scope(event=submission.event):
        submission.refresh_from_db()
        assert submission.submission_type == new_type


@pytest.mark.django_db
def test_cannot_edit_submission_type_after_acceptance(speaker_client, submission, event):
    with scope(event=submission.event):
        submission.accept()
        new_type = event.submission_types.create(name='Other', default_duration=13)
        data = {
            'title': 'Ein ganz neuer Titel',
            'submission_type': new_type.pk,
            'content_locale': submission.content_locale,
            'description': submission.description,
            'abstract': submission.abstract,
            'notes': submission.notes,
            'resource-TOTAL_FORMS': 0,
            'resource-INITIAL_FORMS': 0,
            'resource-MIN_NUM_FORMS': 0,
            'resource-MAX_NUM_FORMS': 1000,
        }
    response = speaker_client.post(submission.urls.user_base, follow=True, data=data)
    assert response.status_code == 200
    with scope(event=submission.event):
        submission.refresh_from_db()
        assert submission.submission_type != new_type
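
# The speaker profile page bundles several independent forms; judging from the
# payloads below, the 'form' key ('profile', 'login', 'questions', 'token')
# appears to tell the view which sub-form a POST is meant for.
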
@pytest.mark.django_db
def test_can_edit_profile(speaker, event, speaker_client):
    response = speaker_client.post(
        event.urls.user,
        data={
            'name': '<NAME>',
            'biography': 'Ruling since forever.',
            'form': 'profile',
        },
        follow=True,
    )
    assert response.status_code == 200
    with scope(event=event):
        speaker.refresh_from_db()
        assert speaker.profiles.get(event=event).biography == 'Ruling since forever.'
        assert speaker.name == '<NAME>'


@pytest.mark.django_db
def test_can_change_api_token(speaker, event, speaker_client):
    speaker.regenerate_token()
    old_token = Token.objects.filter(user=speaker).first().key
    response = speaker_client.post(
        event.urls.user,
        data={
            'form': 'token',
        },
        follow=True,
    )
    assert response.status_code == 200
    new_token = Token.objects.filter(user=speaker).first().key
    assert new_token != old_token


@pytest.mark.django_db
def test_must_provide_availabilities(speaker, event, speaker_client):
    event.settings.cfp_require_availabilities = True
    response = speaker_client.post(
        event.urls.user,
        data={
            'name': '<NAME>',
            'biography': 'Ruling since forever.',
            'form': 'profile',
        },
        follow=True,
    )
    assert response.status_code == 200
    with scope(event=event):
        speaker.refresh_from_db()
        assert speaker.profiles.get(event=event).biography != 'Ruling since forever.'
    response = speaker_client.post(
        event.urls.user,
        data={
            'name': '<NAME>',
            'biography': 'Ruling since forever.',
            'form': 'profile',
            'availabilities': '{"availabilities": []}',
        },
        follow=True,
    )
    assert response.status_code == 200
    with scope(event=event):
        speaker.refresh_from_db()
        assert speaker.profiles.get(event=event).biography != 'Ruling since forever.'


@pytest.mark.django_db
def test_can_edit_login_info(speaker, event, speaker_client):
    response = speaker_client.post(
        event.urls.user,
        data={
            'old_password': '<PASSWORD>!',
            'email': '<EMAIL>',
            'password': '',
            'password_repeat': '',
            'form': 'login',
        },
        follow=True,
    )
    assert response.status_code == 200
    speaker.refresh_from_db()
    assert speaker.email == '<EMAIL>'
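
# File-type question answers are stored with a file:// prefix and written
# below MEDIA_ROOT; the answer-update test further down checks both the answer
# string and the uploaded file's content on disk.
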
@pytest.mark.django_db
def test_can_edit_login_info_wrong_password(speaker, event, speaker_client):
    response = speaker_client.post(
        event.urls.user,
        data={
            'old_password': '<PASSWORD>!',
            'email': '<EMAIL>',
            'password': '',
            'password_repeat': '',
            'form': 'login',
        },
        follow=True,
    )
    assert response.status_code == 200
    speaker.refresh_from_db()
    assert speaker.email != '<EMAIL>'


@pytest.mark.django_db
def test_can_edit_and_update_speaker_answers(
    speaker,
    event,
    speaker_question,
    speaker_boolean_question,
    speaker_client,
    speaker_text_question,
    speaker_file_question,
):
    with scope(event=event):
        answer = speaker.answers.filter(question_id=speaker_question.pk).first()
        assert not answer
    f = SimpleUploadedFile('testfile.txt', b'file_content')
    response = speaker_client.post(
        event.urls.user,
        data={
            f'question_{speaker_question.id}': 'black as the night',
            f'question_{speaker_boolean_question.id}': 'True',
            f'question_{speaker_file_question.id}': f,
            f'question_{speaker_text_question.id}': 'Green is totally the best color.',
            'form': 'questions',
        },
        follow=True,
    )
    assert response.status_code == 200
    with scope(event=event):
        answer = speaker.answers.get(question_id=speaker_question.pk)
        assert answer.answer == 'black as the night'
        assert speaker.answers.get(question_id=speaker_boolean_question.pk).answer == 'True'
        assert (
            speaker.answers.get(question_id=speaker_text_question.pk).answer
            == 'Green is totally the best color.'
        )
        file_answer = speaker.answers.get(question_id=speaker_file_question.pk)
        assert file_answer.answer.startswith('file://')
        assert file_answer.answer_file.read() == b'file_content'
        assert (settings.MEDIA_ROOT / file_answer.answer_file.name).exists()
    response = speaker_client.post(
        event.urls.user,
        data={
            f'question_{speaker_question.id}': 'green as the sky',
            'form': 'questions',
        },
        follow=True,
    )
    assert response.status_code == 200
    with scope(event=event):
        answer.refresh_from_db()
        assert answer.answer == 'green as the sky'


@pytest.mark.django_db
def test_cannot_delete_profile_on_first_try(speaker, event, speaker_client):
    with scope(event=event):
        assert speaker.profiles.get(event=event).biography != ''
    response = speaker_client.post(event.urls.user_delete, follow=True)
    assert response.status_code == 200
    with scope(event=event):
        speaker.refresh_from_db()
        assert speaker.profiles.get(event=event).biography != ''
        assert speaker.name != '<NAME>'


@pytest.mark.django_db
def test_can_delete_profile(speaker, event, speaker_client):
    with scope(event=event):
        assert speaker.profiles.get(event=event).biography != ''
    response = speaker_client.post(
        event.urls.user_delete, data={'really': True}, follow=True
    )
    assert response.status_code == 200
    with scope(event=event):
        speaker.refresh_from_db()
        assert speaker.profiles.get(event=event).biography == ''
        assert speaker.name == '<NAME>'
        assert speaker.email.startswith('deleted_user')
        assert speaker.email.endswith('@localhost')
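
# Locale switching: the cfp locale.set endpoint below changes the rendered
# language for anonymous visitors and, for logged-in users, also persists the
# choice on the user object (orga_user.locale).
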
@pytest.mark.django_db
def test_can_change_locale(multilingual_event, client):
    first_response = client.get(multilingual_event.cfp.urls.public, follow=True)
    assert 'submission' in first_response.content.decode()
    assert 'Einreichung' not in first_response.content.decode()
    second_response = client.get(
        reverse('cfp:locale.set', kwargs={'event': multilingual_event.slug})
        + f'?locale=de&next=/{multilingual_event.slug}/',
        follow=True,
    )
    assert 'Einreichung' in second_response.content.decode()


@pytest.mark.django_db
def test_persists_changed_locale(multilingual_event, orga_user, orga_client):
    assert orga_user.locale == 'en'
    response = orga_client.get(
        reverse('cfp:locale.set', kwargs={'event': multilingual_event.slug})
        + f'?locale=de&next=/{multilingual_event.slug}/',
        follow=True,
    )
    orga_user.refresh_from_db()
    assert response.status_code == 200
    assert orga_user.locale == 'de'


@pytest.mark.django_db
def test_can_invite_speaker(speaker_client, submission):
    djmail.outbox = []
    response = speaker_client.get(
        submission.urls.invite, follow=True, data={'email': 'invalidemail'}
    )
    assert response.status_code == 200
    data = {
        'speaker': '<EMAIL>',
        'subject': 'Please join!',
        'text': 'C\'mon, it will be fun!',
    }
    response = speaker_client.post(submission.urls.invite, follow=True, data=data)
    assert response.status_code == 200
    assert len(djmail.outbox) == 1
    assert djmail.outbox[0].to == ['<EMAIL>']
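
# Co-speaker invitations go out by email; the acceptance URL appears to embed
# an invitation key, so a mangled link (see test_wrong_acceptance_link) must
# return 404 instead of adding a speaker.
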
@pytest.mark.django_db
def test_can_accept_invitation(orga_client, submission):
    assert submission.speakers.count() == 1
    response = orga_client.post(submission.urls.accept_invitation, follow=True)
    submission.refresh_from_db()
    assert response.status_code == 200
    assert submission.speakers.count() == 2


@pytest.mark.django_db
def test_wrong_acceptance_link(orga_client, submission):
    assert submission.speakers.count() == 1
    response = orga_client.post(
        submission.urls.accept_invitation + 'olololol', follow=True
    )
    submission.refresh_from_db()
    assert response.status_code == 404
    assert submission.speakers.count() == 1


@pytest.mark.django_db
@pytest.mark.parametrize('request_availability', (True, False))
def test_submission_accept(speaker_client, submission, request_availability):
    submission.event.settings.cfp_request_availabilities = request_availability
    submission.state = SubmissionStates.ACCEPTED
    submission.save()
    response = speaker_client.post(submission.urls.confirm, follow=True)
    submission.refresh_from_db()
    assert response.status_code == 200
    assert submission.state == SubmissionStates.CONFIRMED


@pytest.mark.django_db
def test_submission_accept_with_missing_availability(speaker_client, submission):
    submission.event.settings.cfp_request_availabilities = True
    submission.event.settings.cfp_require_availabilities = True
    submission.state = SubmissionStates.ACCEPTED
    submission.save()
    response = speaker_client.post(submission.urls.confirm, follow=True)
    submission.refresh_from_db()
    assert response.status_code == 200
    assert submission.state == SubmissionStates.ACCEPTED
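
# The remaining tests cover authentication on the confirm endpoint (anonymous
# users and requests with a wrong submission code are redirected to login) and
# the withdraw transitions: withdrawing a merely submitted talk sends no mail,
# while withdrawing after acceptance produces exactly one notification mail
# (djmail.outbox), and confirmed or rejected talks cannot be withdrawn at all.
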
submission.accept() response =", "assert 'Einreichung' in second_response.content.decode() @pytest.mark.django_db def test_persists_changed_locale(multilingual_event, orga_user, orga_client): assert", "'' response = speaker_client.post( event.urls.user_delete, data={'really': True}, follow=True ) assert", "forever.' assert speaker.name == '<NAME>' @pytest.mark.django_db def test_can_change_api_token(speaker, event, speaker_client):", "== 'en' response = orga_client.get( reverse('cfp:locale.set', kwargs={'event': multilingual_event.slug}) + f'?locale=de&next=/{multilingual_event.slug}/',", "== 200 with scope(event=submission.event): submission.refresh_from_db() assert submission.submission_type != new_type @pytest.mark.django_db", "200 data = { 'speaker': '<EMAIL>', 'subject': 'Please join!', 'text':", "assert submission.submission_type == new_type @pytest.mark.django_db def test_cannot_edit_submission_type_after_acceptance(speaker_client, submission, event): with", "'resource-MIN_NUM_FORMS': 0, 'resource-MAX_NUM_FORMS': 1000, } response = other_speaker_client.post( rejected_submission.urls.user_base, follow=True,", "@pytest.mark.django_db def test_submission_accept_nologin(client, submission): submission.state = SubmissionStates.ACCEPTED submission.save() response =", "200 assert submission.title in response.content.decode() @pytest.mark.django_db def test_can_see_submission(speaker_client, submission): response", "Titel', 'submission_type': rejected_submission.submission_type.pk, 'content_locale': rejected_submission.content_locale, 'description': rejected_submission.description, 'abstract': rejected_submission.abstract, 'notes':", "assert speaker.email == '<EMAIL>' @pytest.mark.django_db def test_can_edit_login_info_wrong_password(speaker, event, speaker_client): response", "ganz neuer Titel', 'submission_type': new_type.pk, 'content_locale': submission.content_locale, 'description': submission.description, 'abstract':", "file_answer.answer_file.read() == b'file_content' assert (settings.MEDIA_ROOT / file_answer.answer_file.name).exists() response = speaker_client.post(", "submission.reject() response = speaker_client.post(submission.urls.withdraw, follow=True) assert response.status_code == 200 with", "== 'green as the sky' @pytest.mark.django_db def test_cannot_delete_profile_on_first_try(speaker, event, speaker_client):", "200 assert orga_user.locale == 'de' @pytest.mark.django_db def test_can_invite_speaker(speaker_client, submission): djmail.outbox", "= SubmissionStates.CONFIRMED accepted_submission.save() response = speaker_client.get(accepted_submission.urls.confirm, follow=True) accepted_submission.refresh_from_db() assert response.status_code", "assert response.status_code == 200 with scope(event=submission.event): submission.refresh_from_db() assert submission.state !=", "assert response.status_code == 200 new_token = Token.objects.filter(user=speaker).first().key assert new_token !=", "with scope(event=submission.event): submission.event.settings.present_multiple_times = True data = { 'title': 'Ein", "== 200 with scope(event=submission.event): submission.refresh_from_db() assert submission.submission_type == new_type @pytest.mark.django_db", "= True submission.state = SubmissionStates.ACCEPTED submission.save() response = speaker_client.post(submission.urls.confirm, follow=True)", "speaker.refresh_from_db() assert speaker.profiles.get(event=event).biography != 'Ruling since forever.' 
response = speaker_client.post(", "follow=True) submission.refresh_from_db() assert response.status_code == 200 assert submission.speakers.count() == 2", "response = orga_client.post( submission.urls.accept_invitation + 'olololol', follow=True ) submission.refresh_from_db() assert", "speaker.email.startswith('deleted_user') assert speaker.email.endswith('@localhost') @pytest.mark.django_db def test_can_change_locale(multilingual_event, client): first_response = client.get(multilingual_event.cfp.urls.public,", "submission.abstract, 'notes': submission.notes, 'slot_count': submission.slot_count, 'resource-0-id': resource_one.id, 'resource-0-description': 'new resource", "f, f'question_{speaker_text_question.id}': 'Green is totally the best color.', 'form': 'questions',", "follow=True, data=data) assert response.status_code == 200 with scope(event=submission.event): submission.refresh_from_db() assert", "= orga_client.post( submission.urls.accept_invitation + 'olololol', follow=True ) submission.refresh_from_db() assert response.status_code", "response = speaker_client.get(other_submission.urls.user_base, follow=True) assert response.status_code == 404 @pytest.mark.django_db def", "'resource-2-id': '', 'resource-2-description': 'new resource', 'resource-2-resource': f, 'resource-TOTAL_FORMS': 3, 'resource-INITIAL_FORMS':", "== SubmissionStates.SUBMITTED response = speaker_client.post(submission.urls.withdraw, follow=True) submission.refresh_from_db() assert response.status_code ==", "'description': submission.description, 'abstract': submission.abstract, 'notes': submission.notes, 'slot_count': submission.slot_count, 'resource-0-id': resource_one.id,", "}, follow=True, ) assert response.status_code == 200 new_token = Token.objects.filter(user=speaker).first().key", "scope(event=submission.event): submission.event.settings.present_multiple_times = True data = { 'title': 'Ein ganz", "SubmissionStates @pytest.mark.django_db def test_can_see_submission_list(speaker_client, submission): response = speaker_client.get(submission.event.urls.user_submissions, follow=True) assert", "SubmissionStates.CONFIRMED @pytest.mark.django_db def test_cannot_confirm_rejected_submission(other_speaker_client, rejected_submission): rejected_submission.state = SubmissionStates.REJECTED rejected_submission.save() response", "'resource-0-id': resource_one.id, 'resource-0-description': 'new resource name', 'resource-0-resource': resource_one.resource, 'resource-1-id': resource_two.id,", "data = { 'title': 'Ein ganz neuer Titel', 'submission_type': rejected_submission.submission_type.pk,", ") assert response.status_code == 200 data = { 'speaker': '<EMAIL>',", "SimpleUploadedFile from django.urls import reverse from django_scopes import scope from", "1 assert djmail.outbox[0].to == ['<EMAIL>'] @pytest.mark.django_db def test_can_accept_invitation(orga_client, submission): assert", "SubmissionStates.ACCEPTED @pytest.mark.django_db def test_submission_accept_nologin(client, submission): submission.state = SubmissionStates.ACCEPTED submission.save() response", "= submission.resources.first() resource_two = submission.resources.last() assert submission.title in str(resource_one) f", "follow=True, ) assert response.status_code == 200 new_token = Token.objects.filter(user=speaker).first().key assert", "assert response.status_code == 200 with scope(event=submission.event): assert submission.resources.count() == 2", "= [] submission.state = SubmissionStates.SUBMITTED submission.save() response = 
speaker_client.post(submission.urls.withdraw, follow=True)", "orga_user.locale == 'de' @pytest.mark.django_db def test_can_invite_speaker(speaker_client, submission): djmail.outbox = []", "submission.submission_type == new_type @pytest.mark.django_db def test_cannot_edit_submission_type_after_acceptance(speaker_client, submission, event): with scope(event=submission.event):", "response.status_code == 200 assert submission.title in response.content.decode() @pytest.mark.django_db def test_cannot_see_other_submission(speaker_client,", "since forever.' response = speaker_client.post( event.urls.user, data={ 'name': '<NAME>', 'biography':", ") assert response.status_code == 200 with scope(event=event): answer.refresh_from_db() assert answer.answer", "@pytest.mark.django_db def test_can_reconfirm_submission(speaker_client, accepted_submission): accepted_submission.state = SubmissionStates.CONFIRMED accepted_submission.save() response =", "import SimpleUploadedFile from django.urls import reverse from django_scopes import scope", "2 submission.refresh_from_db() resource_one.refresh_from_db() new_resource = submission.resources.exclude(pk=resource_one.pk).first() assert submission.title == 'Ein", "speaker.email == '<EMAIL>' @pytest.mark.django_db def test_can_edit_login_info_wrong_password(speaker, event, speaker_client): response =", "@pytest.mark.django_db def test_submission_withdraw_if_accepted(speaker_client, submission): djmail.outbox = [] with scope(event=submission.event): submission.accept()", "= speaker.answers.get(question_id=speaker_file_question.pk) assert file_answer.answer.startswith('file://') assert file_answer.answer_file.read() == b'file_content' assert (settings.MEDIA_ROOT", "speaker_client): response = speaker_client.post( event.urls.user, data={ 'old_password': '<PASSWORD>!', 'email': '<EMAIL>',", "data={ f'question_{speaker_question.id}': 'black as the night', f'question_{speaker_boolean_question.id}': 'True', f'question_{speaker_file_question.id}': f,", "= Token.objects.filter(user=speaker).first().key response = speaker_client.post( event.urls.user, data={ 'form': 'token', },", "response = speaker_client.post(accepted_submission.urls.confirm, follow=True) accepted_submission.refresh_from_db() assert response.status_code == 200 assert", "test_cannot_withdraw_accepted_submission(speaker_client, accepted_submission): response = speaker_client.get(accepted_submission.urls.withdraw, follow=True) accepted_submission.refresh_from_db() assert response.status_code ==", "scope(event=submission.event): submission.refresh_from_db() assert submission.submission_type == new_type @pytest.mark.django_db def test_cannot_edit_submission_type_after_acceptance(speaker_client, submission,", "== 2 assert new_resource.description == 'new resource' assert new_resource.resource.read() ==", "submission, event): with scope(event=submission.event): submission.accept() new_type = event.submission_types.create(name='Other', default_duration=13) data", "assert response.status_code == 404 @pytest.mark.django_db def test_can_confirm_submission(speaker_client, accepted_submission): response =", "import SubmissionStates @pytest.mark.django_db def test_can_see_submission_list(speaker_client, submission): response = speaker_client.get(submission.event.urls.user_submissions, follow=True)", "'slot_count': submission.slot_count, 'resource-0-id': resource_one.id, 'resource-0-description': 'new resource name', 'resource-0-resource': resource_one.resource,", "other_resource): with scope(event=submission.event): 
assert submission.resources.count() == 2 resource_one = submission.resources.first()", "response.content.decode() @pytest.mark.django_db def test_can_see_submission(speaker_client, submission): response = speaker_client.get(submission.urls.user_base, follow=True) assert", "scope(event=submission.event): submission.accept() new_type = event.submission_types.create(name='Other', default_duration=13) data = { 'title':", "200 with scope(event=event): speaker.refresh_from_db() assert speaker.profiles.get(event=event).biography != '' assert speaker.name", "'notes': rejected_submission.notes, 'resource-TOTAL_FORMS': 0, 'resource-INITIAL_FORMS': 0, 'resource-MIN_NUM_FORMS': 0, 'resource-MAX_NUM_FORMS': 1000,", "test_can_withdraw_submission(speaker_client, submission): response = speaker_client.get(submission.urls.withdraw, follow=True) submission.refresh_from_db() assert response.status_code ==", "= SimpleUploadedFile('testfile.txt', b'file_content') data = { 'title': 'Ein ganz neuer", "'title': 'Ein ganz neuer Titel', 'submission_type': new_type.pk, 'content_locale': submission.content_locale, 'description':", "assert speaker.name != '<NAME>' @pytest.mark.django_db def test_can_delete_profile(speaker, event, speaker_client): with", "speaker_client.get(submission.urls.withdraw, follow=True) submission.refresh_from_db() assert response.status_code == 200 assert submission.state ==", "== 1 response = orga_client.post(submission.urls.accept_invitation, follow=True) submission.refresh_from_db() assert response.status_code ==", "= { 'speaker': '<EMAIL>', 'subject': 'Please join!', 'text': 'C\\'mon, it", "response = speaker_client.post(submission.urls.confirm, follow=True) submission.refresh_from_db() assert response.status_code == 200 assert", "second_response.content.decode() @pytest.mark.django_db def test_persists_changed_locale(multilingual_event, orga_user, orga_client): assert orga_user.locale == 'en'", "assert response.status_code == 200 with scope(event=event): answer.refresh_from_db() assert answer.answer ==", "'login', }, follow=True, ) assert response.status_code == 200 speaker.refresh_from_db() assert", "multilingual_event.slug}) + f'?locale=de&next=/{multilingual_event.slug}/', follow=True, ) assert 'Einreichung' in second_response.content.decode() @pytest.mark.django_db", "'new resource', 'resource-2-resource': f, 'resource-TOTAL_FORMS': 3, 'resource-INITIAL_FORMS': 2, 'resource-MIN_NUM_FORMS': 0,", "assert new_token != old_token @pytest.mark.django_db def test_must_provide_availabilities(speaker, event, speaker_client): event.settings.cfp_require_availabilities", "assert accepted_submission.state == SubmissionStates.CONFIRMED @pytest.mark.django_db def test_cannot_confirm_rejected_submission(other_speaker_client, rejected_submission): rejected_submission.state =", "with scope(event=event): speaker.refresh_from_db() assert speaker.profiles.get(event=event).biography != 'Ruling since forever.' 
@pytest.mark.django_db", "speaker_client, speaker_text_question, speaker_file_question, ): with scope(event=event): answer = speaker.answers.filter(question_id=speaker_question.pk).first() assert", "2 @pytest.mark.django_db def test_wrong_acceptance_link(orga_client, submission): assert submission.speakers.count() == 1 response", "= SubmissionStates.ACCEPTED submission.save() assert submission.code in submission.urls.confirm response = client.post(", "scope(event=submission.event): submission.refresh_from_db() assert submission.state != SubmissionStates.WITHDRAWN @pytest.mark.django_db def test_submission_withdraw_if_rejected(speaker_client, submission):", "assert submission.title in response.content.decode() @pytest.mark.django_db def test_can_see_submission(speaker_client, submission): response =", "rejected_submission.description, 'abstract': rejected_submission.abstract, 'notes': rejected_submission.notes, 'resource-TOTAL_FORMS': 0, 'resource-INITIAL_FORMS': 0, 'resource-MIN_NUM_FORMS':", ") assert response.status_code == 200 rejected_submission.refresh_from_db() assert rejected_submission.title == title", "submission.state = SubmissionStates.ACCEPTED submission.save() response = speaker_client.post(submission.urls.confirm, follow=True) submission.refresh_from_db() assert", "submission.urls.confirm response = client.post( submission.urls.confirm.replace(submission.code, \"foo\"), follow=True ) assert response.status_code", "assert response.status_code == 200 assert orga_user.locale == 'de' @pytest.mark.django_db def", "answer.refresh_from_db() assert answer.answer == 'green as the sky' @pytest.mark.django_db def", "SubmissionStates.SUBMITTED response = speaker_client.post(submission.urls.withdraw, follow=True) submission.refresh_from_db() assert response.status_code == 200", "def test_can_see_submission(speaker_client, submission): response = speaker_client.get(submission.urls.user_base, follow=True) assert response.status_code ==", "= SubmissionStates.REJECTED rejected_submission.save() response = other_speaker_client.get(rejected_submission.urls.confirm, follow=True) rejected_submission.refresh_from_db() assert response.status_code", "event): with scope(event=submission.event): submission.accept() new_type = event.submission_types.create(name='Other', default_duration=13) data =", "totally the best color.' 
) file_answer = speaker.answers.get(question_id=speaker_file_question.pk) assert file_answer.answer.startswith('file://')", "1 response = orga_client.post( submission.urls.accept_invitation + 'olololol', follow=True ) submission.refresh_from_db()", "submission.title in response.content.decode() @pytest.mark.django_db def test_cannot_see_other_submission(speaker_client, other_submission): response = speaker_client.get(other_submission.urls.user_base,", "speaker_client.post(submission.urls.confirm, follow=True) submission.refresh_from_db() assert response.status_code == 200 assert submission.state ==", "with scope(event=submission.event): new_type = event.submission_types.create(name='Other', default_duration=13) data = { 'title':", "data={ f'question_{speaker_question.id}': 'green as the sky', 'form': 'questions', }, follow=True,", "== SubmissionStates.WITHDRAWN @pytest.mark.django_db def test_cannot_withdraw_accepted_submission(speaker_client, accepted_submission): response = speaker_client.get(accepted_submission.urls.withdraw, follow=True)", "with scope(event=submission.event): submission.refresh_from_db() assert submission.submission_type == new_type @pytest.mark.django_db def test_cannot_edit_submission_type_after_acceptance(speaker_client,", "with scope(event=submission.event): submission.reject() response = speaker_client.post(submission.urls.withdraw, follow=True) assert response.status_code ==", "test_can_see_submission_list(speaker_client, submission): response = speaker_client.get(submission.event.urls.user_submissions, follow=True) assert response.status_code == 200", "200 with scope(event=submission.event): submission.refresh_from_db() assert submission.state == SubmissionStates.WITHDRAWN assert len(djmail.outbox)", "data={ 'form': 'token', }, follow=True, ) assert response.status_code == 200", "event, speaker_client): response = speaker_client.post( event.urls.user, data={ 'old_password': '<PASSWORD>!', 'email':", "200 with scope(event=submission.event): submission.refresh_from_db() assert submission.submission_type != new_type @pytest.mark.django_db def", "response.status_code == 200 assert orga_user.locale == 'de' @pytest.mark.django_db def test_can_invite_speaker(speaker_client,", "'invalidemail'} ) assert response.status_code == 200 data = { 'speaker':", "response.status_code == 200 assert len(djmail.outbox) == 1 assert djmail.outbox[0].to ==", "def test_must_provide_availabilities(speaker, event, speaker_client): event.settings.cfp_require_availabilities = True response = speaker_client.post(", "'Ruling since forever.' 
assert speaker.name == '<NAME>' @pytest.mark.django_db def test_can_change_api_token(speaker,", "'name': '<NAME>', 'biography': 'Ruling since forever.', 'form': 'profile', 'availabilities': '{\"availabilities\":", "response = speaker_client.get(submission.urls.withdraw, follow=True) submission.refresh_from_db() assert response.status_code == 200 assert", "'', 'password_repeat': '', 'form': 'login', }, follow=True, ) assert response.status_code", "'notes': submission.notes, 'slot_count': submission.slot_count, 'resource-0-id': resource_one.id, 'resource-0-description': 'new resource name',", "'form': 'token', }, follow=True, ) assert response.status_code == 200 new_token", "speaker_question, speaker_boolean_question, speaker_client, speaker_text_question, speaker_file_question, ): with scope(event=event): answer =", "= other_speaker_client.get(rejected_submission.urls.confirm, follow=True) rejected_submission.refresh_from_db() assert response.status_code == 200 assert rejected_submission.state", "resource_two = submission.resources.last() assert submission.title in str(resource_one) f = SimpleUploadedFile('testfile.txt',", "!= 'Ruling since forever.' @pytest.mark.django_db def test_can_edit_login_info(speaker, event, speaker_client): response", "first_response.content.decode() second_response = client.get( reverse('cfp:locale.set', kwargs={'event': multilingual_event.slug}) + f'?locale=de&next=/{multilingual_event.slug}/', follow=True,", "djmail.outbox = [] with scope(event=submission.event): submission.accept() response = speaker_client.post(submission.urls.withdraw, follow=True)", "'availabilities': '{\"availabilities\": []}', }, follow=True, ) assert response.status_code == 200", "submission.save() response = speaker_client.post(submission.urls.confirm, follow=True) submission.refresh_from_db() assert response.status_code == 200", "test_submission_accept_wrong_code(client, submission): submission.state = SubmissionStates.ACCEPTED submission.save() assert submission.code in submission.urls.confirm", "title @pytest.mark.django_db def test_can_edit_submission_type(speaker_client, submission, event): with scope(event=submission.event): new_type =", "assert len(djmail.outbox) == 0 @pytest.mark.django_db def test_submission_withdraw_if_accepted(speaker_client, submission): djmail.outbox =", "response.status_code == 200 with scope(event=submission.event): assert submission.resources.count() == 2 submission.refresh_from_db()", "== 200 with scope(event=submission.event): assert submission.resources.count() == 2 submission.refresh_from_db() resource_one.refresh_from_db()", "new_resource.resource.read() == b'file_content' assert not submission.resources.filter(pk=resource_two.pk).exists() @pytest.mark.django_db def test_can_edit_slot_count(speaker_client, submission):", "f'question_{speaker_file_question.id}': f, f'question_{speaker_text_question.id}': 'Green is totally the best color.', 'form':", "assert speaker.profiles.get(event=event).biography != '' response = speaker_client.post( event.urls.user_delete, data={'really': True},", "assert speaker.profiles.get(event=event).biography == '' assert speaker.name == '<NAME>' assert speaker.email.startswith('deleted_user')", "True submission.event.settings.cfp_require_availabilities = True submission.state = SubmissionStates.ACCEPTED submission.save() response =", "resource_two.resource, 'resource-2-id': '', 'resource-2-description': 'new resource', 'resource-2-resource': f, 'resource-TOTAL_FORMS': 3,", "event, speaker_client): with scope(event=event): 
assert speaker.profiles.get(event=event).biography != '' response =", "submission.resources.exclude(pk=resource_one.pk).first() assert submission.title == 'Ein ganz neuer Titel', response.content.decode() assert", "submission.refresh_from_db() assert submission.submission_type == new_type @pytest.mark.django_db def test_cannot_edit_submission_type_after_acceptance(speaker_client, submission, event):", "rejected_submission.content_locale, 'description': rejected_submission.description, 'abstract': rejected_submission.abstract, 'notes': rejected_submission.notes, 'resource-TOTAL_FORMS': 0, 'resource-INITIAL_FORMS':", "f'?locale=de&next=/{multilingual_event.slug}/', follow=True, ) orga_user.refresh_from_db() assert response.status_code == 200 assert orga_user.locale", "'email': '<EMAIL>', 'password': '', 'password_repeat': '', 'form': 'login', }, follow=True,", "submission.save() response = speaker_client.post(submission.urls.withdraw, follow=True) assert response.status_code == 200 submission.refresh_from_db()", "'green as the sky', 'form': 'questions', }, follow=True, ) assert", "assert speaker.profiles.get(event=event).biography != '' response = speaker_client.post(event.urls.user_delete, follow=True) assert response.status_code", "== '<NAME>' assert speaker.email.startswith('deleted_user') assert speaker.email.endswith('@localhost') @pytest.mark.django_db def test_can_change_locale(multilingual_event, client):", "request_availability submission.state = SubmissionStates.ACCEPTED submission.save() response = speaker_client.post(submission.urls.confirm, follow=True) submission.refresh_from_db()", "302 assert 'login/?next=' in response.redirect_chain[-1][0] @pytest.mark.django_db def test_submission_withdraw(speaker_client, submission): djmail.outbox", "True data = { 'title': 'Ein ganz neuer Titel', 'submission_type':", "def test_cannot_withdraw_accepted_submission(speaker_client, accepted_submission): response = speaker_client.get(accepted_submission.urls.withdraw, follow=True) accepted_submission.refresh_from_db() assert response.status_code", "= { 'title': 'Ein ganz neuer Titel', 'submission_type': submission.submission_type.pk, 'content_locale':", "} response = other_speaker_client.post( rejected_submission.urls.user_base, follow=True, data=data ) assert response.status_code", "== 'Ruling since forever.' 
assert speaker.name == '<NAME>' @pytest.mark.django_db def", "submission.state == SubmissionStates.WITHDRAWN assert len(djmail.outbox) == 1 @pytest.mark.django_db def test_submission_withdraw_if_confirmed(speaker_client,", "djmail.outbox = [] submission.state = SubmissionStates.SUBMITTED submission.save() response = speaker_client.post(submission.urls.withdraw,", "response = speaker_client.get( submission.urls.invite, follow=True, data={'email': 'invalidemail'} ) assert response.status_code", "assert response.status_code == 200 assert len(djmail.outbox) == 1 assert djmail.outbox[0].to", "as the sky', 'form': 'questions', }, follow=True, ) assert response.status_code", "'abstract': rejected_submission.abstract, 'notes': rejected_submission.notes, 'resource-TOTAL_FORMS': 0, 'resource-INITIAL_FORMS': 0, 'resource-MIN_NUM_FORMS': 0,", "@pytest.mark.django_db def test_can_edit_submission_type(speaker_client, submission, event): with scope(event=submission.event): new_type = event.submission_types.create(name='Other',", "the sky', 'form': 'questions', }, follow=True, ) assert response.status_code ==", "= { 'title': 'Ein ganz neuer Titel', 'submission_type': new_type.pk, 'content_locale':", "assert submission.state == SubmissionStates.ACCEPTED @pytest.mark.django_db def test_submission_accept_nologin(client, submission): submission.state =", "rejected_submission): rejected_submission.state = SubmissionStates.REJECTED rejected_submission.save() response = other_speaker_client.get(rejected_submission.urls.confirm, follow=True) rejected_submission.refresh_from_db()", "= speaker_client.post(event.urls.user_delete, follow=True) assert response.status_code == 200 with scope(event=event): speaker.refresh_from_db()", "with scope(event=submission.event): submission.refresh_from_db() assert submission.state != SubmissionStates.WITHDRAWN @pytest.mark.django_db def test_submission_withdraw_if_rejected(speaker_client,", "!= new_type @pytest.mark.django_db def test_can_edit_profile(speaker, event, speaker_client): response = speaker_client.post(", "other_speaker_client.get(rejected_submission.urls.confirm, follow=True) rejected_submission.refresh_from_db() assert response.status_code == 200 assert rejected_submission.state ==", "follow=True, ) assert response.status_code == 200 speaker.refresh_from_db() assert speaker.email !=", "( speaker.answers.get(question_id=speaker_text_question.pk).answer == 'Green is totally the best color.' 
)", "rejected_submission.state = SubmissionStates.REJECTED rejected_submission.save() response = other_speaker_client.get(rejected_submission.urls.confirm, follow=True) rejected_submission.refresh_from_db() assert", "response.redirect_chain[-1][0] assert submission.state == SubmissionStates.ACCEPTED @pytest.mark.django_db def test_submission_accept_wrong_code(client, submission): submission.state", "def test_can_edit_submission(speaker_client, submission, resource, other_resource): with scope(event=submission.event): assert submission.resources.count() ==", "== 200 data = { 'speaker': '<EMAIL>', 'subject': 'Please join!',", "response.status_code == 200 speaker.refresh_from_db() assert speaker.email != '<EMAIL>' @pytest.mark.django_db def", "!= SubmissionStates.WITHDRAWN @pytest.mark.django_db def test_submission_withdraw_if_rejected(speaker_client, submission): with scope(event=submission.event): submission.reject() response", "200 assert rejected_submission.state == SubmissionStates.REJECTED @pytest.mark.django_db def test_can_withdraw_submission(speaker_client, submission): response", "event.settings.cfp_require_availabilities = True response = speaker_client.post( event.urls.user, data={ 'name': '<NAME>',", "accepted_submission): response = speaker_client.get(accepted_submission.urls.withdraw, follow=True) accepted_submission.refresh_from_db() assert response.status_code == 200", "== 'True' assert ( speaker.answers.get(question_id=speaker_text_question.pk).answer == 'Green is totally the", "scope(event=event): speaker.refresh_from_db() assert speaker.profiles.get(event=event).biography == 'Ruling since forever.' assert speaker.name", "speaker_client.post( event.urls.user, data={ f'question_{speaker_question.id}': 'green as the sky', 'form': 'questions',", "'C\\'mon, it will be fun!', } response = speaker_client.post(submission.urls.invite, follow=True,", "200 with scope(event=submission.event): submission.refresh_from_db() assert submission.submission_type == new_type @pytest.mark.django_db def", "with scope(event=submission.event): assert submission.resources.count() == 2 resource_one = submission.resources.first() resource_two", "resource_one.refresh_from_db() new_resource = submission.resources.exclude(pk=resource_one.pk).first() assert submission.title == 'Ein ganz neuer", "== 200 assert submission.state == SubmissionStates.SUBMITTED response = speaker_client.post(submission.urls.withdraw, follow=True)", "Titel', response.content.decode() assert submission.resources.count() == 2 assert new_resource.description == 'new", ") assert response.status_code == 200 new_token = Token.objects.filter(user=speaker).first().key assert new_token", "rejected_submission.submission_type.pk, 'content_locale': rejected_submission.content_locale, 'description': rejected_submission.description, 'abstract': rejected_submission.abstract, 'notes': rejected_submission.notes, 'resource-TOTAL_FORMS':", "response.status_code == 200 with scope(event=event): speaker.refresh_from_db() assert speaker.profiles.get(event=event).biography != 'Ruling", "second_response = client.get( reverse('cfp:locale.set', kwargs={'event': multilingual_event.slug}) + f'?locale=de&next=/{multilingual_event.slug}/', follow=True, )", "True response = speaker_client.post( event.urls.user, data={ 'name': '<NAME>', 'biography': 'Ruling", "assert response.status_code == 200 assert accepted_submission.state == SubmissionStates.ACCEPTED response =", "confirmed_submission): submission = confirmed_submission 
submission.event.settings.present_multiple_times = True with scope(event=submission.event): data", "data = { 'title': 'Ein ganz neuer Titel', 'submission_type': new_type.pk,", "== 'Ein ganz neuer Titel', response.content.decode() assert submission.resources.count() == 2", "= speaker_client.post( event.urls.user, data={ f'question_{speaker_question.id}': 'black as the night', f'question_{speaker_boolean_question.id}':", "'password': '', 'password_repeat': '', 'form': 'login', }, follow=True, ) assert", "str(resource_one) f = SimpleUploadedFile('testfile.txt', b'file_content') data = { 'title': 'Ein", "submission): with scope(event=submission.event): submission.reject() response = speaker_client.post(submission.urls.withdraw, follow=True) assert response.status_code", "@pytest.mark.django_db def test_cannot_confirm_rejected_submission(other_speaker_client, rejected_submission): rejected_submission.state = SubmissionStates.REJECTED rejected_submission.save() response =", "confirmed_submission submission.event.settings.present_multiple_times = True with scope(event=submission.event): data = { 'title':", "submission.slot_count != 13 @pytest.mark.django_db def test_cannot_edit_rejected_submission(other_speaker_client, rejected_submission): title = rejected_submission.title", "resource_two.id, 'resource-1-DELETE': True, 'resource-1-description': resource_two.description, 'resource-1-resource': resource_two.resource, 'resource-2-id': '', 'resource-2-description':", "'description': submission.description, 'abstract': submission.abstract, 'notes': submission.notes, 'resource-TOTAL_FORMS': 0, 'resource-INITIAL_FORMS': 0,", "== '' assert speaker.name == '<NAME>' assert speaker.email.startswith('deleted_user') assert speaker.email.endswith('@localhost')", "test_can_edit_slot_count(speaker_client, submission): with scope(event=submission.event): submission.event.settings.present_multiple_times = True data = {", "submission.accept() submission.confirm() response = speaker_client.post(submission.urls.withdraw, follow=True) assert response.status_code == 200", "assert rejected_submission.state == SubmissionStates.REJECTED @pytest.mark.django_db def test_can_withdraw_submission(speaker_client, submission): response =", ") assert response.status_code == 200 with scope(event=event): speaker.refresh_from_db() assert speaker.profiles.get(event=event).biography", "submission.abstract, 'notes': submission.notes, 'resource-TOTAL_FORMS': 0, 'resource-INITIAL_FORMS': 0, 'resource-MIN_NUM_FORMS': 0, 'resource-MAX_NUM_FORMS':", "200 with scope(event=event): speaker.refresh_from_db() assert speaker.profiles.get(event=event).biography == 'Ruling since forever.'", "def test_can_edit_login_info(speaker, event, speaker_client): response = speaker_client.post( event.urls.user, data={ 'old_password':", "accepted_submission): response = speaker_client.get(accepted_submission.urls.confirm, follow=True) accepted_submission.refresh_from_db() assert response.status_code == 200", "assert response.redirect_chain[-1][1] == 302 assert 'login/?next=' in response.redirect_chain[-1][0] assert submission.state", "from django.urls import reverse from django_scopes import scope from rest_framework.authtoken.models", "forever.', 'form': 'profile', 'availabilities': '{\"availabilities\": []}', }, follow=True, ) assert", "response.status_code == 200 with scope(event=submission.event): submission.refresh_from_db() assert submission.state != SubmissionStates.WITHDRAWN", "test_can_delete_profile(speaker, event, speaker_client): with 
scope(event=event): assert speaker.profiles.get(event=event).biography != '' response", "event, speaker_client): event.settings.cfp_require_availabilities = True response = speaker_client.post( event.urls.user, data={", "assert 'Einreichung' not in first_response.content.decode() second_response = client.get( reverse('cfp:locale.set', kwargs={'event':", "since forever.' assert speaker.name == '<NAME>' @pytest.mark.django_db def test_can_change_api_token(speaker, event,", "== SubmissionStates.CONFIRMED @pytest.mark.django_db def test_cannot_confirm_rejected_submission(other_speaker_client, rejected_submission): rejected_submission.state = SubmissionStates.REJECTED rejected_submission.save()", "speaker_boolean_question, speaker_client, speaker_text_question, speaker_file_question, ): with scope(event=event): answer = speaker.answers.filter(question_id=speaker_question.pk).first()", "scope(event=submission.event): new_type = event.submission_types.create(name='Other', default_duration=13) data = { 'title': 'Ein", "the night', f'question_{speaker_boolean_question.id}': 'True', f'question_{speaker_file_question.id}': f, f'question_{speaker_text_question.id}': 'Green is totally", "response = speaker_client.post(event.urls.user_delete, follow=True) assert response.status_code == 200 with scope(event=event):", "response = client.post( submission.urls.confirm.replace(submission.code, \"foo\"), follow=True ) assert response.status_code ==", "'new resource name', 'resource-0-resource': resource_one.resource, 'resource-1-id': resource_two.id, 'resource-1-DELETE': True, 'resource-1-description':", "resource name', 'resource-0-resource': resource_one.resource, 'resource-1-id': resource_two.id, 'resource-1-DELETE': True, 'resource-1-description': resource_two.description,", "'speaker': '<EMAIL>', 'subject': 'Please join!', 'text': 'C\\'mon, it will be", "response.status_code == 200 assert submission.state == SubmissionStates.SUBMITTED response = speaker_client.post(submission.urls.withdraw,", "200 speaker.refresh_from_db() assert speaker.email != '<EMAIL>' @pytest.mark.django_db def test_can_edit_and_update_speaker_answers( speaker,", "submission): assert submission.speakers.count() == 1 response = orga_client.post(submission.urls.accept_invitation, follow=True) submission.refresh_from_db()", "404 assert submission.speakers.count() == 1 @pytest.mark.django_db @pytest.mark.parametrize('request_availability', (True, False)) def", "def test_can_edit_and_update_speaker_answers( speaker, event, speaker_question, speaker_boolean_question, speaker_client, speaker_text_question, speaker_file_question, ):", "True, 'resource-1-description': resource_two.description, 'resource-1-resource': resource_two.resource, 'resource-2-id': '', 'resource-2-description': 'new resource',", "event, speaker_question, speaker_boolean_question, speaker_client, speaker_text_question, speaker_file_question, ): with scope(event=event): answer", "submission): assert submission.speakers.count() == 1 response = orga_client.post( submission.urls.accept_invitation +", "django.conf import settings from django.core import mail as djmail from", "f, 'resource-TOTAL_FORMS': 3, 'resource-INITIAL_FORMS': 2, 'resource-MIN_NUM_FORMS': 0, 'resource-MAX_NUM_FORMS': 1000, }", "test_cannot_edit_submission_type_after_acceptance(speaker_client, submission, event): with scope(event=submission.event): submission.accept() new_type = event.submission_types.create(name='Other', default_duration=13)", "'questions', }, follow=True, ) assert response.status_code 
== 200 with scope(event=event):", "it will be fun!', } response = speaker_client.post(submission.urls.invite, follow=True, data=data)", "== SubmissionStates.ACCEPTED @pytest.mark.django_db def test_submission_accept_nologin(client, submission): submission.state = SubmissionStates.ACCEPTED submission.save()", "import pytest from django.conf import settings from django.core import mail", "speaker_client.post(submission.urls.user_base, follow=True, data=data) assert response.status_code == 200 with scope(event=submission.event): assert", "submission.refresh_from_db() assert response.status_code == 200 assert response.redirect_chain[-1][1] == 302 assert", "200 with scope(event=submission.event): submission.refresh_from_db() assert submission.state != SubmissionStates.WITHDRAWN @pytest.mark.django_db def", "'form': 'profile', }, follow=True, ) assert response.status_code == 200 with", "assert response.status_code == 200 assert submission.state == SubmissionStates.CONFIRMED @pytest.mark.django_db def", "response.status_code == 200 submission.refresh_from_db() assert submission.state == SubmissionStates.WITHDRAWN assert len(djmail.outbox)", "speaker_client.post(submission.urls.user_base, follow=True, data=data) assert response.status_code == 200 with scope(event=submission.event): submission.refresh_from_db()", "def test_cannot_see_other_submission(speaker_client, other_submission): response = speaker_client.get(other_submission.urls.user_base, follow=True) assert response.status_code ==", "submission): submission.event.settings.cfp_request_availabilities = True submission.event.settings.cfp_require_availabilities = True submission.state = SubmissionStates.ACCEPTED", "not answer f = SimpleUploadedFile('testfile.txt', b'file_content') response = speaker_client.post( event.urls.user,", "follow=True, ) assert 'Einreichung' in second_response.content.decode() @pytest.mark.django_db def test_persists_changed_locale(multilingual_event, orga_user,", "'submission_type': submission.submission_type.pk, 'content_locale': submission.content_locale, 'description': submission.description, 'abstract': submission.abstract, 'notes': submission.notes,", "new_resource = submission.resources.exclude(pk=resource_one.pk).first() assert submission.title == 'Ein ganz neuer Titel',", "data={ 'name': '<NAME>', 'biography': 'Ruling since forever.', 'form': 'profile', 'availabilities':", "follow=True ) assert response.status_code == 200 assert response.redirect_chain[-1][1] == 302", "}, follow=True, ) assert response.status_code == 200 speaker.refresh_from_db() assert speaker.email", "== 0 @pytest.mark.django_db def test_submission_withdraw_if_accepted(speaker_client, submission): djmail.outbox = [] with", "def test_can_change_locale(multilingual_event, client): first_response = client.get(multilingual_event.cfp.urls.public, follow=True) assert 'submission' in", "@pytest.mark.django_db def test_submission_withdraw_if_rejected(speaker_client, submission): with scope(event=submission.event): submission.reject() response = speaker_client.post(submission.urls.withdraw,", "assert submission.resources.count() == 2 resource_one = submission.resources.first() resource_two = submission.resources.last()", "'Ruling since forever.' 
@pytest.mark.django_db def test_can_edit_login_info(speaker, event, speaker_client): response =", "@pytest.mark.django_db def test_can_delete_profile(speaker, event, speaker_client): with scope(event=event): assert speaker.profiles.get(event=event).biography !=", "'en' response = orga_client.get( reverse('cfp:locale.set', kwargs={'event': multilingual_event.slug}) + f'?locale=de&next=/{multilingual_event.slug}/', follow=True,", "'notes': submission.notes, 'resource-TOTAL_FORMS': 0, 'resource-INITIAL_FORMS': 0, 'resource-MIN_NUM_FORMS': 0, 'resource-MAX_NUM_FORMS': 1000,", "response.status_code == 200 with scope(event=submission.event): submission.refresh_from_db() assert submission.slot_count != 13", "== 200 with scope(event=submission.event): submission.refresh_from_db() assert submission.state == SubmissionStates.WITHDRAWN assert", "submission.event.settings.present_multiple_times = True data = { 'title': 'Ein ganz neuer", "assert response.status_code == 200 speaker.refresh_from_db() assert speaker.email != '<EMAIL>' @pytest.mark.django_db", "request_availability): submission.event.settings.cfp_request_availabilities = request_availability submission.state = SubmissionStates.ACCEPTED submission.save() response =", "ganz neuer Titel', 'submission_type': submission.submission_type.pk, 'content_locale': submission.content_locale, 'description': submission.description, 'abstract':", "response = speaker_client.post( event.urls.user, data={ f'question_{speaker_question.id}': 'green as the sky',", "with scope(event=event): answer = speaker.answers.filter(question_id=speaker_question.pk).first() assert not answer f =", "event.urls.user, data={ 'old_password': '<PASSWORD>!', 'email': '<EMAIL>', 'password': '', 'password_repeat': '',", "def test_can_change_api_token(speaker, event, speaker_client): speaker.regenerate_token() old_token = Token.objects.filter(user=speaker).first().key response =", "== 1 @pytest.mark.django_db def test_submission_withdraw_if_confirmed(speaker_client, submission): with scope(event=submission.event): submission.accept() submission.confirm()", "'resource-0-resource': resource_one.resource, 'resource-1-id': resource_two.id, 'resource-1-DELETE': True, 'resource-1-description': resource_two.description, 'resource-1-resource': resource_two.resource,", "test_can_edit_submission(speaker_client, submission, resource, other_resource): with scope(event=submission.event): assert submission.resources.count() == 2", "neuer Titel', 'submission_type': new_type.pk, 'content_locale': submission.content_locale, 'description': submission.description, 'abstract': submission.abstract,", "== 200 assert accepted_submission.state == SubmissionStates.ACCEPTED response = speaker_client.post(accepted_submission.urls.confirm, follow=True)", "neuer Titel', response.content.decode() assert submission.resources.count() == 2 assert new_resource.description ==", "response = speaker_client.post(submission.urls.withdraw, follow=True) submission.refresh_from_db() assert response.status_code == 200 assert", "speaker.answers.get(question_id=speaker_boolean_question.pk).answer == 'True' assert ( speaker.answers.get(question_id=speaker_text_question.pk).answer == 'Green is totally", "200 with scope(event=event): answer.refresh_from_db() assert answer.answer == 'green as the", "in response.content.decode() @pytest.mark.django_db def test_can_see_submission(speaker_client, submission): response = speaker_client.get(submission.urls.user_base, follow=True)", "response.redirect_chain[-1][1] == 302 assert 
'login/?next=' in response.redirect_chain[-1][0] assert submission.state ==", "SubmissionStates.CONFIRMED @pytest.mark.django_db def test_submission_accept_with_missing_availability(speaker_client, submission): submission.event.settings.cfp_request_availabilities = True submission.event.settings.cfp_require_availabilities =", "assert submission.resources.count() == 2 assert new_resource.description == 'new resource' assert", "rejected_submission.abstract, 'notes': rejected_submission.notes, 'resource-TOTAL_FORMS': 0, 'resource-INITIAL_FORMS': 0, 'resource-MIN_NUM_FORMS': 0, 'resource-MAX_NUM_FORMS':", "scope(event=submission.event): submission.reject() response = speaker_client.post(submission.urls.withdraw, follow=True) assert response.status_code == 200", "djmail from django.core.files.uploadedfile import SimpleUploadedFile from django.urls import reverse from", "= speaker.answers.get(question_id=speaker_question.pk) assert answer.answer == 'black as the night' assert", "'' response = speaker_client.post(event.urls.user_delete, follow=True) assert response.status_code == 200 with", "@pytest.mark.django_db def test_can_change_locale(multilingual_event, client): first_response = client.get(multilingual_event.cfp.urls.public, follow=True) assert 'submission'", "SubmissionStates.REJECTED rejected_submission.save() response = other_speaker_client.get(rejected_submission.urls.confirm, follow=True) rejected_submission.refresh_from_db() assert response.status_code ==", "b'file_content') response = speaker_client.post( event.urls.user, data={ f'question_{speaker_question.id}': 'black as the", "200 submission.refresh_from_db() assert submission.state == SubmissionStates.WITHDRAWN assert len(djmail.outbox) == 0", "assert response.status_code == 200 assert submission.state == SubmissionStates.ACCEPTED @pytest.mark.django_db def", "follow=True) accepted_submission.refresh_from_db() assert response.status_code == 200 assert accepted_submission.state == SubmissionStates.CONFIRMED", "1 response = orga_client.post(submission.urls.accept_invitation, follow=True) submission.refresh_from_db() assert response.status_code == 200", "since forever.', 'form': 'profile', 'availabilities': '{\"availabilities\": []}', }, follow=True, )", "speaker_client.post( event.urls.user, data={ 'name': '<NAME>', 'biography': 'Ruling since forever.', 'form':", "'Ruling since forever.', 'form': 'profile', }, follow=True, ) assert response.status_code", "0, 'resource-MAX_NUM_FORMS': 1000, } response = other_speaker_client.post( rejected_submission.urls.user_base, follow=True, data=data", "in first_response.content.decode() assert 'Einreichung' not in first_response.content.decode() second_response = client.get(", "import Token from pretalx.submission.models import SubmissionStates @pytest.mark.django_db def test_can_see_submission_list(speaker_client, submission):", "kwargs={'event': multilingual_event.slug}) + f'?locale=de&next=/{multilingual_event.slug}/', follow=True, ) assert 'Einreichung' in second_response.content.decode()", ") submission.refresh_from_db() assert response.status_code == 404 assert submission.speakers.count() == 1", "== 1 @pytest.mark.django_db @pytest.mark.parametrize('request_availability', (True, False)) def test_submission_accept(speaker_client, submission, request_availability):", "assert (settings.MEDIA_ROOT / file_answer.answer_file.name).exists() response = speaker_client.post( event.urls.user, data={ f'question_{speaker_question.id}':", "200 rejected_submission.refresh_from_db() assert 
rejected_submission.title == title @pytest.mark.django_db def test_can_edit_submission_type(speaker_client, submission,", "Titel', 'submission_type': new_type.pk, 'content_locale': submission.content_locale, 'description': submission.description, 'abstract': submission.abstract, 'notes':", "assert speaker.profiles.get(event=event).biography == 'Ruling since forever.' assert speaker.name == '<NAME>'", "follow=True, ) assert response.status_code == 200 with scope(event=event): answer =", "in response.redirect_chain[-1][0] assert submission.state == SubmissionStates.ACCEPTED @pytest.mark.django_db def test_submission_accept_wrong_code(client, submission):", "1000, } response = speaker_client.post(submission.urls.user_base, follow=True, data=data) assert response.status_code ==", "resource' assert new_resource.resource.read() == b'file_content' assert not submission.resources.filter(pk=resource_two.pk).exists() @pytest.mark.django_db def", "in response.redirect_chain[-1][0] @pytest.mark.django_db def test_submission_withdraw(speaker_client, submission): djmail.outbox = [] submission.state", "import mail as djmail from django.core.files.uploadedfile import SimpleUploadedFile from django.urls", "<filename>src/tests/cfp/views/test_cfp_user.py import pytest from django.conf import settings from django.core import", "test_submission_withdraw(speaker_client, submission): djmail.outbox = [] submission.state = SubmissionStates.SUBMITTED submission.save() response", "response.status_code == 200 with scope(event=submission.event): submission.refresh_from_db() assert submission.state == SubmissionStates.WITHDRAWN", "submission.refresh_from_db() assert response.status_code == 200 assert submission.state == SubmissionStates.WITHDRAWN @pytest.mark.django_db", "assert submission.title == 'Ein ganz neuer Titel', response.content.decode() assert submission.resources.count()", "title = rejected_submission.title data = { 'title': 'Ein ganz neuer", "200 assert accepted_submission.state == SubmissionStates.CONFIRMED @pytest.mark.django_db def test_cannot_confirm_rejected_submission(other_speaker_client, rejected_submission): rejected_submission.state", "test_can_invite_speaker(speaker_client, submission): djmail.outbox = [] response = speaker_client.get( submission.urls.invite, follow=True,", "submission.content_locale, 'description': submission.description, 'abstract': submission.abstract, 'notes': submission.notes, 'resource-TOTAL_FORMS': 0, 'resource-INITIAL_FORMS':", "Token.objects.filter(user=speaker).first().key response = speaker_client.post( event.urls.user, data={ 'form': 'token', }, follow=True,", "follow=True, data=data) assert response.status_code == 200 assert len(djmail.outbox) == 1", "follow=True, data=data ) assert response.status_code == 200 rejected_submission.refresh_from_db() assert rejected_submission.title", "== 200 submission.refresh_from_db() assert submission.state == SubmissionStates.WITHDRAWN assert len(djmail.outbox) ==", "def test_persists_changed_locale(multilingual_event, orga_user, orga_client): assert orga_user.locale == 'en' response =", "speaker_client.get(submission.event.urls.user_submissions, follow=True) assert response.status_code == 200 assert submission.title in response.content.decode()", "submission.state == SubmissionStates.WITHDRAWN @pytest.mark.django_db def test_cannot_withdraw_accepted_submission(speaker_client, accepted_submission): response = speaker_client.get(accepted_submission.urls.withdraw,", "def test_cannot_edit_rejected_submission(other_speaker_client, 
rejected_submission): title = rejected_submission.title data = { 'title':", "response.status_code == 200 new_token = Token.objects.filter(user=speaker).first().key assert new_token != old_token", "@pytest.mark.django_db def test_cannot_withdraw_accepted_submission(speaker_client, accepted_submission): response = speaker_client.get(accepted_submission.urls.withdraw, follow=True) accepted_submission.refresh_from_db() assert", "response = speaker_client.post( event.urls.user_delete, data={'really': True}, follow=True ) assert response.status_code", "assert submission.state != SubmissionStates.WITHDRAWN @pytest.mark.django_db def test_submission_withdraw_if_rejected(speaker_client, submission): with scope(event=submission.event):", "night', f'question_{speaker_boolean_question.id}': 'True', f'question_{speaker_file_question.id}': f, f'question_{speaker_text_question.id}': 'Green is totally the", "@pytest.mark.django_db def test_can_edit_profile(speaker, event, speaker_client): response = speaker_client.post( event.urls.user, data={", "speaker.profiles.get(event=event).biography != '' response = speaker_client.post( event.urls.user_delete, data={'really': True}, follow=True", "'Einreichung' in second_response.content.decode() @pytest.mark.django_db def test_persists_changed_locale(multilingual_event, orga_user, orga_client): assert orga_user.locale", "= speaker_client.post(submission.urls.withdraw, follow=True) assert response.status_code == 200 with scope(event=submission.event): submission.refresh_from_db()", "event.urls.user_delete, data={'really': True}, follow=True ) assert response.status_code == 200 with", "} response = speaker_client.post(submission.urls.user_base, follow=True, data=data) assert response.status_code == 200", "'', 'form': 'login', }, follow=True, ) assert response.status_code == 200", "!= '<EMAIL>' @pytest.mark.django_db def test_can_edit_and_update_speaker_answers( speaker, event, speaker_question, speaker_boolean_question, speaker_client,", "submission.resources.last() assert submission.title in str(resource_one) f = SimpleUploadedFile('testfile.txt', b'file_content') data", "0, 'resource-MIN_NUM_FORMS': 0, 'resource-MAX_NUM_FORMS': 1000, } response = speaker_client.post(submission.urls.user_base, follow=True,", "@pytest.mark.django_db def test_cannot_edit_confirmed_slot_count(speaker_client, confirmed_submission): submission = confirmed_submission submission.event.settings.present_multiple_times = True", "assert submission.slot_count != 13 @pytest.mark.django_db def test_cannot_edit_rejected_submission(other_speaker_client, rejected_submission): title =", "len(djmail.outbox) == 0 @pytest.mark.django_db def test_submission_withdraw_if_accepted(speaker_client, submission): djmail.outbox = []", "'text': 'C\\'mon, it will be fun!', } response = speaker_client.post(submission.urls.invite,", "response.content.decode() assert submission.resources.count() == 2 assert new_resource.description == 'new resource'", "forever.' 
@pytest.mark.django_db def test_can_edit_login_info(speaker, event, speaker_client): response = speaker_client.post( event.urls.user,", "submission.state == SubmissionStates.CONFIRMED @pytest.mark.django_db def test_submission_accept_with_missing_availability(speaker_client, submission): submission.event.settings.cfp_request_availabilities = True", "assert submission.state == SubmissionStates.WITHDRAWN @pytest.mark.django_db def test_cannot_withdraw_accepted_submission(speaker_client, accepted_submission): response =", "'', 'resource-2-description': 'new resource', 'resource-2-resource': f, 'resource-TOTAL_FORMS': 3, 'resource-INITIAL_FORMS': 2,", "== SubmissionStates.CONFIRMED @pytest.mark.django_db def test_can_reconfirm_submission(speaker_client, accepted_submission): accepted_submission.state = SubmissionStates.CONFIRMED accepted_submission.save()", "200 assert accepted_submission.state == SubmissionStates.ACCEPTED @pytest.mark.django_db def test_can_edit_submission(speaker_client, submission, resource,", "assert 'login/?next=' in response.redirect_chain[-1][0] assert submission.state == SubmissionStates.ACCEPTED @pytest.mark.django_db def", "with scope(event=event): speaker.refresh_from_db() assert speaker.profiles.get(event=event).biography == '' assert speaker.name ==", "django_scopes import scope from rest_framework.authtoken.models import Token from pretalx.submission.models import", "== title @pytest.mark.django_db def test_can_edit_submission_type(speaker_client, submission, event): with scope(event=submission.event): new_type", "assert response.status_code == 200 with scope(event=event): speaker.refresh_from_db() assert speaker.profiles.get(event=event).biography !=", "speaker_client.post(event.urls.user_delete, follow=True) assert response.status_code == 200 with scope(event=event): speaker.refresh_from_db() assert", "scope(event=event): assert speaker.profiles.get(event=event).biography != '' response = speaker_client.post( event.urls.user_delete, data={'really':", "reverse('cfp:locale.set', kwargs={'event': multilingual_event.slug}) + f'?locale=de&next=/{multilingual_event.slug}/', follow=True, ) orga_user.refresh_from_db() assert response.status_code", "follow=True) rejected_submission.refresh_from_db() assert response.status_code == 200 assert rejected_submission.state == SubmissionStates.REJECTED", "== 200 speaker.refresh_from_db() assert speaker.email == '<EMAIL>' @pytest.mark.django_db def test_can_edit_login_info_wrong_password(speaker,", "assert ( speaker.answers.get(question_id=speaker_text_question.pk).answer == 'Green is totally the best color.'", "follow=True) assert response.status_code == 200 with scope(event=event): speaker.refresh_from_db() assert speaker.profiles.get(event=event).biography", "follow=True, ) orga_user.refresh_from_db() assert response.status_code == 200 assert orga_user.locale ==", "accepted_submission.refresh_from_db() assert response.status_code == 200 assert accepted_submission.state == SubmissionStates.ACCEPTED @pytest.mark.django_db", "'<NAME>' @pytest.mark.django_db def test_can_change_api_token(speaker, event, speaker_client): speaker.regenerate_token() old_token = Token.objects.filter(user=speaker).first().key", "response.redirect_chain[-1][0] @pytest.mark.django_db def test_submission_withdraw(speaker_client, submission): djmail.outbox = [] submission.state =", "rejected_submission.notes, 'resource-TOTAL_FORMS': 0, 'resource-INITIAL_FORMS': 0, 'resource-MIN_NUM_FORMS': 0, 'resource-MAX_NUM_FORMS': 1000, }", "speaker, event, 
speaker_question, speaker_boolean_question, speaker_client, speaker_text_question, speaker_file_question, ): with scope(event=event):", "== SubmissionStates.WITHDRAWN assert len(djmail.outbox) == 1 @pytest.mark.django_db def test_submission_withdraw_if_confirmed(speaker_client, submission):", "pytest from django.conf import settings from django.core import mail as", "SubmissionStates.SUBMITTED submission.save() response = speaker_client.post(submission.urls.withdraw, follow=True) assert response.status_code == 200", "'biography': 'Ruling since forever.', 'form': 'profile', }, follow=True, ) assert", "accepted_submission.state == SubmissionStates.CONFIRMED @pytest.mark.django_db def test_can_reconfirm_submission(speaker_client, accepted_submission): accepted_submission.state = SubmissionStates.CONFIRMED", "assert response.status_code == 200 with scope(event=submission.event): submission.refresh_from_db() assert submission.slot_count ==", "= submission.resources.exclude(pk=resource_one.pk).first() assert submission.title == 'Ein ganz neuer Titel', response.content.decode()", "assert response.status_code == 200 with scope(event=submission.event): submission.refresh_from_db() assert submission.slot_count !=", "== 200 with scope(event=event): speaker.refresh_from_db() assert speaker.profiles.get(event=event).biography != '' assert", "submission.speakers.count() == 2 @pytest.mark.django_db def test_wrong_acceptance_link(orga_client, submission): assert submission.speakers.count() ==", "submission): with scope(event=submission.event): submission.event.settings.present_multiple_times = True data = { 'title':", "orga_client.post(submission.urls.accept_invitation, follow=True) submission.refresh_from_db() assert response.status_code == 200 assert submission.speakers.count() ==", "is totally the best color.' 
) file_answer = speaker.answers.get(question_id=speaker_file_question.pk) assert", "not submission.resources.filter(pk=resource_two.pk).exists() @pytest.mark.django_db def test_can_edit_slot_count(speaker_client, submission): with scope(event=submission.event): submission.event.settings.present_multiple_times =", "response.status_code == 200 rejected_submission.refresh_from_db() assert rejected_submission.title == title @pytest.mark.django_db def", "assert submission.code in submission.urls.confirm response = client.post( submission.urls.confirm.replace(submission.code, \"foo\"), follow=True", "= SubmissionStates.SUBMITTED submission.save() response = speaker_client.post(submission.urls.withdraw, follow=True) assert response.status_code ==", "== new_type @pytest.mark.django_db def test_cannot_edit_submission_type_after_acceptance(speaker_client, submission, event): with scope(event=submission.event): submission.accept()", "'form': 'questions', }, follow=True, ) assert response.status_code == 200 with", "'resource-TOTAL_FORMS': 0, 'resource-INITIAL_FORMS': 0, 'resource-MIN_NUM_FORMS': 0, 'resource-MAX_NUM_FORMS': 1000, } response", "speaker_client): with scope(event=event): assert speaker.profiles.get(event=event).biography != '' response = speaker_client.post(", "'submission' in first_response.content.decode() assert 'Einreichung' not in first_response.content.decode() second_response =", "'abstract': submission.abstract, 'notes': submission.notes, 'slot_count': submission.slot_count, 'resource-0-id': resource_one.id, 'resource-0-description': 'new", ") assert response.status_code == 200 assert response.redirect_chain[-1][1] == 302 assert", "response = speaker_client.post( event.urls.user, data={ 'name': '<NAME>', 'biography': 'Ruling since", "test_cannot_see_other_submission(speaker_client, other_submission): response = speaker_client.get(other_submission.urls.user_base, follow=True) assert response.status_code == 404", "speaker_client.post(submission.urls.withdraw, follow=True) assert response.status_code == 200 submission.refresh_from_db() assert submission.state ==", "= speaker_client.post( event.urls.user, data={ 'form': 'token', }, follow=True, ) assert", "@pytest.mark.django_db def test_can_confirm_submission(speaker_client, accepted_submission): response = speaker_client.get(accepted_submission.urls.confirm, follow=True) accepted_submission.refresh_from_db() assert", "resource_one = submission.resources.first() resource_two = submission.resources.last() assert submission.title in str(resource_one)", "sky', 'form': 'questions', }, follow=True, ) assert response.status_code == 200", "rejected_submission.state == SubmissionStates.REJECTED @pytest.mark.django_db def test_can_withdraw_submission(speaker_client, submission): response = speaker_client.get(submission.urls.withdraw,", "'resource-TOTAL_FORMS': 3, 'resource-INITIAL_FORMS': 2, 'resource-MIN_NUM_FORMS': 0, 'resource-MAX_NUM_FORMS': 1000, } response", "'resource-INITIAL_FORMS': 2, 'resource-MIN_NUM_FORMS': 0, 'resource-MAX_NUM_FORMS': 1000, } response = speaker_client.post(submission.urls.user_base,", "submission.refresh_from_db() assert submission.slot_count != 13 @pytest.mark.django_db def test_cannot_edit_rejected_submission(other_speaker_client, rejected_submission): title", "= speaker_client.post(accepted_submission.urls.confirm, follow=True) accepted_submission.refresh_from_db() assert response.status_code == 200 assert accepted_submission.state", "event): with scope(event=submission.event): new_type = 
event.submission_types.create(name='Other', default_duration=13) data = {", "file_answer.answer.startswith('file://') assert file_answer.answer_file.read() == b'file_content' assert (settings.MEDIA_ROOT / file_answer.answer_file.name).exists() response", "follow=True, data={'email': 'invalidemail'} ) assert response.status_code == 200 data =", "response.status_code == 200 data = { 'speaker': '<EMAIL>', 'subject': 'Please", "speaker_client): speaker.regenerate_token() old_token = Token.objects.filter(user=speaker).first().key response = speaker_client.post( event.urls.user, data={", "with scope(event=event): speaker.refresh_from_db() assert speaker.profiles.get(event=event).biography != 'Ruling since forever.' response", "def test_can_edit_login_info_wrong_password(speaker, event, speaker_client): response = speaker_client.post( event.urls.user, data={ 'old_password':", "the best color.', 'form': 'questions', }, follow=True, ) assert response.status_code", "= speaker_client.get(accepted_submission.urls.confirm, follow=True) accepted_submission.refresh_from_db() assert response.status_code == 200 assert accepted_submission.state", "= speaker_client.post(submission.urls.invite, follow=True, data=data) assert response.status_code == 200 assert len(djmail.outbox)", "'<NAME>' @pytest.mark.django_db def test_can_delete_profile(speaker, event, speaker_client): with scope(event=event): assert speaker.profiles.get(event=event).biography", "in str(resource_one) f = SimpleUploadedFile('testfile.txt', b'file_content') data = { 'title':", "'resource-2-resource': f, 'resource-TOTAL_FORMS': 3, 'resource-INITIAL_FORMS': 2, 'resource-MIN_NUM_FORMS': 0, 'resource-MAX_NUM_FORMS': 1000,", "f'question_{speaker_question.id}': 'black as the night', f'question_{speaker_boolean_question.id}': 'True', f'question_{speaker_file_question.id}': f, f'question_{speaker_text_question.id}':", "first_response = client.get(multilingual_event.cfp.urls.public, follow=True) assert 'submission' in first_response.content.decode() assert 'Einreichung'", "submission.speakers.count() == 1 response = orga_client.post(submission.urls.accept_invitation, follow=True) submission.refresh_from_db() assert response.status_code", "= speaker_client.post(submission.urls.withdraw, follow=True) submission.refresh_from_db() assert response.status_code == 200 assert submission.state", "@pytest.mark.django_db def test_can_edit_login_info_wrong_password(speaker, event, speaker_client): response = speaker_client.post( event.urls.user, data={", "client.post( submission.urls.confirm.replace(submission.code, \"foo\"), follow=True ) assert response.status_code == 200 assert", "def test_can_invite_speaker(speaker_client, submission): djmail.outbox = [] response = speaker_client.get( submission.urls.invite,", "== 200 assert len(djmail.outbox) == 1 assert djmail.outbox[0].to == ['<EMAIL>']", "== 200 assert orga_user.locale == 'de' @pytest.mark.django_db def test_can_invite_speaker(speaker_client, submission):", "== 2 resource_one = submission.resources.first() resource_two = submission.resources.last() assert submission.title", "submission.abstract, 'notes': submission.notes, 'slot_count': 13, 'resource-TOTAL_FORMS': 0, 'resource-INITIAL_FORMS': 0, 'resource-MIN_NUM_FORMS':", "{ 'title': 'Ein ganz neuer Titel', 'submission_type': new_type.pk, 'content_locale': submission.content_locale,", "}, follow=True, ) assert response.status_code == 200 with scope(event=event): speaker.refresh_from_db()", "follow=True) submission.refresh_from_db() assert 
response.status_code == 200 assert submission.state == SubmissionStates.SUBMITTED", "with scope(event=submission.event): submission.refresh_from_db() assert submission.slot_count != 13 @pytest.mark.django_db def test_cannot_edit_rejected_submission(other_speaker_client,", "@pytest.mark.django_db def test_can_see_submission(speaker_client, submission): response = speaker_client.get(submission.urls.user_base, follow=True) assert response.status_code", "assert submission.state == SubmissionStates.WITHDRAWN assert len(djmail.outbox) == 0 @pytest.mark.django_db def", "speaker_file_question, ): with scope(event=event): answer = speaker.answers.filter(question_id=speaker_question.pk).first() assert not answer", "in second_response.content.decode() @pytest.mark.django_db def test_persists_changed_locale(multilingual_event, orga_user, orga_client): assert orga_user.locale ==", "follow=True) assert response.status_code == 200 with scope(event=submission.event): submission.refresh_from_db() assert submission.state", "!= '<NAME>' @pytest.mark.django_db def test_can_delete_profile(speaker, event, speaker_client): with scope(event=event): assert", "event.urls.user, data={ 'name': '<NAME>', 'biography': 'Ruling since forever.', 'form': 'profile',", "== 200 with scope(event=event): answer.refresh_from_db() assert answer.answer == 'green as", "'True' assert ( speaker.answers.get(question_id=speaker_text_question.pk).answer == 'Green is totally the best", "test_can_edit_submission_type(speaker_client, submission, event): with scope(event=submission.event): new_type = event.submission_types.create(name='Other', default_duration=13) data", "'content_locale': submission.content_locale, 'description': submission.description, 'abstract': submission.abstract, 'notes': submission.notes, 'resource-TOTAL_FORMS': 0,", "follow=True, ) assert response.status_code == 200 speaker.refresh_from_db() assert speaker.email ==", "rejected_submission.save() response = other_speaker_client.get(rejected_submission.urls.confirm, follow=True) rejected_submission.refresh_from_db() assert response.status_code == 200", "def test_can_see_submission_list(speaker_client, submission): response = speaker_client.get(submission.event.urls.user_submissions, follow=True) assert response.status_code ==", "submission.state == SubmissionStates.SUBMITTED response = speaker_client.post(submission.urls.withdraw, follow=True) submission.refresh_from_db() assert response.status_code", "'abstract': submission.abstract, 'notes': submission.notes, 'resource-TOTAL_FORMS': 0, 'resource-INITIAL_FORMS': 0, 'resource-MIN_NUM_FORMS': 0,", "= speaker_client.post( event.urls.user, data={ f'question_{speaker_question.id}': 'green as the sky', 'form':", "be fun!', } response = speaker_client.post(submission.urls.invite, follow=True, data=data) assert response.status_code", "assert response.status_code == 404 assert submission.speakers.count() == 1 @pytest.mark.django_db @pytest.mark.parametrize('request_availability',", "client.post(submission.urls.confirm, follow=True) submission.refresh_from_db() assert response.status_code == 200 assert response.redirect_chain[-1][1] ==", "response.status_code == 200 with scope(event=submission.event): submission.refresh_from_db() assert submission.submission_type == new_type", "def test_cannot_confirm_rejected_submission(other_speaker_client, rejected_submission): rejected_submission.state = SubmissionStates.REJECTED rejected_submission.save() response = other_speaker_client.get(rejected_submission.urls.confirm,", 
"speaker.name != '<NAME>' @pytest.mark.django_db def test_can_delete_profile(speaker, event, speaker_client): with scope(event=event):", "submission.resources.filter(pk=resource_two.pk).exists() @pytest.mark.django_db def test_can_edit_slot_count(speaker_client, submission): with scope(event=submission.event): submission.event.settings.present_multiple_times = True", "rejected_submission.refresh_from_db() assert response.status_code == 200 assert rejected_submission.state == SubmissionStates.REJECTED @pytest.mark.django_db", "response = speaker_client.post( event.urls.user, data={ f'question_{speaker_question.id}': 'black as the night',", "data=data) assert response.status_code == 200 assert len(djmail.outbox) == 1 assert", "submission.accept() response = speaker_client.post(submission.urls.withdraw, follow=True) assert response.status_code == 200 with", "assert speaker.profiles.get(event=event).biography != 'Ruling since forever.' response = speaker_client.post( event.urls.user,", "response = other_speaker_client.get(rejected_submission.urls.confirm, follow=True) rejected_submission.refresh_from_db() assert response.status_code == 200 assert", "== 'Green is totally the best color.' ) file_answer =", "django.urls import reverse from django_scopes import scope from rest_framework.authtoken.models import", "'subject': 'Please join!', 'text': 'C\\'mon, it will be fun!', }", "fun!', } response = speaker_client.post(submission.urls.invite, follow=True, data=data) assert response.status_code ==", "response.status_code == 200 assert accepted_submission.state == SubmissionStates.ACCEPTED @pytest.mark.django_db def test_can_edit_submission(speaker_client,", "!= '' response = speaker_client.post( event.urls.user_delete, data={'really': True}, follow=True )", "200 assert len(djmail.outbox) == 1 assert djmail.outbox[0].to == ['<EMAIL>'] @pytest.mark.django_db", "response.status_code == 200 assert submission.speakers.count() == 2 @pytest.mark.django_db def test_wrong_acceptance_link(orga_client,", "'Green is totally the best color.' 
) file_answer = speaker.answers.get(question_id=speaker_file_question.pk)", "= orga_client.post(submission.urls.accept_invitation, follow=True) submission.refresh_from_db() assert response.status_code == 200 assert submission.speakers.count()", "response.status_code == 200 with scope(event=event): speaker.refresh_from_db() assert speaker.profiles.get(event=event).biography != ''", "b'file_content' assert (settings.MEDIA_ROOT / file_answer.answer_file.name).exists() response = speaker_client.post( event.urls.user, data={", "= client.get(multilingual_event.cfp.urls.public, follow=True) assert 'submission' in first_response.content.decode() assert 'Einreichung' not", "== 200 with scope(event=submission.event): submission.refresh_from_db() assert submission.slot_count == 13 @pytest.mark.django_db", "== SubmissionStates.ACCEPTED @pytest.mark.django_db def test_can_edit_submission(speaker_client, submission, resource, other_resource): with scope(event=submission.event):", "ganz neuer Titel', 'submission_type': rejected_submission.submission_type.pk, 'content_locale': rejected_submission.content_locale, 'description': rejected_submission.description, 'abstract':", ") orga_user.refresh_from_db() assert response.status_code == 200 assert orga_user.locale == 'de'", "reverse from django_scopes import scope from rest_framework.authtoken.models import Token from", "submission.resources.count() == 2 assert new_resource.description == 'new resource' assert new_resource.resource.read()", "default_duration=13) data = { 'title': 'Ein ganz neuer Titel', 'submission_type':", "rejected_submission.refresh_from_db() assert rejected_submission.title == title @pytest.mark.django_db def test_can_edit_submission_type(speaker_client, submission, event):", "test_can_edit_login_info_wrong_password(speaker, event, speaker_client): response = speaker_client.post( event.urls.user, data={ 'old_password': '<PASSWORD>!',", "response = speaker_client.post(submission.urls.withdraw, follow=True) assert response.status_code == 200 submission.refresh_from_db() assert", "test_submission_accept_with_missing_availability(speaker_client, submission): submission.event.settings.cfp_request_availabilities = True submission.event.settings.cfp_require_availabilities = True submission.state =", "'<EMAIL>', 'password': '', 'password_repeat': '', 'form': 'login', }, follow=True, )", "def test_submission_withdraw_if_confirmed(speaker_client, submission): with scope(event=submission.event): submission.accept() submission.confirm() response = speaker_client.post(submission.urls.withdraw,", "color.', 'form': 'questions', }, follow=True, ) assert response.status_code == 200", "kwargs={'event': multilingual_event.slug}) + f'?locale=de&next=/{multilingual_event.slug}/', follow=True, ) orga_user.refresh_from_db() assert response.status_code ==", "speaker.profiles.get(event=event).biography != '' response = speaker_client.post(event.urls.user_delete, follow=True) assert response.status_code ==", "data=data) assert response.status_code == 200 with scope(event=submission.event): submission.refresh_from_db() assert submission.submission_type", "== 'new resource' assert new_resource.resource.read() == b'file_content' assert not submission.resources.filter(pk=resource_two.pk).exists()", "follow=True) accepted_submission.refresh_from_db() assert response.status_code == 200 assert accepted_submission.state == SubmissionStates.ACCEPTED", "'resource-1-resource': resource_two.resource, 'resource-2-id': '', 'resource-2-description': 'new resource', 
'resource-2-resource': f, 'resource-TOTAL_FORMS':", "'<PASSWORD>!', 'email': '<EMAIL>', 'password': '', 'password_repeat': '', 'form': 'login', },", "(True, False)) def test_submission_accept(speaker_client, submission, request_availability): submission.event.settings.cfp_request_availabilities = request_availability submission.state", "= SubmissionStates.ACCEPTED submission.save() response = speaker_client.post(submission.urls.confirm, follow=True) submission.refresh_from_db() assert response.status_code", "assert response.status_code == 200 assert submission.state == SubmissionStates.SUBMITTED response =", "in response.content.decode() @pytest.mark.django_db def test_cannot_see_other_submission(speaker_client, other_submission): response = speaker_client.get(other_submission.urls.user_base, follow=True)", "{ 'title': 'Ein ganz neuer Titel', 'submission_type': rejected_submission.submission_type.pk, 'content_locale': rejected_submission.content_locale,", "def test_can_edit_profile(speaker, event, speaker_client): response = speaker_client.post( event.urls.user, data={ 'name':", "f = SimpleUploadedFile('testfile.txt', b'file_content') data = { 'title': 'Ein ganz", "assert speaker.profiles.get(event=event).biography != 'Ruling since forever.' @pytest.mark.django_db def test_can_edit_login_info(speaker, event,", "200 assert submission.title in response.content.decode() @pytest.mark.django_db def test_cannot_see_other_submission(speaker_client, other_submission): response", "submission.notes, 'resource-TOTAL_FORMS': 0, 'resource-INITIAL_FORMS': 0, 'resource-MIN_NUM_FORMS': 0, 'resource-MAX_NUM_FORMS': 1000, }", "'token', }, follow=True, ) assert response.status_code == 200 new_token =", "speaker.refresh_from_db() assert speaker.email != '<EMAIL>' @pytest.mark.django_db def test_can_edit_and_update_speaker_answers( speaker, event,", "answer = speaker.answers.filter(question_id=speaker_question.pk).first() assert not answer f = SimpleUploadedFile('testfile.txt', b'file_content')", "scope(event=event): speaker.refresh_from_db() assert speaker.profiles.get(event=event).biography == '' assert speaker.name == '<NAME>'", "orga_client.post( submission.urls.accept_invitation + 'olololol', follow=True ) submission.refresh_from_db() assert response.status_code ==", "'de' @pytest.mark.django_db def test_can_invite_speaker(speaker_client, submission): djmail.outbox = [] response =", "submission.save() response = client.post(submission.urls.confirm, follow=True) submission.refresh_from_db() assert response.status_code == 200", "submission.urls.confirm.replace(submission.code, \"foo\"), follow=True ) assert response.status_code == 200 assert response.redirect_chain[-1][1]", "resource', 'resource-2-resource': f, 'resource-TOTAL_FORMS': 3, 'resource-INITIAL_FORMS': 2, 'resource-MIN_NUM_FORMS': 0, 'resource-MAX_NUM_FORMS':", "submission.save() assert submission.code in submission.urls.confirm response = client.post( submission.urls.confirm.replace(submission.code, \"foo\"),", "response.status_code == 200 assert response.redirect_chain[-1][1] == 302 assert 'login/?next=' in", "with scope(event=submission.event): data = { 'title': 'Ein ganz neuer Titel',", "}, follow=True, ) assert response.status_code == 200 with scope(event=event): answer", "SubmissionStates.WITHDRAWN @pytest.mark.django_db def test_submission_withdraw_if_rejected(speaker_client, submission): with scope(event=submission.event): submission.reject() response =", "speaker_client.get(accepted_submission.urls.confirm, follow=True) 
accepted_submission.refresh_from_db() assert response.status_code == 200 assert accepted_submission.state ==", "13, 'resource-TOTAL_FORMS': 0, 'resource-INITIAL_FORMS': 0, 'resource-MIN_NUM_FORMS': 0, 'resource-MAX_NUM_FORMS': 1000, }", "submission.description, 'abstract': submission.abstract, 'notes': submission.notes, 'slot_count': 13, 'resource-TOTAL_FORMS': 0, 'resource-INITIAL_FORMS':", "as the sky' @pytest.mark.django_db def test_cannot_delete_profile_on_first_try(speaker, event, speaker_client): with scope(event=event):", "= client.get( reverse('cfp:locale.set', kwargs={'event': multilingual_event.slug}) + f'?locale=de&next=/{multilingual_event.slug}/', follow=True, ) assert", "speaker_client.post(submission.urls.withdraw, follow=True) submission.refresh_from_db() assert response.status_code == 200 assert submission.state ==", "'resource-1-DELETE': True, 'resource-1-description': resource_two.description, 'resource-1-resource': resource_two.resource, 'resource-2-id': '', 'resource-2-description': 'new", "with scope(event=submission.event): submission.refresh_from_db() assert submission.state == SubmissionStates.WITHDRAWN assert len(djmail.outbox) ==", "response.status_code == 200 with scope(event=event): speaker.refresh_from_db() assert speaker.profiles.get(event=event).biography == ''", "from rest_framework.authtoken.models import Token from pretalx.submission.models import SubmissionStates @pytest.mark.django_db def", "'<EMAIL>' @pytest.mark.django_db def test_can_edit_login_info_wrong_password(speaker, event, speaker_client): response = speaker_client.post( event.urls.user,", "= speaker_client.get(submission.urls.user_base, follow=True) assert response.status_code == 200 assert submission.title in", "'slot_count': 13, 'resource-TOTAL_FORMS': 0, 'resource-INITIAL_FORMS': 0, 'resource-MIN_NUM_FORMS': 0, 'resource-MAX_NUM_FORMS': 1000,", "assert submission.submission_type != new_type @pytest.mark.django_db def test_can_edit_profile(speaker, event, speaker_client): response", "submission.submission_type.pk, 'content_locale': submission.content_locale, 'description': submission.description, 'abstract': submission.abstract, 'notes': submission.notes, 'slot_count':", "submission.notes, 'slot_count': 13, 'resource-TOTAL_FORMS': 0, 'resource-INITIAL_FORMS': 0, 'resource-MIN_NUM_FORMS': 0, 'resource-MAX_NUM_FORMS':", "'name': '<NAME>', 'biography': 'Ruling since forever.', 'form': 'profile', }, follow=True,", "assert response.status_code == 200 with scope(event=event): answer = speaker.answers.get(question_id=speaker_question.pk) assert", "follow=True) assert response.status_code == 404 @pytest.mark.django_db def test_can_confirm_submission(speaker_client, accepted_submission): response", "== 2 submission.refresh_from_db() resource_one.refresh_from_db() new_resource = submission.resources.exclude(pk=resource_one.pk).first() assert submission.title ==", "def test_submission_accept_nologin(client, submission): submission.state = SubmissionStates.ACCEPTED submission.save() response = client.post(submission.urls.confirm,", ") assert response.status_code == 200 with scope(event=event): answer = speaker.answers.get(question_id=speaker_question.pk)", "response.status_code == 200 assert submission.state == SubmissionStates.CONFIRMED @pytest.mark.django_db def test_submission_accept_with_missing_availability(speaker_client,", "@pytest.mark.django_db def test_can_edit_slot_count(speaker_client, submission): with scope(event=submission.event): submission.event.settings.present_multiple_times = True 
data", "def test_cannot_edit_confirmed_slot_count(speaker_client, confirmed_submission): submission = confirmed_submission submission.event.settings.present_multiple_times = True with", "2, 'resource-MIN_NUM_FORMS': 0, 'resource-MAX_NUM_FORMS': 1000, } response = speaker_client.post(submission.urls.user_base, follow=True,", "= speaker_client.get(submission.event.urls.user_submissions, follow=True) assert response.status_code == 200 assert submission.title in", "speaker_client): event.settings.cfp_require_availabilities = True response = speaker_client.post( event.urls.user, data={ 'name':", "200 with scope(event=event): answer = speaker.answers.get(question_id=speaker_question.pk) assert answer.answer == 'black", "def test_submission_accept_with_missing_availability(speaker_client, submission): submission.event.settings.cfp_request_availabilities = True submission.event.settings.cfp_require_availabilities = True submission.state", "assert response.status_code == 200 assert accepted_submission.state == SubmissionStates.CONFIRMED @pytest.mark.django_db def", "\"foo\"), follow=True ) assert response.status_code == 200 assert response.redirect_chain[-1][1] ==", "forever.', 'form': 'profile', }, follow=True, ) assert response.status_code == 200", "assert new_resource.resource.read() == b'file_content' assert not submission.resources.filter(pk=resource_two.pk).exists() @pytest.mark.django_db def test_can_edit_slot_count(speaker_client,", "'login/?next=' in response.redirect_chain[-1][0] @pytest.mark.django_db def test_submission_withdraw(speaker_client, submission): djmail.outbox = []", "def test_submission_withdraw_if_rejected(speaker_client, submission): with scope(event=submission.event): submission.reject() response = speaker_client.post(submission.urls.withdraw, follow=True)", "submission.refresh_from_db() assert submission.state == SubmissionStates.WITHDRAWN assert len(djmail.outbox) == 0 @pytest.mark.django_db", "scope(event=submission.event): submission.refresh_from_db() assert submission.slot_count == 13 @pytest.mark.django_db def test_cannot_edit_confirmed_slot_count(speaker_client, confirmed_submission):", "== SubmissionStates.REJECTED @pytest.mark.django_db def test_can_withdraw_submission(speaker_client, submission): response = speaker_client.get(submission.urls.withdraw, follow=True)", "speaker.profiles.get(event=event).biography != '' assert speaker.name != '<NAME>' @pytest.mark.django_db def test_can_delete_profile(speaker,", "assert accepted_submission.state == SubmissionStates.CONFIRMED @pytest.mark.django_db def test_can_reconfirm_submission(speaker_client, accepted_submission): accepted_submission.state =", "0, 'resource-MIN_NUM_FORMS': 0, 'resource-MAX_NUM_FORMS': 1000, } response = other_speaker_client.post( rejected_submission.urls.user_base,", "submission.title in response.content.decode() @pytest.mark.django_db def test_can_see_submission(speaker_client, submission): response = speaker_client.get(submission.urls.user_base,", "f'?locale=de&next=/{multilingual_event.slug}/', follow=True, ) assert 'Einreichung' in second_response.content.decode() @pytest.mark.django_db def test_persists_changed_locale(multilingual_event,", "200 assert submission.state == SubmissionStates.WITHDRAWN @pytest.mark.django_db def test_cannot_withdraw_accepted_submission(speaker_client, accepted_submission): response", "0, 'resource-MAX_NUM_FORMS': 1000, } response = speaker_client.post(submission.urls.user_base, follow=True, data=data) assert", "def test_can_withdraw_submission(speaker_client, 
submission): response = speaker_client.get(submission.urls.withdraw, follow=True) submission.refresh_from_db() assert response.status_code", "assert response.status_code == 200 assert response.redirect_chain[-1][1] == 302 assert 'login/?next='", "== 'black as the night' assert speaker.answers.get(question_id=speaker_boolean_question.pk).answer == 'True' assert", "'' assert speaker.name == '<NAME>' assert speaker.email.startswith('deleted_user') assert speaker.email.endswith('@localhost') @pytest.mark.django_db", "scope(event=submission.event): submission.refresh_from_db() assert submission.state == SubmissionStates.WITHDRAWN assert len(djmail.outbox) == 1", "submission.refresh_from_db() assert submission.state == SubmissionStates.WITHDRAWN assert len(djmail.outbox) == 1 @pytest.mark.django_db", "200 with scope(event=submission.event): submission.refresh_from_db() assert submission.slot_count != 13 @pytest.mark.django_db def", "== 200 with scope(event=event): speaker.refresh_from_db() assert speaker.profiles.get(event=event).biography == '' assert", "assert orga_user.locale == 'en' response = orga_client.get( reverse('cfp:locale.set', kwargs={'event': multilingual_event.slug})", "'title': 'Ein ganz neuer Titel', 'submission_type': submission.submission_type.pk, 'content_locale': submission.content_locale, 'description':", "rejected_submission.title == title @pytest.mark.django_db def test_can_edit_submission_type(speaker_client, submission, event): with scope(event=submission.event):", "'form': 'login', }, follow=True, ) assert response.status_code == 200 speaker.refresh_from_db()", "scope from rest_framework.authtoken.models import Token from pretalx.submission.models import SubmissionStates @pytest.mark.django_db", "response = speaker_client.post(submission.urls.user_base, follow=True, data=data) assert response.status_code == 200 with", "[] response = speaker_client.get( submission.urls.invite, follow=True, data={'email': 'invalidemail'} ) assert", "SubmissionStates.WITHDRAWN assert len(djmail.outbox) == 1 @pytest.mark.django_db def test_submission_withdraw_if_confirmed(speaker_client, submission): with", "speaker.answers.get(question_id=speaker_question.pk) assert answer.answer == 'black as the night' assert speaker.answers.get(question_id=speaker_boolean_question.pk).answer", "submission.refresh_from_db() assert response.status_code == 200 assert submission.speakers.count() == 2 @pytest.mark.django_db", "scope(event=submission.event): data = { 'title': 'Ein ganz neuer Titel', 'submission_type':", "from pretalx.submission.models import SubmissionStates @pytest.mark.django_db def test_can_see_submission_list(speaker_client, submission): response =", "data = { 'title': 'Ein ganz neuer Titel', 'submission_type': submission.submission_type.pk,", "django.core.files.uploadedfile import SimpleUploadedFile from django.urls import reverse from django_scopes import", "in first_response.content.decode() second_response = client.get( reverse('cfp:locale.set', kwargs={'event': multilingual_event.slug}) + f'?locale=de&next=/{multilingual_event.slug}/',", "(settings.MEDIA_ROOT / file_answer.answer_file.name).exists() response = speaker_client.post( event.urls.user, data={ f'question_{speaker_question.id}': 'green", "= speaker_client.post(submission.urls.withdraw, follow=True) assert response.status_code == 200 submission.refresh_from_db() assert submission.state", "submission): djmail.outbox = [] submission.state = SubmissionStates.SUBMITTED submission.save() response =", "sky' @pytest.mark.django_db 
def test_cannot_delete_profile_on_first_try(speaker, event, speaker_client): with scope(event=event): assert speaker.profiles.get(event=event).biography", "'resource-0-description': 'new resource name', 'resource-0-resource': resource_one.resource, 'resource-1-id': resource_two.id, 'resource-1-DELETE': True,", "'Ein ganz neuer Titel', response.content.decode() assert submission.resources.count() == 2 assert", "submission.urls.invite, follow=True, data={'email': 'invalidemail'} ) assert response.status_code == 200 data", "submission.resources.count() == 2 submission.refresh_from_db() resource_one.refresh_from_db() new_resource = submission.resources.exclude(pk=resource_one.pk).first() assert submission.title", "scope(event=event): answer = speaker.answers.filter(question_id=speaker_question.pk).first() assert not answer f = SimpleUploadedFile('testfile.txt',", "'<EMAIL>' @pytest.mark.django_db def test_can_edit_and_update_speaker_answers( speaker, event, speaker_question, speaker_boolean_question, speaker_client, speaker_text_question,", "f'question_{speaker_question.id}': 'green as the sky', 'form': 'questions', }, follow=True, )", "200 assert submission.speakers.count() == 2 @pytest.mark.django_db def test_wrong_acceptance_link(orga_client, submission): assert", "submission.state = SubmissionStates.ACCEPTED submission.save() assert submission.code in submission.urls.confirm response =", "assert submission.title in str(resource_one) f = SimpleUploadedFile('testfile.txt', b'file_content') data =", "def test_can_edit_slot_count(speaker_client, submission): with scope(event=submission.event): submission.event.settings.present_multiple_times = True data =", "test_wrong_acceptance_link(orga_client, submission): assert submission.speakers.count() == 1 response = orga_client.post( submission.urls.accept_invitation", "submission.refresh_from_db() assert submission.submission_type != new_type @pytest.mark.django_db def test_can_edit_profile(speaker, event, speaker_client):", "with scope(event=submission.event): submission.accept() new_type = event.submission_types.create(name='Other', default_duration=13) data = {", "response = orga_client.get( reverse('cfp:locale.set', kwargs={'event': multilingual_event.slug}) + f'?locale=de&next=/{multilingual_event.slug}/', follow=True, )", "== 302 assert 'login/?next=' in response.redirect_chain[-1][0] assert submission.state == SubmissionStates.ACCEPTED", "follow=True ) assert response.status_code == 200 with scope(event=event): speaker.refresh_from_db() assert", "SimpleUploadedFile('testfile.txt', b'file_content') response = speaker_client.post( event.urls.user, data={ f'question_{speaker_question.id}': 'black as", "200 assert submission.state == SubmissionStates.SUBMITTED response = speaker_client.post(submission.urls.withdraw, follow=True) submission.refresh_from_db()", "1 @pytest.mark.django_db @pytest.mark.parametrize('request_availability', (True, False)) def test_submission_accept(speaker_client, submission, request_availability): submission.event.settings.cfp_request_availabilities", "speaker_client.post( event.urls.user_delete, data={'really': True}, follow=True ) assert response.status_code == 200", "speaker.profiles.get(event=event).biography == '' assert speaker.name == '<NAME>' assert speaker.email.startswith('deleted_user') assert", "submission.content_locale, 'description': submission.description, 'abstract': submission.abstract, 'notes': submission.notes, 'slot_count': 13, 'resource-TOTAL_FORMS':", "== 200 with scope(event=event): 
speaker.refresh_from_db() assert speaker.profiles.get(event=event).biography != 'Ruling since", "will be fun!', } response = speaker_client.post(submission.urls.invite, follow=True, data=data) assert", "def test_can_accept_invitation(orga_client, submission): assert submission.speakers.count() == 1 response = orga_client.post(submission.urls.accept_invitation,", "speaker_client.get(submission.urls.user_base, follow=True) assert response.status_code == 200 assert submission.title in response.content.decode()", "scope(event=submission.event): submission.accept() response = speaker_client.post(submission.urls.withdraw, follow=True) assert response.status_code == 200", "follow=True, data=data) assert response.status_code == 200 with scope(event=submission.event): assert submission.resources.count()", "follow=True) assert 'submission' in first_response.content.decode() assert 'Einreichung' not in first_response.content.decode()", "== 200 speaker.refresh_from_db() assert speaker.email != '<EMAIL>' @pytest.mark.django_db def test_can_edit_and_update_speaker_answers(", "assert response.status_code == 200 data = { 'speaker': '<EMAIL>', 'subject':", "submission.urls.accept_invitation + 'olololol', follow=True ) submission.refresh_from_db() assert response.status_code == 404", "resource_two.description, 'resource-1-resource': resource_two.resource, 'resource-2-id': '', 'resource-2-description': 'new resource', 'resource-2-resource': f,", "Titel', 'submission_type': submission.submission_type.pk, 'content_locale': submission.content_locale, 'description': submission.description, 'abstract': submission.abstract, 'notes':", "= True with scope(event=submission.event): data = { 'title': 'Ein ganz", "SubmissionStates.ACCEPTED response = speaker_client.post(accepted_submission.urls.confirm, follow=True) accepted_submission.refresh_from_db() assert response.status_code == 200", "scope(event=submission.event): submission.refresh_from_db() assert submission.slot_count != 13 @pytest.mark.django_db def test_cannot_edit_rejected_submission(other_speaker_client, rejected_submission):", "with scope(event=event): assert speaker.profiles.get(event=event).biography != '' response = speaker_client.post( event.urls.user_delete,", "submission.refresh_from_db() assert response.status_code == 200 assert submission.state == SubmissionStates.SUBMITTED response", "assert answer.answer == 'green as the sky' @pytest.mark.django_db def test_cannot_delete_profile_on_first_try(speaker,", "submission.code in submission.urls.confirm response = client.post( submission.urls.confirm.replace(submission.code, \"foo\"), follow=True )", "'profile', }, follow=True, ) assert response.status_code == 200 with scope(event=event):", "the best color.' 
) file_answer = speaker.answers.get(question_id=speaker_file_question.pk) assert file_answer.answer.startswith('file://') assert", "assert len(djmail.outbox) == 1 @pytest.mark.django_db def test_submission_withdraw_if_confirmed(speaker_client, submission): with scope(event=submission.event):", "as djmail from django.core.files.uploadedfile import SimpleUploadedFile from django.urls import reverse", "200 with scope(event=submission.event): assert submission.resources.count() == 2 submission.refresh_from_db() resource_one.refresh_from_db() new_resource", "len(djmail.outbox) == 1 @pytest.mark.django_db def test_submission_withdraw_if_confirmed(speaker_client, submission): with scope(event=submission.event): submission.accept()", "'title': 'Ein ganz neuer Titel', 'submission_type': rejected_submission.submission_type.pk, 'content_locale': rejected_submission.content_locale, 'description':", "= submission.resources.last() assert submission.title in str(resource_one) f = SimpleUploadedFile('testfile.txt', b'file_content')", "with scope(event=event): assert speaker.profiles.get(event=event).biography != '' response = speaker_client.post(event.urls.user_delete, follow=True)", "not in first_response.content.decode() second_response = client.get( reverse('cfp:locale.set', kwargs={'event': multilingual_event.slug}) +", "SubmissionStates.ACCEPTED submission.save() assert submission.code in submission.urls.confirm response = client.post( submission.urls.confirm.replace(submission.code,", "resource, other_resource): with scope(event=submission.event): assert submission.resources.count() == 2 resource_one =", "assert file_answer.answer.startswith('file://') assert file_answer.answer_file.read() == b'file_content' assert (settings.MEDIA_ROOT / file_answer.answer_file.name).exists()", "submission.state = SubmissionStates.ACCEPTED submission.save() response = client.post(submission.urls.confirm, follow=True) submission.refresh_from_db() assert", "'content_locale': submission.content_locale, 'description': submission.description, 'abstract': submission.abstract, 'notes': submission.notes, 'slot_count': submission.slot_count,", "assert accepted_submission.state == SubmissionStates.ACCEPTED response = speaker_client.post(accepted_submission.urls.confirm, follow=True) accepted_submission.refresh_from_db() assert", "assert submission.speakers.count() == 1 response = orga_client.post( submission.urls.accept_invitation + 'olololol',", "rejected_submission.urls.user_base, follow=True, data=data ) assert response.status_code == 200 rejected_submission.refresh_from_db() assert", "== SubmissionStates.ACCEPTED response = speaker_client.post(accepted_submission.urls.confirm, follow=True) accepted_submission.refresh_from_db() assert response.status_code ==", "'old_password': '<PASSWORD>!', 'email': '<EMAIL>', 'password': '', 'password_repeat': '', 'form': 'login',", "orga_client): assert orga_user.locale == 'en' response = orga_client.get( reverse('cfp:locale.set', kwargs={'event':", "scope(event=event): speaker.refresh_from_db() assert speaker.profiles.get(event=event).biography != '' assert speaker.name != '<NAME>'", "new_resource.description == 'new resource' assert new_resource.resource.read() == b'file_content' assert not", "response = orga_client.post(submission.urls.accept_invitation, follow=True) submission.refresh_from_db() assert response.status_code == 200 assert", "200 new_token = Token.objects.filter(user=speaker).first().key assert new_token != old_token @pytest.mark.django_db def", "200 assert 
submission.state == SubmissionStates.ACCEPTED @pytest.mark.django_db def test_submission_accept_nologin(client, submission): submission.state", "with scope(event=submission.event): submission.accept() submission.confirm() response = speaker_client.post(submission.urls.withdraw, follow=True) assert response.status_code", "== SubmissionStates.ACCEPTED @pytest.mark.django_db def test_submission_accept_wrong_code(client, submission): submission.state = SubmissionStates.ACCEPTED submission.save()", "@pytest.mark.django_db def test_persists_changed_locale(multilingual_event, orga_user, orga_client): assert orga_user.locale == 'en' response", "'content_locale': rejected_submission.content_locale, 'description': rejected_submission.description, 'abstract': rejected_submission.abstract, 'notes': rejected_submission.notes, 'resource-TOTAL_FORMS': 0,", "new_type @pytest.mark.django_db def test_can_edit_profile(speaker, event, speaker_client): response = speaker_client.post( event.urls.user,", "): with scope(event=event): answer = speaker.answers.filter(question_id=speaker_question.pk).first() assert not answer f", "SimpleUploadedFile('testfile.txt', b'file_content') data = { 'title': 'Ein ganz neuer Titel',", "submission.state == SubmissionStates.WITHDRAWN assert len(djmail.outbox) == 0 @pytest.mark.django_db def test_submission_withdraw_if_accepted(speaker_client,", "submission): submission.state = SubmissionStates.ACCEPTED submission.save() response = client.post(submission.urls.confirm, follow=True) submission.refresh_from_db()", "submission.state == SubmissionStates.ACCEPTED @pytest.mark.django_db def test_submission_accept_nologin(client, submission): submission.state = SubmissionStates.ACCEPTED", "3, 'resource-INITIAL_FORMS': 2, 'resource-MIN_NUM_FORMS': 0, 'resource-MAX_NUM_FORMS': 1000, } response =", "== b'file_content' assert not submission.resources.filter(pk=resource_two.pk).exists() @pytest.mark.django_db def test_can_edit_slot_count(speaker_client, submission): with" ]
[ "SLAVE_ADDR_REG = 2 CS_REG = 3 MB_MAX_WRITE_REGNUM = 123 MB_MAX_READ_REGNUM", "LIMITED TO THE WARRANTIES OF MERCHANTABILITY, # FITNESS FOR A", "mb_bsp.get_error_count() print() print('master_parity_err_count = ', error_tuple[0][0]) print('master_start_bit_err_count = ', error_tuple[0][1])", "size mb_bsp.write_mb_master_pdu(pdu) # Set request PDU else: mb_bsp.write_mb_slave_cs(CONFIG_REG, config_val) #", "0 incr_err_count() def config_modbus(modbus_role, slave_addr, pdu, config_val): wait_mb_master_status('FSM status') if", "65535 MB_MAX_REG_VAL = 65535 MB_MAX_SLAVE_ADDR = 247 MB_MIN_SLAVE_ADDR = 1", "pdu.append(bytecount) for i in range(regnum_l): regval_h = (regval[i] & 0xff00)", "print('master_start_bit_err_count = ', error_tuple[0][1]) print('master_stop_bit_err_count = ', error_tuple[0][2]) print('master_addr_err_count =", "regval_l = regval[0] & 0xff pdu.append(regval_l) ref_pdu = pdu.copy() return", "if modbus_role == 'Master': if error_type == 'parity': count =", "TO THE WARRANTIES OF MERCHANTABILITY, # FITNESS FOR A PARTICULAR", "= 0 error_tuple = mb_bsp.get_error_count() if modbus_role == 'Both': for", "ref_pdu] def print_test_result(result_ok): if result_ok: msg = '\\tTest Successful' else:", "elif error_type == 'stop bit': count = error_tuple[1][2] elif error_type", "error_type == 'parity': count = error_tuple[1][0] elif error_type == 'start", "result_ok: msg = '\\tTest Successful' else: msg = '\\tTest FAILED'", "rights # to use, copy, modify, merge, publish, distribute, sublicense,", "0xff) pdu.append(addr_l) regval_h = (regval[0] & 0xff00) >> 8 pdu.append(regval_h)", "ref_pdu.append(0) return [pdu, ref_pdu] def generate_0x06_pdu(addr, regval): pdu = list()", "(addr & 0xff) pdu.append(addr_l) regval_h = (regval[0] & 0xff00) >>", "error_tuple[0][0] elif error_type == 'start bit': count = error_tuple[0][1] elif", "error_tuple[1][2]) print('slave_addr_err_count = ', error_tuple[1][3]) print('slave_crc_err_count = ', error_tuple[1][4]) print('--------------------------------')", "FCODE_0x3 = 0x3 FCODE_0x6 = 0x6 FCODE_0x10 = 0x10 def", "in error_tuple[0]: count += i elif modbus_role == 'Slave': for", "', error_tuple[0][0]) print('master_start_bit_err_count = ', error_tuple[0][1]) print('master_stop_bit_err_count = ', error_tuple[0][2])", "# Set configuration mb_bsp.write_mb_master_cs(SLAVE_ADDR_REG, slave_addr) # Set slave address mb_bsp.write_mb_master_cs(PDU_SIZE_REG,", "+= 1 setattr(incr_err_count, 'count', 0) def wait_mb_master_status(status): mb_bsp.wait_master_status(status) # 'FSM", "MB_MAX_READ_REGNUM = 125 MB_MAX_REG_ADDR = 65535 MB_MAX_REG_VAL = 65535 MB_MAX_SLAVE_ADDR", "print_test_result(result_ok): if result_ok: msg = '\\tTest Successful' else: msg =", "print('slave_start_bit_err_count = ', error_tuple[1][1]) print('slave_stop_bit_err_count = ', error_tuple[1][2]) print('slave_addr_err_count =", "regnum_l << 1 pdu.append(bytecount) for i in range(regnum_l): regval_h =", "address def generate_0x03_pdu(addr, regnum): pdu = list() ref_pdu = list()", "Copyright (c) 2021 <NAME>, <NAME> # Permission is hereby granted,", "'Master': mb_bsp.write_mb_master_cs(CONFIG_REG, config_val) # Set configuration mb_bsp.write_mb_master_cs(SLAVE_ADDR_REG, slave_addr) # Set", "== 'stop bit': count = error_tuple[0][2] elif error_type == 'address':", "list() pdu.append(0x3) ref_pdu.append(0x3) addr_h = (addr & 0xff00) >> 8", "# Set request PDU size mb_bsp.write_mb_master_pdu(pdu) # Set request PDU", "if error_type == 'parity': count = error_tuple[0][0] elif error_type ==", "pdu = list() 
pdu.append(0x6) addr_h = (addr & 0xff00) >>", "and associated documentation files (the \"Software\"), to deal # in", "Software without restriction, including without limitation the rights # to", "'start bit': count = error_tuple[1][1] elif error_type == 'stop bit':", "and to permit persons to whom the Software is #", "== 'parity': count = error_tuple[0][0] elif error_type == 'start bit':", "= (regval[0] & 0xff00) >> 8 pdu.append(regval_h) regval_l = regval[0]", "pdu.copy() bytecount = regnum_l << 1 pdu.append(bytecount) for i in", "copies of the Software, and to permit persons to whom", "hereby granted, free of charge, to any person obtaining a", "this permission notice shall be included in all # copies", "addr_h = (addr & 0xff00) >> 8 pdu.append(addr_h) addr_l =", "print('master_crc_err_count = ', error_tuple[0][4]) print('slave_parity_err_count = ', error_tuple[1][0]) print('slave_start_bit_err_count =", "OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE", "distribute, sublicense, and/or sell # copies of the Software, and", "modbus_role == 'Master': mb_bsp.write_mb_master_cs(CONFIG_REG, config_val) # Set configuration mb_bsp.write_mb_master_cs(SLAVE_ADDR_REG, slave_addr)", "ref_pdu.append(0x3) addr_h = (addr & 0xff00) >> 8 pdu.append(addr_h) addr_l", "config_val) # Set configuration mb_bsp.write_mb_slave_cs(SLAVE_ADDR_REG, slave_addr) # Set slave address", "OF MERCHANTABILITY, # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.", "count = error_tuple[0][0] elif error_type == 'start bit': count =", "error_tuple[0][2]) print('master_addr_err_count = ', error_tuple[0][3]) print('master_crc_err_count = ', error_tuple[0][4]) print('slave_parity_err_count", "= ', error_tuple[0][2]) print('master_addr_err_count = ', error_tuple[0][3]) print('master_crc_err_count = ',", "get_total_error_count(modbus_role): count = 0 error_tuple = mb_bsp.get_error_count() if modbus_role ==", "0 error_tuple = mb_bsp.get_error_count() if modbus_role == 'Both': for err_list", "HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER #", "list() ref_pdu = list() pdu.append(0x3) ref_pdu.append(0x3) addr_h = (addr &", "error_tuple[0][4]) print('slave_parity_err_count = ', error_tuple[1][0]) print('slave_start_bit_err_count = ', error_tuple[1][1]) print('slave_stop_bit_err_count", "def generate_0x06_pdu(addr, regval): pdu = list() pdu.append(0x6) addr_h = (addr", "bit': count = error_tuple[1][1] elif error_type == 'stop bit': count", "regval[0] & 0xff pdu.append(regval_l) ref_pdu = pdu.copy() return [pdu, ref_pdu]", "'\\tTest Successful' else: msg = '\\tTest FAILED' print() print('***************************') print(msg)", "== 'Slave': if error_type == 'parity': count = error_tuple[1][0] elif", "# MIT License # Copyright (c) 2021 <NAME>, <NAME> #", "8 pdu.append(addr_h) addr_l = (addr & 0xff) pdu.append(addr_l) regnum_h =", "FCODE_0x10 = 0x10 def incr_err_count(): incr_err_count.count += 1 setattr(incr_err_count, 'count',", "i in range(bytecount): ref_pdu.append(0) return [pdu, ref_pdu] def generate_0x06_pdu(addr, regval):", "def wait_mb_master_status(status): mb_bsp.wait_master_status(status) # 'FSM status' or 'PDU status' if", "MIT License # Copyright (c) 2021 <NAME>, <NAME> # Permission", "OTHERWISE, ARISING FROM, # OUT OF OR IN CONNECTION WITH", "deal # in the Software without restriction, including without limitation", "use, copy, modify, merge, publish, distribute, sublicense, and/or sell #", "# Set slave address mb_bsp.write_mb_master_cs(PDU_SIZE_REG, len(pdu)) # Set request PDU", "be included in all # copies or 
substantial portions of", "= 1 FCODE_0x3 = 0x3 FCODE_0x6 = 0x6 FCODE_0x10 =", "error_type): error_tuple = mb_bsp.get_error_count() count = 0 if modbus_role ==", "'crc': count = error_tuple[1][4] return count def print_error_count(): error_tuple =", "IN THE # SOFTWARE. import mb_bsp PDU_SIZE_REG = 0 CONFIG_REG", "mb_bsp.write_mb_master_pdu(pdu) # Set request PDU else: mb_bsp.write_mb_slave_cs(CONFIG_REG, config_val) # Set", "= error_tuple[1][0] elif error_type == 'start bit': count = error_tuple[1][1]", "print('master_addr_err_count = ', error_tuple[0][3]) print('master_crc_err_count = ', error_tuple[0][4]) print('slave_parity_err_count =", "copy, modify, merge, publish, distribute, sublicense, and/or sell # copies", "# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR", "0x10 def incr_err_count(): incr_err_count.count += 1 setattr(incr_err_count, 'count', 0) def", "error_type == 'address': count = error_tuple[0][3] elif error_type == 'crc':", "pdu.append(addr_l) regnum_h = (regnum & 0xff00) >> 8 pdu.append(regnum_h) regnum_l", "for err_list in error_tuple: for i in err_list: count +=", "pdu = list() ref_pdu = list() pdu.append(0x3) ref_pdu.append(0x3) addr_h =", "ref_pdu] def generate_0x10_pdu(addr, regnum, regval): pdu = list() pdu.append(0x10) addr_h", "0 if modbus_role == 'Master': if error_type == 'parity': count", "= 125 MB_MAX_REG_ADDR = 65535 MB_MAX_REG_VAL = 65535 MB_MAX_SLAVE_ADDR =", "software and associated documentation files (the \"Software\"), to deal #", "= 247 MB_MIN_SLAVE_ADDR = 1 MB_MAX_PDU_SIZE = 253 MB_MIN_PDU_SIZE =", "status , ' timeout ***') mb_bsp.alarm_cb.status_timeout = 0 incr_err_count() def", "regnum & 0xff pdu.append(regnum_l) bytecount = regnum << 1 ref_pdu.append(bytecount)", "# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF", "error_type == 'address': count = error_tuple[1][3] elif error_type == 'crc':", "AND NONINFRINGEMENT. 
<reponame>vasilydenisenko/modbus_rtu_slave
# MIT License
#
# Copyright (c) 2021 <NAME>, <NAME>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.

import mb_bsp

PDU_SIZE_REG = 0
CONFIG_REG = 1
SLAVE_ADDR_REG = 2
CS_REG = 3

MB_MAX_WRITE_REGNUM = 123
MB_MAX_READ_REGNUM = 125
MB_MAX_REG_ADDR = 65535
MB_MAX_REG_VAL = 65535
MB_MAX_SLAVE_ADDR = 247
MB_MIN_SLAVE_ADDR = 1
MB_MAX_PDU_SIZE = 253
MB_MIN_PDU_SIZE = 1

FCODE_0x3 = 0x3
FCODE_0x6 = 0x6
FCODE_0x10 = 0x10


def incr_err_count():
    incr_err_count.count += 1


setattr(incr_err_count, 'count', 0)


def wait_mb_master_status(status):
    mb_bsp.wait_master_status(status)  # 'FSM status' or 'PDU status'
    if mb_bsp.alarm_cb.status_timeout == 1:
        print('*** Test FAILED: ', status, ' timeout ***')
        mb_bsp.alarm_cb.status_timeout = 0
        incr_err_count()


def config_modbus(modbus_role, slave_addr, pdu, config_val):
    wait_mb_master_status('FSM status')
    if modbus_role == 'Master':
        mb_bsp.write_mb_master_cs(CONFIG_REG, config_val)      # Set configuration
        mb_bsp.write_mb_master_cs(SLAVE_ADDR_REG, slave_addr)  # Set slave address
        mb_bsp.write_mb_master_cs(PDU_SIZE_REG, len(pdu))      # Set request PDU size
        mb_bsp.write_mb_master_pdu(pdu)                        # Set request PDU
    else:
        mb_bsp.write_mb_slave_cs(CONFIG_REG, config_val)       # Set configuration
        mb_bsp.write_mb_slave_cs(SLAVE_ADDR_REG, slave_addr)   # Set slave address


def generate_0x03_pdu(addr, regnum):
    pdu = list()
    ref_pdu = list()
    pdu.append(0x3)
    ref_pdu.append(0x3)
    addr_h = (addr & 0xff00) >> 8
    pdu.append(addr_h)
    addr_l = (addr & 0xff)
    pdu.append(addr_l)
    regnum_h = (regnum & 0xff00) >> 8
    pdu.append(regnum_h)
    regnum_l = regnum & 0xff
    pdu.append(regnum_l)
    bytecount = regnum << 1
    ref_pdu.append(bytecount)
    for i in range(bytecount):
        ref_pdu.append(0)
    return [pdu, ref_pdu]


def generate_0x06_pdu(addr, regval):
    pdu = list()
    pdu.append(0x6)
    addr_h = (addr & 0xff00) >> 8
    pdu.append(addr_h)
    addr_l = (addr & 0xff)
    pdu.append(addr_l)
    regval_h = (regval[0] & 0xff00) >> 8
    pdu.append(regval_h)
    regval_l = regval[0] & 0xff
    pdu.append(regval_l)
    ref_pdu = pdu.copy()
    return [pdu, ref_pdu]


def generate_0x10_pdu(addr, regnum, regval):
    pdu = list()
    pdu.append(0x10)
    addr_h = (addr & 0xff00) >> 8
    pdu.append(addr_h)
    addr_l = (addr & 0xff)
    pdu.append(addr_l)
    regnum_h = (regnum & 0xff00) >> 8
    pdu.append(regnum_h)
    regnum_l = regnum & 0xff
    pdu.append(regnum_l)
    ref_pdu = pdu.copy()
    bytecount = regnum_l << 1
    pdu.append(bytecount)
    for i in range(regnum_l):
        regval_h = (regval[i] & 0xff00) >> 8
        pdu.append(regval_h)
        regval_l = regval[i] & 0xff
        pdu.append(regval_l)
    return [pdu, ref_pdu]


def print_test_result(result_ok):
    if result_ok:
        msg = '\tTest Successful'
    else:
        msg = '\tTest FAILED'
    print()
    print('***************************')
    print(msg)
    print('***************************')
    print()


def get_total_error_count(modbus_role):
    count = 0
    error_tuple = mb_bsp.get_error_count()
    if modbus_role == 'Both':
        for err_list in error_tuple:
            for i in err_list:
                count += i
    elif modbus_role == 'Master':
        for i in error_tuple[0]:
            count += i
    elif modbus_role == 'Slave':
        for i in error_tuple[1]:
            count += i
    return count


def get_single_error_count(modbus_role, error_type):
    error_tuple = mb_bsp.get_error_count()
    count = 0
    if modbus_role == 'Master':
        if error_type == 'parity':
            count = error_tuple[0][0]
        elif error_type == 'start bit':
            count = error_tuple[0][1]
        elif error_type == 'stop bit':
            count = error_tuple[0][2]
        elif error_type == 'address':
            count = error_tuple[0][3]
        elif error_type == 'crc':
            count = error_tuple[0][4]
    elif modbus_role == 'Slave':
        if error_type == 'parity':
            count = error_tuple[1][0]
        elif error_type == 'start bit':
            count = error_tuple[1][1]
        elif error_type == 'stop bit':
            count = error_tuple[1][2]
        elif error_type == 'address':
            count = error_tuple[1][3]
        elif error_type == 'crc':
            count = error_tuple[1][4]
    return count


def print_error_count():
    error_tuple = mb_bsp.get_error_count()
    print()
    print('master_parity_err_count = ', error_tuple[0][0])
    print('master_start_bit_err_count = ', error_tuple[0][1])
    print('master_stop_bit_err_count = ', error_tuple[0][2])
    print('master_addr_err_count = ', error_tuple[0][3])
    print('master_crc_err_count = ', error_tuple[0][4])
    print('slave_parity_err_count = ', error_tuple[1][0])
    print('slave_start_bit_err_count = ', error_tuple[1][1])
    print('slave_stop_bit_err_count = ', error_tuple[1][2])
    print('slave_addr_err_count = ', error_tuple[1][3])
    print('slave_crc_err_count = ', error_tuple[1][4])
    print('--------------------------------')
    print()
[ "ndb.IntegerProperty() status = ndb.StringProperty() status_string = ndb.StringProperty() def as_dict(self): result", "result['tags'] = [t for t in self.tags] result['is_answered'] = self.is_answered", "result def update_to_stackexchange_question(self, stackexchange_question): updated = False if stackexchange_question.tags !=", "updated = True if stackexchange_question.json['question_id'] != self.question_id: self.question_id = stackexchange_question.json['question_id']", "updated = False if stackexchange_question.tags != self.tags: self.tags = stackexchange_question.tags", "http://stackoverflow.com/questions/455580/json-datetime-between-python-and-javascript dthandler = lambda obj: ( obj.isoformat() if isinstance(obj, datetime.datetime)", "ndb.IntegerProperty() num_answered = ndb.IntegerProperty() num_unanswered = ndb.IntegerProperty() total_question_views = ndb.IntegerProperty()", "stackexchange_question.view_count != self.view_count: self.view_count = stackexchange_question.view_count updated = True if", "in stackexchange_question.tags], is_answered = stackexchange_question.json['is_answered'], view_count = stackexchange_question.view_count, answer_count =", "result['num_tagged_questions'] = self.num_tagged_questions result['num_questions_by_tag'] = self.num_questions_by_tag result['num_answered'] = self.num_answered result['num_unanswered']", "datetime.datetime) or isinstance(obj, datetime.date) else None ) class StackOverflowSnapshot(ndb.Model): \"\"\"Example", "ndb.StringProperty() creation_date = ndb.DateTimeProperty() question_id = ndb.IntegerProperty() def as_dict(self): result", "self.question_id return result def update_to_stackexchange_question(self, stackexchange_question): updated = False if", "= stackexchange_question.json['is_answered'], view_count = stackexchange_question.view_count, answer_count = stackexchange_question.json['answer_count'], url =", "result['answer_count'] = self.answer_count result['url'] = self.url result['title'] = self.title result['creation_date']", "= self.question_id return result def update_to_stackexchange_question(self, stackexchange_question): updated = False", "answer_count = stackexchange_question.json['answer_count'], url = stackexchange_question.url, title = stackexchange_question.title, creation_date", "creation_date = ndb.DateTimeProperty() question_id = ndb.IntegerProperty() def as_dict(self): result =", "= True if stackexchange_question.url != self.url: self.url = stackexchange_question.url updated", "= True if stackexchange_question.json['question_id'] != self.question_id: self.question_id = stackexchange_question.json['question_id'] updated", "self.is_answered = stackexchange_question.json['is_answered'] updated = True if stackexchange_question.view_count != self.view_count:", "= ndb.IntegerProperty() num_unanswered = ndb.IntegerProperty() total_question_views = ndb.IntegerProperty() status =", "dthandler(self.requested_time) result['num_tagged_questions'] = self.num_tagged_questions result['num_questions_by_tag'] = self.num_questions_by_tag result['num_answered'] = self.num_answered", "self.view_count result['answer_count'] = self.answer_count result['url'] = self.url result['title'] = self.title", "updated @classmethod def from_stackexchange_question(cls, stackexchange_question): result = cls( tags =", "= {} result['first_seen'] = dthandler(self.first_seen) result['tags'] = [t for t", "= ndb.IntegerProperty() url = ndb.StringProperty() title = ndb.StringProperty() creation_date =", "isinstance(obj, datetime.date) else None ) class 
StackOverflowSnapshot(ndb.Model): \"\"\"Example Model\"\"\" raw_timestamp", "self.answer_count = stackexchange_question.json['answer_count'] updated = True if stackexchange_question.url != self.url:", "= stackexchange_question.creation_date updated = True if stackexchange_question.json['question_id'] != self.question_id: self.question_id", "result['requested_time'] = dthandler(self.requested_time) result['num_tagged_questions'] = self.num_tagged_questions result['num_questions_by_tag'] = self.num_questions_by_tag result['num_answered']", "ndb.DateTimeProperty(required=True, auto_now_add=True) tags = ndb.StringProperty(repeated=True) is_answered = ndb.BooleanProperty() view_count =", "= cls( tags = [t for t in stackexchange_question.tags], is_answered", "self.answer_count result['url'] = self.url result['title'] = self.title result['creation_date'] = dthandler(self.creation_date)", "result['status_string'] = self.status_string return result class StackOverflowQuestion(ndb.Model): first_seen = ndb.DateTimeProperty(required=True,", "result['question_id'] = self.question_id return result def update_to_stackexchange_question(self, stackexchange_question): updated =", "result['title'] = self.title result['creation_date'] = dthandler(self.creation_date) result['question_id'] = self.question_id return", "stackexchange_question.json['question_id'] != self.question_id: self.question_id = stackexchange_question.json['question_id'] updated = True return", "= stackexchange_question.view_count updated = True if stackexchange_question.json['answer_count'] != self.answer_count: self.answer_count", "= stackexchange_question.tags updated = True if stackexchange_question.json['is_answered'] != self.is_answered: self.is_answered", "import json import datetime from google.appengine.ext import ndb # Taken", "= ndb.IntegerProperty() total_question_views = ndb.IntegerProperty() status = ndb.StringProperty() status_string =", "{} result['requested_time'] = dthandler(self.requested_time) result['num_tagged_questions'] = self.num_tagged_questions result['num_questions_by_tag'] = self.num_questions_by_tag", "updated = True if stackexchange_question.url != self.url: self.url = stackexchange_question.url", "tags = [t for t in stackexchange_question.tags], is_answered = stackexchange_question.json['is_answered'],", "title = stackexchange_question.title, creation_date = stackexchange_question.creation_date, question_id = stackexchange_question.json['question_id'] )", "= {} result['requested_time'] = dthandler(self.requested_time) result['num_tagged_questions'] = self.num_tagged_questions result['num_questions_by_tag'] =", "self.is_answered result['view_count'] = self.view_count result['answer_count'] = self.answer_count result['url'] = self.url", "= True if stackexchange_question.title != self.title: self.title = stackexchange_question.title updated", "= [t for t in stackexchange_question.tags], is_answered = stackexchange_question.json['is_answered'], view_count", "[t for t in stackexchange_question.tags], is_answered = stackexchange_question.json['is_answered'], view_count =", "= True if stackexchange_question.view_count != self.view_count: self.view_count = stackexchange_question.view_count updated", "!= self.is_answered: self.is_answered = stackexchange_question.json['is_answered'] updated = True if stackexchange_question.view_count", "fix_path import json import datetime from google.appengine.ext import ndb #", "= self.num_tagged_questions result['num_questions_by_tag'] = self.num_questions_by_tag 
result['num_answered'] = self.num_answered result['num_unanswered'] =", "= self.status_string return result class StackOverflowQuestion(ndb.Model): first_seen = ndb.DateTimeProperty(required=True, auto_now_add=True)", "stackexchange_question.title updated = True if stackexchange_question.creation_date != self.creation_date: self.creation_date =", "True if stackexchange_question.json['answer_count'] != self.answer_count: self.answer_count = stackexchange_question.json['answer_count'] updated =", "num_questions_by_tag = ndb.JsonProperty() num_tagged_questions = ndb.IntegerProperty() num_answered = ndb.IntegerProperty() num_unanswered", "return updated @classmethod def from_stackexchange_question(cls, stackexchange_question): result = cls( tags", "True if stackexchange_question.view_count != self.view_count: self.view_count = stackexchange_question.view_count updated =", "ndb.BooleanProperty() view_count = ndb.IntegerProperty() answer_count = ndb.IntegerProperty() url = ndb.StringProperty()", "num_tagged_questions = ndb.IntegerProperty() num_answered = ndb.IntegerProperty() num_unanswered = ndb.IntegerProperty() total_question_views", "self.title result['creation_date'] = dthandler(self.creation_date) result['question_id'] = self.question_id return result def", "True if stackexchange_question.url != self.url: self.url = stackexchange_question.url updated =", "ndb # Taken from http://stackoverflow.com/questions/455580/json-datetime-between-python-and-javascript dthandler = lambda obj: (", "= True if stackexchange_question.creation_date != self.creation_date: self.creation_date = stackexchange_question.creation_date updated", "self.total_question_views result['status'] = self.status result['status_string'] = self.status_string return result class", "ndb.JsonProperty() num_tagged_questions = ndb.IntegerProperty() num_answered = ndb.IntegerProperty() num_unanswered = ndb.IntegerProperty()", "isinstance(obj, datetime.datetime) or isinstance(obj, datetime.date) else None ) class StackOverflowSnapshot(ndb.Model):", "= stackexchange_question.view_count, answer_count = stackexchange_question.json['answer_count'], url = stackexchange_question.url, title =", "ndb.DateTimeProperty() question_id = ndb.IntegerProperty() def as_dict(self): result = {} result['first_seen']", "!= self.question_id: self.question_id = stackexchange_question.json['question_id'] updated = True return updated", "Model\"\"\" raw_timestamp = ndb.DateTimeProperty(required=True, auto_now_add=True) requested_time = ndb.DateTimeProperty(required=True) num_questions_by_tag =", "= self.total_question_views result['status'] = self.status result['status_string'] = self.status_string return result", "stackexchange_question.json['is_answered'] updated = True if stackexchange_question.view_count != self.view_count: self.view_count =", "= [t for t in self.tags] result['is_answered'] = self.is_answered result['view_count']", "= ndb.IntegerProperty() def as_dict(self): result = {} result['first_seen'] = dthandler(self.first_seen)", "datetime.date) else None ) class StackOverflowSnapshot(ndb.Model): \"\"\"Example Model\"\"\" raw_timestamp =", "self.tags] result['is_answered'] = self.is_answered result['view_count'] = self.view_count result['answer_count'] = self.answer_count", "= dthandler(self.creation_date) result['question_id'] = self.question_id return result def update_to_stackexchange_question(self, stackexchange_question):", "self.num_unanswered result['total_question_views'] = self.total_question_views result['status'] = self.status 
result['status_string'] = self.status_string", "ndb.IntegerProperty() url = ndb.StringProperty() title = ndb.StringProperty() creation_date = ndb.DateTimeProperty()", "result['total_question_views'] = self.total_question_views result['status'] = self.status result['status_string'] = self.status_string return", "ndb.IntegerProperty() answer_count = ndb.IntegerProperty() url = ndb.StringProperty() title = ndb.StringProperty()", "dthandler = lambda obj: ( obj.isoformat() if isinstance(obj, datetime.datetime) or", "self.status_string return result class StackOverflowQuestion(ndb.Model): first_seen = ndb.DateTimeProperty(required=True, auto_now_add=True) tags", "return result def update_to_stackexchange_question(self, stackexchange_question): updated = False if stackexchange_question.tags", "self.is_answered: self.is_answered = stackexchange_question.json['is_answered'] updated = True if stackexchange_question.view_count !=", "self.creation_date: self.creation_date = stackexchange_question.creation_date updated = True if stackexchange_question.json['question_id'] !=", "if stackexchange_question.tags != self.tags: self.tags = stackexchange_question.tags updated = True", "result['status'] = self.status result['status_string'] = self.status_string return result class StackOverflowQuestion(ndb.Model):", "!= self.creation_date: self.creation_date = stackexchange_question.creation_date updated = True if stackexchange_question.json['question_id']", "updated = True if stackexchange_question.creation_date != self.creation_date: self.creation_date = stackexchange_question.creation_date", "def as_dict(self): result = {} result['first_seen'] = dthandler(self.first_seen) result['tags'] =", "datetime from google.appengine.ext import ndb # Taken from http://stackoverflow.com/questions/455580/json-datetime-between-python-and-javascript dthandler", ") class StackOverflowSnapshot(ndb.Model): \"\"\"Example Model\"\"\" raw_timestamp = ndb.DateTimeProperty(required=True, auto_now_add=True) requested_time", "stackexchange_question.tags], is_answered = stackexchange_question.json['is_answered'], view_count = stackexchange_question.view_count, answer_count = stackexchange_question.json['answer_count'],", "result['first_seen'] = dthandler(self.first_seen) result['tags'] = [t for t in self.tags]", "stackexchange_question.view_count updated = True if stackexchange_question.json['answer_count'] != self.answer_count: self.answer_count =", "self.url: self.url = stackexchange_question.url updated = True if stackexchange_question.title !=", "status_string = ndb.StringProperty() def as_dict(self): result = {} result['requested_time'] =", "is_answered = stackexchange_question.json['is_answered'], view_count = stackexchange_question.view_count, answer_count = stackexchange_question.json['answer_count'], url", "if stackexchange_question.json['is_answered'] != self.is_answered: self.is_answered = stackexchange_question.json['is_answered'] updated = True", "google.appengine.ext import ndb # Taken from http://stackoverflow.com/questions/455580/json-datetime-between-python-and-javascript dthandler = lambda", "self.url = stackexchange_question.url updated = True if stackexchange_question.title != self.title:", "\"\"\"Example Model\"\"\" raw_timestamp = ndb.DateTimeProperty(required=True, auto_now_add=True) requested_time = ndb.DateTimeProperty(required=True) num_questions_by_tag", "or isinstance(obj, datetime.date) else None ) class StackOverflowSnapshot(ndb.Model): \"\"\"Example Model\"\"\"", "= ndb.StringProperty() title = 
ndb.StringProperty() creation_date = ndb.DateTimeProperty() question_id =", "self.view_count: self.view_count = stackexchange_question.view_count updated = True if stackexchange_question.json['answer_count'] !=", "tags = ndb.StringProperty(repeated=True) is_answered = ndb.BooleanProperty() view_count = ndb.IntegerProperty() answer_count", "self.num_answered result['num_unanswered'] = self.num_unanswered result['total_question_views'] = self.total_question_views result['status'] = self.status", "update_to_stackexchange_question(self, stackexchange_question): updated = False if stackexchange_question.tags != self.tags: self.tags", "import ndb # Taken from http://stackoverflow.com/questions/455580/json-datetime-between-python-and-javascript dthandler = lambda obj:", "<gh_stars>1-10 import fix_path import json import datetime from google.appengine.ext import", "self.num_tagged_questions result['num_questions_by_tag'] = self.num_questions_by_tag result['num_answered'] = self.num_answered result['num_unanswered'] = self.num_unanswered", "= ndb.IntegerProperty() answer_count = ndb.IntegerProperty() url = ndb.StringProperty() title =", "= stackexchange_question.json['answer_count'], url = stackexchange_question.url, title = stackexchange_question.title, creation_date =", "stackexchange_question.tags updated = True if stackexchange_question.json['is_answered'] != self.is_answered: self.is_answered =", "self.creation_date = stackexchange_question.creation_date updated = True if stackexchange_question.json['question_id'] != self.question_id:", "result class StackOverflowQuestion(ndb.Model): first_seen = ndb.DateTimeProperty(required=True, auto_now_add=True) tags = ndb.StringProperty(repeated=True)", "if stackexchange_question.json['question_id'] != self.question_id: self.question_id = stackexchange_question.json['question_id'] updated = True", "stackexchange_question.title, creation_date = stackexchange_question.creation_date, question_id = stackexchange_question.json['question_id'] ) return result", "result = {} result['requested_time'] = dthandler(self.requested_time) result['num_tagged_questions'] = self.num_tagged_questions result['num_questions_by_tag']", "ndb.StringProperty(repeated=True) is_answered = ndb.BooleanProperty() view_count = ndb.IntegerProperty() answer_count = ndb.IntegerProperty()", "stackexchange_question.tags != self.tags: self.tags = stackexchange_question.tags updated = True if", "= stackexchange_question.url, title = stackexchange_question.title, creation_date = stackexchange_question.creation_date, question_id =", "status = ndb.StringProperty() status_string = ndb.StringProperty() def as_dict(self): result =", "stackexchange_question.url updated = True if stackexchange_question.title != self.title: self.title =", "= dthandler(self.first_seen) result['tags'] = [t for t in self.tags] result['is_answered']", "result['num_questions_by_tag'] = self.num_questions_by_tag result['num_answered'] = self.num_answered result['num_unanswered'] = self.num_unanswered result['total_question_views']", "= self.answer_count result['url'] = self.url result['title'] = self.title result['creation_date'] =", "True return updated @classmethod def from_stackexchange_question(cls, stackexchange_question): result = cls(", "stackexchange_question.json['answer_count'], url = stackexchange_question.url, title = stackexchange_question.title, creation_date = stackexchange_question.creation_date,", "stackexchange_question.creation_date updated = True if stackexchange_question.json['question_id'] != 
self.question_id: self.question_id =", "[t for t in self.tags] result['is_answered'] = self.is_answered result['view_count'] =", "= stackexchange_question.json['answer_count'] updated = True if stackexchange_question.url != self.url: self.url", "lambda obj: ( obj.isoformat() if isinstance(obj, datetime.datetime) or isinstance(obj, datetime.date)", "question_id = ndb.IntegerProperty() def as_dict(self): result = {} result['first_seen'] =", "self.question_id: self.question_id = stackexchange_question.json['question_id'] updated = True return updated @classmethod", "updated = True if stackexchange_question.view_count != self.view_count: self.view_count = stackexchange_question.view_count", "answer_count = ndb.IntegerProperty() url = ndb.StringProperty() title = ndb.StringProperty() creation_date", "True if stackexchange_question.creation_date != self.creation_date: self.creation_date = stackexchange_question.creation_date updated =", "if stackexchange_question.creation_date != self.creation_date: self.creation_date = stackexchange_question.creation_date updated = True", "dthandler(self.creation_date) result['question_id'] = self.question_id return result def update_to_stackexchange_question(self, stackexchange_question): updated", "if stackexchange_question.json['answer_count'] != self.answer_count: self.answer_count = stackexchange_question.json['answer_count'] updated = True", "self.status result['status_string'] = self.status_string return result class StackOverflowQuestion(ndb.Model): first_seen =", "ndb.StringProperty() def as_dict(self): result = {} result['requested_time'] = dthandler(self.requested_time) result['num_tagged_questions']", "= ndb.StringProperty() def as_dict(self): result = {} result['requested_time'] = dthandler(self.requested_time)", "updated = True if stackexchange_question.json['is_answered'] != self.is_answered: self.is_answered = stackexchange_question.json['is_answered']", "stackexchange_question.json['is_answered'], view_count = stackexchange_question.view_count, answer_count = stackexchange_question.json['answer_count'], url = stackexchange_question.url,", "stackexchange_question.json['is_answered'] != self.is_answered: self.is_answered = stackexchange_question.json['is_answered'] updated = True if", "!= self.url: self.url = stackexchange_question.url updated = True if stackexchange_question.title", "True if stackexchange_question.json['question_id'] != self.question_id: self.question_id = stackexchange_question.json['question_id'] updated =", "= self.num_questions_by_tag result['num_answered'] = self.num_answered result['num_unanswered'] = self.num_unanswered result['total_question_views'] =", "result = {} result['first_seen'] = dthandler(self.first_seen) result['tags'] = [t for", "self.tags = stackexchange_question.tags updated = True if stackexchange_question.json['is_answered'] != self.is_answered:", "True if stackexchange_question.json['is_answered'] != self.is_answered: self.is_answered = stackexchange_question.json['is_answered'] updated =", "= False if stackexchange_question.tags != self.tags: self.tags = stackexchange_question.tags updated", "if stackexchange_question.url != self.url: self.url = stackexchange_question.url updated = True", "def from_stackexchange_question(cls, stackexchange_question): result = cls( tags = [t for", "result['view_count'] = self.view_count result['answer_count'] = self.answer_count result['url'] = self.url result['title']", "updated = True if stackexchange_question.json['answer_count'] != self.answer_count: 
self.answer_count = stackexchange_question.json['answer_count']", "!= self.title: self.title = stackexchange_question.title updated = True if stackexchange_question.creation_date", "ndb.IntegerProperty() total_question_views = ndb.IntegerProperty() status = ndb.StringProperty() status_string = ndb.StringProperty()", "= stackexchange_question.title, creation_date = stackexchange_question.creation_date, question_id = stackexchange_question.json['question_id'] ) return", "num_unanswered = ndb.IntegerProperty() total_question_views = ndb.IntegerProperty() status = ndb.StringProperty() status_string", "from google.appengine.ext import ndb # Taken from http://stackoverflow.com/questions/455580/json-datetime-between-python-and-javascript dthandler =", "None ) class StackOverflowSnapshot(ndb.Model): \"\"\"Example Model\"\"\" raw_timestamp = ndb.DateTimeProperty(required=True, auto_now_add=True)", "obj: ( obj.isoformat() if isinstance(obj, datetime.datetime) or isinstance(obj, datetime.date) else", "result['is_answered'] = self.is_answered result['view_count'] = self.view_count result['answer_count'] = self.answer_count result['url']", "= True if stackexchange_question.json['is_answered'] != self.is_answered: self.is_answered = stackexchange_question.json['is_answered'] updated", "stackexchange_question.json['question_id'] updated = True return updated @classmethod def from_stackexchange_question(cls, stackexchange_question):", "= ndb.StringProperty() status_string = ndb.StringProperty() def as_dict(self): result = {}", "result = cls( tags = [t for t in stackexchange_question.tags],", "= ndb.DateTimeProperty(required=True, auto_now_add=True) tags = ndb.StringProperty(repeated=True) is_answered = ndb.BooleanProperty() view_count", "for t in self.tags] result['is_answered'] = self.is_answered result['view_count'] = self.view_count", "= ndb.StringProperty(repeated=True) is_answered = ndb.BooleanProperty() view_count = ndb.IntegerProperty() answer_count =", "self.title: self.title = stackexchange_question.title updated = True if stackexchange_question.creation_date !=", "num_answered = ndb.IntegerProperty() num_unanswered = ndb.IntegerProperty() total_question_views = ndb.IntegerProperty() status", "= self.num_answered result['num_unanswered'] = self.num_unanswered result['total_question_views'] = self.total_question_views result['status'] =", "self.answer_count: self.answer_count = stackexchange_question.json['answer_count'] updated = True if stackexchange_question.url !=", "url = stackexchange_question.url, title = stackexchange_question.title, creation_date = stackexchange_question.creation_date, question_id", "ndb.DateTimeProperty(required=True, auto_now_add=True) requested_time = ndb.DateTimeProperty(required=True) num_questions_by_tag = ndb.JsonProperty() num_tagged_questions =", "= self.num_unanswered result['total_question_views'] = self.total_question_views result['status'] = self.status result['status_string'] =", "updated = True if stackexchange_question.title != self.title: self.title = stackexchange_question.title", "False if stackexchange_question.tags != self.tags: self.tags = stackexchange_question.tags updated =", "Taken from http://stackoverflow.com/questions/455580/json-datetime-between-python-and-javascript dthandler = lambda obj: ( obj.isoformat() if", "as_dict(self): result = {} result['first_seen'] = dthandler(self.first_seen) result['tags'] = [t", "!= self.answer_count: self.answer_count = stackexchange_question.json['answer_count'] updated = True if stackexchange_question.url", 
"result['num_answered'] = self.num_answered result['num_unanswered'] = self.num_unanswered result['total_question_views'] = self.total_question_views result['status']", "True if stackexchange_question.title != self.title: self.title = stackexchange_question.title updated =", "= self.view_count result['answer_count'] = self.answer_count result['url'] = self.url result['title'] =", "def update_to_stackexchange_question(self, stackexchange_question): updated = False if stackexchange_question.tags != self.tags:", "ndb.IntegerProperty() num_unanswered = ndb.IntegerProperty() total_question_views = ndb.IntegerProperty() status = ndb.StringProperty()", "= ndb.DateTimeProperty() question_id = ndb.IntegerProperty() def as_dict(self): result = {}", "json import datetime from google.appengine.ext import ndb # Taken from", "ndb.DateTimeProperty(required=True) num_questions_by_tag = ndb.JsonProperty() num_tagged_questions = ndb.IntegerProperty() num_answered = ndb.IntegerProperty()", "!= self.tags: self.tags = stackexchange_question.tags updated = True if stackexchange_question.json['is_answered']", "t in self.tags] result['is_answered'] = self.is_answered result['view_count'] = self.view_count result['answer_count']", "stackexchange_question.creation_date != self.creation_date: self.creation_date = stackexchange_question.creation_date updated = True if", "result['url'] = self.url result['title'] = self.title result['creation_date'] = dthandler(self.creation_date) result['question_id']", "@classmethod def from_stackexchange_question(cls, stackexchange_question): result = cls( tags = [t", "stackexchange_question): result = cls( tags = [t for t in", "obj.isoformat() if isinstance(obj, datetime.datetime) or isinstance(obj, datetime.date) else None )", "result['creation_date'] = dthandler(self.creation_date) result['question_id'] = self.question_id return result def update_to_stackexchange_question(self,", "= True return updated @classmethod def from_stackexchange_question(cls, stackexchange_question): result =", "title = ndb.StringProperty() creation_date = ndb.DateTimeProperty() question_id = ndb.IntegerProperty() def", "self.url result['title'] = self.title result['creation_date'] = dthandler(self.creation_date) result['question_id'] = self.question_id", "def as_dict(self): result = {} result['requested_time'] = dthandler(self.requested_time) result['num_tagged_questions'] =", "first_seen = ndb.DateTimeProperty(required=True, auto_now_add=True) tags = ndb.StringProperty(repeated=True) is_answered = ndb.BooleanProperty()", "else None ) class StackOverflowSnapshot(ndb.Model): \"\"\"Example Model\"\"\" raw_timestamp = ndb.DateTimeProperty(required=True,", "StackOverflowQuestion(ndb.Model): first_seen = ndb.DateTimeProperty(required=True, auto_now_add=True) tags = ndb.StringProperty(repeated=True) is_answered =", "ndb.StringProperty() title = ndb.StringProperty() creation_date = ndb.DateTimeProperty() question_id = ndb.IntegerProperty()", "{} result['first_seen'] = dthandler(self.first_seen) result['tags'] = [t for t in", "is_answered = ndb.BooleanProperty() view_count = ndb.IntegerProperty() answer_count = ndb.IntegerProperty() url", "import fix_path import json import datetime from google.appengine.ext import ndb", "= lambda obj: ( obj.isoformat() if isinstance(obj, datetime.datetime) or isinstance(obj,", "class StackOverflowQuestion(ndb.Model): first_seen = ndb.DateTimeProperty(required=True, auto_now_add=True) tags = ndb.StringProperty(repeated=True) is_answered", "= self.is_answered 
result['view_count'] = self.view_count result['answer_count'] = self.answer_count result['url'] =", "stackexchange_question.json['answer_count'] != self.answer_count: self.answer_count = stackexchange_question.json['answer_count'] updated = True if", "from http://stackoverflow.com/questions/455580/json-datetime-between-python-and-javascript dthandler = lambda obj: ( obj.isoformat() if isinstance(obj,", "in self.tags] result['is_answered'] = self.is_answered result['view_count'] = self.view_count result['answer_count'] =", "class StackOverflowSnapshot(ndb.Model): \"\"\"Example Model\"\"\" raw_timestamp = ndb.DateTimeProperty(required=True, auto_now_add=True) requested_time =", "= dthandler(self.requested_time) result['num_tagged_questions'] = self.num_tagged_questions result['num_questions_by_tag'] = self.num_questions_by_tag result['num_answered'] =", "dthandler(self.first_seen) result['tags'] = [t for t in self.tags] result['is_answered'] =", "url = ndb.StringProperty() title = ndb.StringProperty() creation_date = ndb.DateTimeProperty() question_id", "= ndb.IntegerProperty() num_answered = ndb.IntegerProperty() num_unanswered = ndb.IntegerProperty() total_question_views =", "self.title = stackexchange_question.title updated = True if stackexchange_question.creation_date != self.creation_date:", "if stackexchange_question.view_count != self.view_count: self.view_count = stackexchange_question.view_count updated = True", "= self.title result['creation_date'] = dthandler(self.creation_date) result['question_id'] = self.question_id return result", "auto_now_add=True) requested_time = ndb.DateTimeProperty(required=True) num_questions_by_tag = ndb.JsonProperty() num_tagged_questions = ndb.IntegerProperty()", "return result class StackOverflowQuestion(ndb.Model): first_seen = ndb.DateTimeProperty(required=True, auto_now_add=True) tags =", "auto_now_add=True) tags = ndb.StringProperty(repeated=True) is_answered = ndb.BooleanProperty() view_count = ndb.IntegerProperty()", "self.question_id = stackexchange_question.json['question_id'] updated = True return updated @classmethod def", "ndb.StringProperty() status_string = ndb.StringProperty() def as_dict(self): result = {} result['requested_time']", "view_count = stackexchange_question.view_count, answer_count = stackexchange_question.json['answer_count'], url = stackexchange_question.url, title", "result['num_unanswered'] = self.num_unanswered result['total_question_views'] = self.total_question_views result['status'] = self.status result['status_string']", "= ndb.BooleanProperty() view_count = ndb.IntegerProperty() answer_count = ndb.IntegerProperty() url =", "view_count = ndb.IntegerProperty() answer_count = ndb.IntegerProperty() url = ndb.StringProperty() title", "stackexchange_question): updated = False if stackexchange_question.tags != self.tags: self.tags =", "raw_timestamp = ndb.DateTimeProperty(required=True, auto_now_add=True) requested_time = ndb.DateTimeProperty(required=True) num_questions_by_tag = ndb.JsonProperty()", "self.num_questions_by_tag result['num_answered'] = self.num_answered result['num_unanswered'] = self.num_unanswered result['total_question_views'] = self.total_question_views", "= ndb.IntegerProperty() status = ndb.StringProperty() status_string = ndb.StringProperty() def as_dict(self):", "= True if stackexchange_question.json['answer_count'] != self.answer_count: self.answer_count = stackexchange_question.json['answer_count'] updated", "!= self.view_count: self.view_count = stackexchange_question.view_count updated = 
True if stackexchange_question.json['answer_count']", "= stackexchange_question.json['question_id'] updated = True return updated @classmethod def from_stackexchange_question(cls,", "if stackexchange_question.title != self.title: self.title = stackexchange_question.title updated = True", "import datetime from google.appengine.ext import ndb # Taken from http://stackoverflow.com/questions/455580/json-datetime-between-python-and-javascript", "if isinstance(obj, datetime.datetime) or isinstance(obj, datetime.date) else None ) class", "for t in stackexchange_question.tags], is_answered = stackexchange_question.json['is_answered'], view_count = stackexchange_question.view_count,", "t in stackexchange_question.tags], is_answered = stackexchange_question.json['is_answered'], view_count = stackexchange_question.view_count, answer_count", "= stackexchange_question.json['is_answered'] updated = True if stackexchange_question.view_count != self.view_count: self.view_count", "stackexchange_question.url != self.url: self.url = stackexchange_question.url updated = True if", "= ndb.DateTimeProperty(required=True, auto_now_add=True) requested_time = ndb.DateTimeProperty(required=True) num_questions_by_tag = ndb.JsonProperty() num_tagged_questions", "StackOverflowSnapshot(ndb.Model): \"\"\"Example Model\"\"\" raw_timestamp = ndb.DateTimeProperty(required=True, auto_now_add=True) requested_time = ndb.DateTimeProperty(required=True)", "= ndb.DateTimeProperty(required=True) num_questions_by_tag = ndb.JsonProperty() num_tagged_questions = ndb.IntegerProperty() num_answered =", "= self.url result['title'] = self.title result['creation_date'] = dthandler(self.creation_date) result['question_id'] =", "self.tags: self.tags = stackexchange_question.tags updated = True if stackexchange_question.json['is_answered'] !=", "self.view_count = stackexchange_question.view_count updated = True if stackexchange_question.json['answer_count'] != self.answer_count:", "= stackexchange_question.url updated = True if stackexchange_question.title != self.title: self.title", "stackexchange_question.json['answer_count'] updated = True if stackexchange_question.url != self.url: self.url =", "( obj.isoformat() if isinstance(obj, datetime.datetime) or isinstance(obj, datetime.date) else None", "updated = True return updated @classmethod def from_stackexchange_question(cls, stackexchange_question): result", "cls( tags = [t for t in stackexchange_question.tags], is_answered =", "total_question_views = ndb.IntegerProperty() status = ndb.StringProperty() status_string = ndb.StringProperty() def", "= self.status result['status_string'] = self.status_string return result class StackOverflowQuestion(ndb.Model): first_seen", "as_dict(self): result = {} result['requested_time'] = dthandler(self.requested_time) result['num_tagged_questions'] = self.num_tagged_questions", "# Taken from http://stackoverflow.com/questions/455580/json-datetime-between-python-and-javascript dthandler = lambda obj: ( obj.isoformat()", "requested_time = ndb.DateTimeProperty(required=True) num_questions_by_tag = ndb.JsonProperty() num_tagged_questions = ndb.IntegerProperty() num_answered", "= stackexchange_question.title updated = True if stackexchange_question.creation_date != self.creation_date: self.creation_date", "= ndb.StringProperty() creation_date = ndb.DateTimeProperty() question_id = ndb.IntegerProperty() def as_dict(self):", "stackexchange_question.view_count, answer_count = stackexchange_question.json['answer_count'], url = stackexchange_question.url, title = 
stackexchange_question.title,", "= ndb.JsonProperty() num_tagged_questions = ndb.IntegerProperty() num_answered = ndb.IntegerProperty() num_unanswered =", "from_stackexchange_question(cls, stackexchange_question): result = cls( tags = [t for t", "ndb.IntegerProperty() def as_dict(self): result = {} result['first_seen'] = dthandler(self.first_seen) result['tags']", "stackexchange_question.url, title = stackexchange_question.title, creation_date = stackexchange_question.creation_date, question_id = stackexchange_question.json['question_id']", "stackexchange_question.title != self.title: self.title = stackexchange_question.title updated = True if" ]
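Below is a minimal usage sketch, not part of the original file: it assumes a hypothetical stackexchange_question wrapper object exposing tags, view_count, url, title, creation_date and a raw json dict, and that the code runs where an App Engine NDB datastore context is available.

# Hedged usage sketch: upsert a StackOverflowQuestion from a hypothetical
# stackexchange_question object, then serialize it via as_dict() and json.
question = StackOverflowQuestion.query(
    StackOverflowQuestion.question_id ==
    stackexchange_question.json['question_id']).get()
if question is None:
    question = StackOverflowQuestion.from_stackexchange_question(
        stackexchange_question)
    question.put()
elif question.update_to_stackexchange_question(stackexchange_question):
    question.put()
print(json.dumps(question.as_dict()))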
[ "while time.clock() - start < busyTime: pass time.sleep(busyTime / 1000)", "= 10 idleTime = busyTime while True: start = time.clock()", "UTF-8 -*- import time busyTime = 10 idleTime = busyTime", "True: start = time.clock() while time.clock() - start < busyTime:", "-*- import time busyTime = 10 idleTime = busyTime while", "start = time.clock() while time.clock() - start < busyTime: pass", "coding: UTF-8 -*- import time busyTime = 10 idleTime =", "10 idleTime = busyTime while True: start = time.clock() while", "busyTime while True: start = time.clock() while time.clock() - start", "import time busyTime = 10 idleTime = busyTime while True:", "idleTime = busyTime while True: start = time.clock() while time.clock()", "time.clock() while time.clock() - start < busyTime: pass time.sleep(busyTime /", "#!/usr/bin/python3 # -*- coding: UTF-8 -*- import time busyTime =", "busyTime = 10 idleTime = busyTime while True: start =", "= busyTime while True: start = time.clock() while time.clock() -", "time busyTime = 10 idleTime = busyTime while True: start", "# -*- coding: UTF-8 -*- import time busyTime = 10", "-*- coding: UTF-8 -*- import time busyTime = 10 idleTime", "while True: start = time.clock() while time.clock() - start <", "= time.clock() while time.clock() - start < busyTime: pass time.sleep(busyTime" ]
[ "are not saved to the backend self.optional_fields = [\"template_id\", \"layergroupid\",", "'metadata'] super(AnonymousMap, self).__init__(auth_client) def instantiate(self, params): \"\"\" Allows you to", "instance :param auth_client: Auth client \"\"\" super(BaseMap, self).__init__(auth_client) def get_tile_url(self,", "JSON object defining the named map :type kwargs: kwargs :return:", "y tile :param z: The zoom level :param layer_id: Can", "'0,1,2' :param feature_id: The id of the feature :param filter:", "filter=filter, z=z, x=x, y=y, extension=extension) elif layer_id is not None:", "not None and feature_id is not None: url = urljoin(base_url,", "'last_updated', 'layergroupid', 'metadata'] super(AnonymousMap, self).__init__(auth_client) def instantiate(self, params): \"\"\" Allows", "the named map :param auth: The auth client :type params:", "CartoException \"\"\" try: endpoint = (self.Meta.collection_endpoint + \"{template_id}\"). \\ format(template_id=self.template_id)", "None \\ and len(self.auth['valid_tokens']) > 0: url = urljoin(url, \"?auth_token={auth_token}\").", "\"\"\" base_url = self.client.base_url + self.Meta.collection_endpoint template_id = self.template_id if", "if k in self.fields + self.optional_fields: setattr(self, k, v) class", "map :type params: dict :return: :raise: CartoException \"\"\" try: self.send(self.Meta.collection_endpoint,", "ANONYMOUS_API_ENDPOINT = \"api/{api_version}/map/\" class BaseMap(Resource): \"\"\" Base class for NamedMap", "NAMED_API_ENDPOINT = \"api/{api_version}/map/named/\" ANONYMOUS_API_ENDPOINT = \"api/{api_version}/map/\" class BaseMap(Resource): \"\"\" Base", "to the # layer of your \\ map), all layers", "urljoin(base_url, \"{template_id}/{z}/{x}/{y}.{extension}\"). \\ format( template_id=template_id, z=z, x=x, y=y, extension=extension) if", "named map. Specifically an attribute `template` must contain the JSON", "CARTO. \"\"\" class Meta: collection_endpoint = ANONYMOUS_API_ENDPOINT.format( api_version=API_VERSION) def __init__(self,", "else self.layergroupid if layer_id is not None and feature_id is", "Exception as e: raise CartoException(e) def update_from_dict(self, attribute_dict): \"\"\" Method", "object :rtype: NamedMap :raise: CartoException \"\"\" resource = self.resource_class(self.client) resource.update_from_dict(kwargs['template'])", ":return: New named map object :rtype: NamedMap :raise: CartoException \"\"\"", "return try: for k, v in attribute_dict.items(): if k in", ":type params: dict :return: :raise: CartoException \"\"\" try: self.send(self.Meta.collection_endpoint, \"POST\",", "API_VERSION = \"v1\" NAMED_API_ENDPOINT = \"api/{api_version}/map/named/\" ANONYMOUS_API_ENDPOINT = \"api/{api_version}/map/\" class", "of layers, enter the comma separated \\ layer value as", "base_url = self.client.base_url + self.Meta.collection_endpoint template_id = self.template_id if hasattr(self,", "extension=extension) else: url = urljoin(base_url, \"{template_id}/{z}/{x}/{y}.{extension}\"). \\ format( template_id=template_id, z=z,", "dict :type auth: :class:`carto.auth.APIKeyAuthClient` :return: :raise: CartoException \"\"\" try: endpoint", "x tile :param y: The y tile :param z: The", "url = urljoin(url, \"?auth_token={auth_token}\"). 
\\ format(auth_token=self.auth['valid_tokens'][0]) return url class NamedMap(BaseMap):", "\"\"\" Initializes a NamedMap instance :param auth_client: Auth client \"\"\"", "int :type z: int :type layer_id: str :type feature_id: str", ":type extension: str :return: A URL to download data :rtype:", ":type layer_id: str :type feature_id: str :type filter: str :type", ".exceptions import CartoException, CartoRateLimitException API_VERSION = \"v1\" NAMED_API_ENDPOINT = \"api/{api_version}/map/named/\"", "= urljoin(base_url, \"{template_id}/{layer}/attributes/{feature_id}\"). \\ format(template_id=template_id, layer=layer_id, feature_id=feature_id) elif layer_id is", "= \"template_ids\" def create(self, **kwargs): \"\"\" Creates a named map", "tiles of a created map :param params: The json with", "str :type filter: str :type extension: str :return: A URL", "Allows you to fetch the map tiles of a created", "a named map :param kwargs: Attributes for creating the named", "mvt, ... :type x: int :type y: int :type z:", "... :type x: int :type y: int :type z: int", "layer :param extension: The format of the data to be", "\"\"\" if 'template' in attribute_dict: self.update_from_dict(attribute_dict['template']) setattr(self, self.Meta.id_field, attribute_dict['template']['name']) return", "\\ else self.layergroupid if layer_id is not None and feature_id", "\"\"\" Equivalent to creating an anonymous map in CARTO. \"\"\"", "def __init__(self, auth_client): \"\"\" Initializes a NamedMap instance :param auth_client:", "is not None): endpoint = (endpoint + \"?auth_token={auth_token}\"). \\ format(auth_token=auth)", "<<EMAIL>> .. moduleauthor:: <NAME> <<EMAIL>> \"\"\" try: from urllib.parse import", "\"\"\" resource_class = NamedMap json_collection_attribute = \"template_ids\" def create(self, **kwargs):", "\"\"\" self.optional_fields = ['cdn_url', 'last_updated', 'layergroupid', 'metadata'] super(AnonymousMap, self).__init__(auth_client) def", "To show just the basemap layer, enter the value 0", "k in self.fields + self.optional_fields: setattr(self, k, v) except Exception:", "layer_id is not None: url = urljoin(base_url, \"{template_id}/{layer}/{z}/{x}/{y}.{extension}\"). \\ format(template_id=template_id,", "def __str__(self): try: return unicode(self.name).encode(\"utf-8\") except AttributeError: return super(NamedMap, self).__repr__()", "BaseMap instance :param auth_client: Auth client \"\"\" super(BaseMap, self).__init__(auth_client) def", "v in attribute_dict.items(): if k in self.fields + self.optional_fields: setattr(self,", "params): \"\"\" Allows you to fetch the map tiles of", "y, z, layer_id=None, feature_id=None, filter=None, extension=\"png\"): \"\"\" Prepares a URL", "The y tile :param z: The zoom level :param layer_id:", "The auth client :type params: dict :type auth: :class:`carto.auth.APIKeyAuthClient` :return:", "of the feature :param filter: The filter to be applied", "url = urljoin(base_url, \"{template_id}/{z}/{x}/{y}.{extension}\"). \\ format( template_id=template_id, z=z, x=x, y=y,", "is not None: url = urljoin(base_url, \"{template_id}/{filter}/{z}/{x}/{y}.{extension}\"). 
\\ format(template_id=template_id, filter=filter,", "\\ format(auth_token=self.auth['valid_tokens'][0]) return url class NamedMap(BaseMap): \"\"\" Equivalent to creating", "zoom level :param layer_id: Can be a number (referring to", "instantiate(self, params, auth=None): \"\"\" Allows you to fetch the map", "ANONYMOUS_API_ENDPOINT.format( api_version=API_VERSION) def __init__(self, auth_client): \"\"\" Initializes an AnonymousMap instance", "overriden from the base class \"\"\" if 'template' in attribute_dict:", "a BaseMap instance :param auth_client: Auth client \"\"\" super(BaseMap, self).__init__(auth_client)", "extension=\"png\"): \"\"\" Prepares a URL to get data (raster or", "y=y, extension=extension) elif layer_id is not None: url = urljoin(base_url,", "working with named and anonymous maps .. module:: carto.maps :platform:", ":param z: The zoom level :param layer_id: Can be a", "map. Specifically an attribute `template` must contain the JSON object", "setattr(self, k, v) class NamedMapManager(Manager): \"\"\" Manager for the NamedMap", "v) class NamedMapManager(Manager): \"\"\" Manager for the NamedMap class \"\"\"", "layer_id is not None and feature_id is not None: url", "def create(self, **kwargs): \"\"\" Creates a named map :param kwargs:", "download data :rtype: str :raise: CartoException \"\"\" base_url = self.client.base_url", "To show the first layer, enter the value 1 To", "creating a named map in CARTO. \"\"\" class Meta: collection_endpoint", "\"view\"] # Optional fields can be assigned by some responses", "number (referring to the # layer of your \\ map),", "just the basemap layer, enter the value 0 To show", "except Exception as e: raise CartoException(e) def update_from_dict(self, attribute_dict): for", "(self.Meta.collection_endpoint + \"{template_id}\"). \\ format(template_id=self.template_id) if (auth is not None):", "CartoRateLimitException as e: raise e except Exception as e: raise", "z, layer_id=None, feature_id=None, filter=None, extension=\"png\"): \"\"\" Prepares a URL to", "def update_from_dict(self, attribute_dict): \"\"\" Method overriden from the base class", "self.Meta.id_field, attribute_dict['template']['name']) return try: for k, v in attribute_dict.items(): if", ".. module:: carto.maps :platform: Unix, Windows :synopsis: Module for working", "self.fields = [\"version\", \"name\", \"auth\", \"placeholders\", \"layergroup\", \"view\"] # Optional", "layer of your \\ map), all layers of your map,", "is not None: url = urljoin(base_url, \"{template_id}/{layer}/{z}/{x}/{y}.{extension}\"). \\ format(template_id=template_id, layer=layer_id,", "\\ format( template_id=template_id, z=z, x=x, y=y, extension=extension) if hasattr(self, 'auth')", "template_id = self.template_id if hasattr(self, 'template_id') \\ else self.layergroupid if", "To show a list of layers, enter the comma separated", "feature :param filter: The filter to be applied to the", "\"api/{api_version}/map/named/\" ANONYMOUS_API_ENDPOINT = \"api/{api_version}/map/\" class BaseMap(Resource): \"\"\" Base class for", "some responses create, instantiate, # but are not saved to", "Equivalent to creating a named map in CARTO. \"\"\" class", "named map in CARTO. 
\"\"\" class Meta: collection_endpoint = NAMED_API_ENDPOINT.format(", "CartoException(e) def update_from_dict(self, attribute_dict): for k, v in attribute_dict.items(): if", "URL to get data (raster or vector) from a NamedMap", "def __init__(self, auth_client): \"\"\" Initializes an AnonymousMap instance :param auth_client:", "an attribute `template` must contain the JSON object defining the", "\"\"\" def __init__(self, auth_client): \"\"\" Initializes a BaseMap instance :param", "dict :return: :raise: CartoException \"\"\" try: self.send(self.Meta.collection_endpoint, \"POST\", json=params) except", "for NamedMap and AnonymousMap \"\"\" def __init__(self, auth_client): \"\"\" Initializes", "saved to the backend self.optional_fields = [\"template_id\", \"layergroupid\", \"last_updated\"] super(NamedMap,", "except Exception: setattr(self, self.Meta.id_field, attribute_dict) class AnonymousMap(BaseMap): \"\"\" Equivalent to", "\"\"\" class Meta: collection_endpoint = NAMED_API_ENDPOINT.format( api_version=API_VERSION) id_field = \"template_id\"", "maps .. module:: carto.maps :platform: Unix, Windows :synopsis: Module for", "Auth client \"\"\" self.fields = [\"version\", \"name\", \"auth\", \"placeholders\", \"layergroup\",", "+ \"?auth_token={auth_token}\"). \\ format(auth_token=auth) self.send(endpoint, \"POST\", json=params) except CartoRateLimitException as", "e: raise e except Exception as e: raise CartoException(e) def", "auth_client: Auth client \"\"\" self.fields = [\"version\", \"name\", \"auth\", \"placeholders\",", "moduleauthor:: <NAME> <<EMAIL>> .. moduleauthor:: <NAME> <<EMAIL>> \"\"\" try: from", "super(NamedMap, self).__repr__() def __init__(self, auth_client): \"\"\" Initializes a NamedMap instance", "not None): endpoint = (endpoint + \"?auth_token={auth_token}\"). \\ format(auth_token=auth) self.send(endpoint,", "layer_id: Can be a number (referring to the # layer", "to creating an anonymous map in CARTO. \"\"\" class Meta:", "urljoin(base_url, \"{template_id}/{layer}/attributes/{feature_id}\"). 
\\ format(template_id=template_id, layer=layer_id, feature_id=feature_id) elif layer_id is not", ":synopsis: Module for working with named and anonymous maps ..", "format(auth_token=auth) self.send(endpoint, \"POST\", json=params) except CartoRateLimitException as e: raise e", "object defining the named map :type kwargs: kwargs :return: New", "map object :rtype: NamedMap :raise: CartoException \"\"\" resource = self.resource_class(self.client)", "the # layer of your \\ map), all layers of", "'template_id') \\ else self.layergroupid if layer_id is not None and", "\"name\", \"auth\", \"placeholders\", \"layergroup\", \"view\"] # Optional fields can be", "0 To show the first layer, enter the value 1", "from the base class \"\"\" if 'template' in attribute_dict: self.update_from_dict(attribute_dict['template'])", "__init__(self, auth_client): \"\"\" Initializes an AnonymousMap instance :param auth_client: Auth", "NamedMap instance :param auth_client: Auth client \"\"\" self.fields = [\"version\",", ":param auth_client: Auth client \"\"\" super(BaseMap, self).__init__(auth_client) def get_tile_url(self, x,", "x=x, y=y, extension=extension) elif layer_id is not None: url =", "the comma separated \\ layer value as '0,1,2' :param feature_id:", "self.auth is not None \\ and len(self.auth['valid_tokens']) > 0: url", ":type z: int :type layer_id: str :type feature_id: str :type", "to the layer :param extension: The format of the data", "url = urljoin(base_url, \"{template_id}/{layer}/attributes/{feature_id}\"). \\ format(template_id=template_id, layer=layer_id, feature_id=feature_id) elif layer_id", "else: url = urljoin(base_url, \"{template_id}/{z}/{x}/{y}.{extension}\"). \\ format( template_id=template_id, z=z, x=x,", ":param layer_id: Can be a number (referring to the #", "Initializes an AnonymousMap instance :param auth_client: Auth client \"\"\" self.optional_fields", "<<EMAIL>> \"\"\" try: from urllib.parse import urljoin except ImportError: from", "Prepares a URL to get data (raster or vector) from", "to get data (raster or vector) from a NamedMap or", "auth: The auth client :type params: dict :type auth: :class:`carto.auth.APIKeyAuthClient`", ":param filter: The filter to be applied to the layer", "urljoin(base_url, \"{template_id}/{layer}/{z}/{x}/{y}.{extension}\"). \\ format(template_id=template_id, layer=layer_id, z=z, x=x, y=y, extension=extension) else:", "class \"\"\" resource_class = NamedMap json_collection_attribute = \"template_ids\" def create(self,", "is not None \\ and len(self.auth['valid_tokens']) > 0: url =", "map :param params: The json with the styling info for", "elif layer_id is not None and filter is not None:", "filter: The filter to be applied to the layer :param", "in attribute_dict.items(): if k in self.fields + self.optional_fields: setattr(self, k,", "Equivalent to creating an anonymous map in CARTO. \"\"\" class", "auth=None): \"\"\" Allows you to fetch the map tiles of", "super(BaseMap, self).__init__(auth_client) def get_tile_url(self, x, y, z, layer_id=None, feature_id=None, filter=None,", "hasattr(self, 'template_id') \\ else self.layergroupid if layer_id is not None", "named and anonymous maps .. moduleauthor:: <NAME> <<EMAIL>> .. 
moduleauthor::", "filter to be applied to the layer :param extension: The", "all layers of your map, or a list of layers.", "urljoin except ImportError: from urlparse import urljoin from pyrestcli.resources import", "\"\"\" Initializes an AnonymousMap instance :param auth_client: Auth client \"\"\"", "auth_client): \"\"\" Initializes a BaseMap instance :param auth_client: Auth client", "info for the named map :type params: dict :return: :raise:", "format(template_id=template_id, layer=layer_id, feature_id=feature_id) elif layer_id is not None and filter", "layer_id=None, feature_id=None, filter=None, extension=\"png\"): \"\"\" Prepares a URL to get", "Module for working with named and anonymous maps .. module::", "all layers, enter the value 'all' To show a list", "str :type extension: str :return: A URL to download data", "extension=extension) elif layer_id is not None: url = urljoin(base_url, \"{template_id}/{layer}/{z}/{x}/{y}.{extension}\").", "import CartoException, CartoRateLimitException API_VERSION = \"v1\" NAMED_API_ENDPOINT = \"api/{api_version}/map/named/\" ANONYMOUS_API_ENDPOINT", "the feature :param filter: The filter to be applied to", "for the named map :type params: dict :return: :raise: CartoException", "= \"name\" def __str__(self): try: return unicode(self.name).encode(\"utf-8\") except AttributeError: return", "first layer, enter the value 1 To show all layers,", "of your \\ map), all layers of your map, or", "collection_endpoint = ANONYMOUS_API_ENDPOINT.format( api_version=API_VERSION) def __init__(self, auth_client): \"\"\" Initializes an", "an AnonymousMap instance :param auth_client: Auth client \"\"\" self.optional_fields =", "= NAMED_API_ENDPOINT.format( api_version=API_VERSION) id_field = \"template_id\" name_field = \"name\" def", "int :type y: int :type z: int :type layer_id: str", "New named map object :rtype: NamedMap :raise: CartoException \"\"\" resource", "the styling info for the named map :type params: dict", ":raise: CartoException \"\"\" try: self.send(self.Meta.collection_endpoint, \"POST\", json=params) except CartoRateLimitException as", "\"\"\" Equivalent to creating a named map in CARTO. 
\"\"\"", "the value 0 To show the first layer, enter the", "try: for k, v in attribute_dict.items(): if k in self.fields", "self).__init__(auth_client) def get_tile_url(self, x, y, z, layer_id=None, feature_id=None, filter=None, extension=\"png\"):", "as e: raise CartoException(e) def update_from_dict(self, attribute_dict): \"\"\" Method overriden", "auth_client: Auth client \"\"\" self.optional_fields = ['cdn_url', 'last_updated', 'layergroupid', 'metadata']", "`template` must contain the JSON object defining the named map", ":param feature_id: The id of the feature :param filter: The", "be applied to the layer :param extension: The format of", ":rtype: str :raise: CartoException \"\"\" base_url = self.client.base_url + self.Meta.collection_endpoint", "\"POST\", json=params) except CartoRateLimitException as e: raise e except Exception", "json with the styling info for the named map :type", "+ self.Meta.collection_endpoint template_id = self.template_id if hasattr(self, 'template_id') \\ else", "layer=layer_id, feature_id=feature_id) elif layer_id is not None and filter is", "json_collection_attribute = \"template_ids\" def create(self, **kwargs): \"\"\" Creates a named", "CartoException(e) def update_from_dict(self, attribute_dict): \"\"\" Method overriden from the base", "URL to download data :rtype: str :raise: CartoException \"\"\" base_url", "the backend self.optional_fields = [\"template_id\", \"layergroupid\", \"last_updated\"] super(NamedMap, self).__init__(auth_client) def", "enter the value 1 To show all layers, enter the", "Initializes a NamedMap instance :param auth_client: Auth client \"\"\" self.fields", "Specifically an attribute `template` must contain the JSON object defining", "CartoRateLimitException API_VERSION = \"v1\" NAMED_API_ENDPOINT = \"api/{api_version}/map/named/\" ANONYMOUS_API_ENDPOINT = \"api/{api_version}/map/\"", "url = urljoin(base_url, \"{template_id}/{filter}/{z}/{x}/{y}.{extension}\"). 
\\ format(template_id=template_id, filter=filter, z=z, x=x, y=y,", "\"\"\" Module for working with named and anonymous maps ..", "NAMED_API_ENDPOINT.format( api_version=API_VERSION) id_field = \"template_id\" name_field = \"name\" def __str__(self):", "map), all layers of your map, or a list of", "None and feature_id is not None: url = urljoin(base_url, \"{template_id}/{layer}/attributes/{feature_id}\").", "params: dict :return: :raise: CartoException \"\"\" try: self.send(self.Meta.collection_endpoint, \"POST\", json=params)", "try: self.send(self.Meta.collection_endpoint, \"POST\", json=params) except CartoRateLimitException as e: raise e", "x, y, z, layer_id=None, feature_id=None, filter=None, extension=\"png\"): \"\"\" Prepares a", "enter the value 0 To show the first layer, enter", "To show all layers, enter the value 'all' To show", "feature_id: str :type filter: str :type extension: str :return: A", "Optional fields can be assigned by some responses create, instantiate,", "self.send(self.Meta.collection_endpoint, \"POST\", json=params) except CartoRateLimitException as e: raise e except", ":raise: CartoException \"\"\" resource = self.resource_class(self.client) resource.update_from_dict(kwargs['template']) resource.save(force_create=True) return resource", "self.fields + self.optional_fields: setattr(self, k, v) class NamedMapManager(Manager): \"\"\" Manager", "NamedMap(BaseMap): \"\"\" Equivalent to creating a named map in CARTO.", "the value 1 To show all layers, enter the value", "be assigned by some responses create, instantiate, # but are", "to be applied to the layer :param extension: The format", "x=x, y=y, extension=extension) else: url = urljoin(base_url, \"{template_id}/{z}/{x}/{y}.{extension}\"). \\ format(", "base class \"\"\" if 'template' in attribute_dict: self.update_from_dict(attribute_dict['template']) setattr(self, self.Meta.id_field,", "named map object :rtype: NamedMap :raise: CartoException \"\"\" resource =", "int :type layer_id: str :type feature_id: str :type filter: str", "+ self.optional_fields: setattr(self, k, v) except Exception: setattr(self, self.Meta.id_field, attribute_dict)", "class Meta: collection_endpoint = ANONYMOUS_API_ENDPOINT.format( api_version=API_VERSION) def __init__(self, auth_client): \"\"\"", "data to be retrieved: png, mvt, ... :type x: int", "__init__(self, auth_client): \"\"\" Initializes a NamedMap instance :param auth_client: Auth", "the data to be retrieved: png, mvt, ... 
:type x:", "must contain the JSON object defining the named map :type", "url class NamedMap(BaseMap): \"\"\" Equivalent to creating a named map", "by some responses create, instantiate, # but are not saved", "str :raise: CartoException \"\"\" base_url = self.client.base_url + self.Meta.collection_endpoint template_id", "The json with the styling info for the named map", "if 'template' in attribute_dict: self.update_from_dict(attribute_dict['template']) setattr(self, self.Meta.id_field, attribute_dict['template']['name']) return try:", "k, v) except Exception: setattr(self, self.Meta.id_field, attribute_dict) class AnonymousMap(BaseMap): \"\"\"", "self.layergroupid if layer_id is not None and feature_id is not", "\"auth\", \"placeholders\", \"layergroup\", \"view\"] # Optional fields can be assigned", "Can be a number (referring to the # layer of", "data :rtype: str :raise: CartoException \"\"\" base_url = self.client.base_url +", "pyrestcli.resources import Manager, Resource from .exceptions import CartoException, CartoRateLimitException API_VERSION", "self.optional_fields: setattr(self, k, v) class NamedMapManager(Manager): \"\"\" Manager for the", "filter=None, extension=\"png\"): \"\"\" Prepares a URL to get data (raster", "e except Exception as e: raise CartoException(e) def update_from_dict(self, attribute_dict):", "auth_client): \"\"\" Initializes an AnonymousMap instance :param auth_client: Auth client", "a URL to get data (raster or vector) from a", "the named map :type params: dict :return: :raise: CartoException \"\"\"", "z: The zoom level :param layer_id: Can be a number", "as '0,1,2' :param feature_id: The id of the feature :param", "len(self.auth['valid_tokens']) > 0: url = urljoin(url, \"?auth_token={auth_token}\"). \\ format(auth_token=self.auth['valid_tokens'][0]) return", "name_field = \"name\" def __str__(self): try: return unicode(self.name).encode(\"utf-8\") except AttributeError:", ":return: :raise: CartoException \"\"\" try: endpoint = (self.Meta.collection_endpoint + \"{template_id}\").", "AnonymousMap :param x: The x tile :param y: The y", "layer_id: str :type feature_id: str :type filter: str :type extension:", "Meta: collection_endpoint = ANONYMOUS_API_ENDPOINT.format( api_version=API_VERSION) def __init__(self, auth_client): \"\"\" Initializes", "attribute_dict) class AnonymousMap(BaseMap): \"\"\" Equivalent to creating an anonymous map", "x: The x tile :param y: The y tile :param", "layers. To show just the basemap layer, enter the value", "urljoin(url, \"?auth_token={auth_token}\"). \\ format(auth_token=self.auth['valid_tokens'][0]) return url class NamedMap(BaseMap): \"\"\" Equivalent", "self.optional_fields = ['cdn_url', 'last_updated', 'layergroupid', 'metadata'] super(AnonymousMap, self).__init__(auth_client) def instantiate(self,", "The filter to be applied to the layer :param extension:", "NamedMap or AnonymousMap :param x: The x tile :param y:", ":return: A URL to download data :rtype: str :raise: CartoException", "you to fetch the map tiles of a created map", "not None: url = urljoin(base_url, \"{template_id}/{layer}/attributes/{feature_id}\"). \\ format(template_id=template_id, layer=layer_id, feature_id=feature_id)", "self.Meta.id_field, attribute_dict) class AnonymousMap(BaseMap): \"\"\" Equivalent to creating an anonymous", "return url class NamedMap(BaseMap): \"\"\" Equivalent to creating a named", "to be retrieved: png, mvt, ... 
:type x: int :type", "self.fields + self.optional_fields: setattr(self, k, v) except Exception: setattr(self, self.Meta.id_field,", "and filter is not None: url = urljoin(base_url, \"{template_id}/{filter}/{z}/{x}/{y}.{extension}\"). \\", "str :return: A URL to download data :rtype: str :raise:", "NamedMap and AnonymousMap \"\"\" def __init__(self, auth_client): \"\"\" Initializes a", "Method overriden from the base class \"\"\" if 'template' in", "params: dict :type auth: :class:`carto.auth.APIKeyAuthClient` :return: :raise: CartoException \"\"\" try:", "creating an anonymous map in CARTO. \"\"\" class Meta: collection_endpoint", "y=y, extension=extension) else: url = urljoin(base_url, \"{template_id}/{z}/{x}/{y}.{extension}\"). \\ format( template_id=template_id,", "def update_from_dict(self, attribute_dict): for k, v in attribute_dict.items(): if k", "a named map in CARTO. \"\"\" class Meta: collection_endpoint =", "NamedMap :raise: CartoException \"\"\" resource = self.resource_class(self.client) resource.update_from_dict(kwargs['template']) resource.save(force_create=True) return", "= ['cdn_url', 'last_updated', 'layergroupid', 'metadata'] super(AnonymousMap, self).__init__(auth_client) def instantiate(self, params):", "= (self.Meta.collection_endpoint + \"{template_id}\"). \\ format(template_id=self.template_id) if (auth is not", "for working with named and anonymous maps .. moduleauthor:: <NAME>", "CartoException \"\"\" try: self.send(self.Meta.collection_endpoint, \"POST\", json=params) except CartoRateLimitException as e:", "\"{template_id}/{layer}/attributes/{feature_id}\"). \\ format(template_id=template_id, layer=layer_id, feature_id=feature_id) elif layer_id is not None", "import Manager, Resource from .exceptions import CartoException, CartoRateLimitException API_VERSION =", "extension=extension) if hasattr(self, 'auth') and self.auth is not None \\", "in CARTO. \"\"\" class Meta: collection_endpoint = NAMED_API_ENDPOINT.format( api_version=API_VERSION) id_field", "named map :type params: dict :return: :raise: CartoException \"\"\" try:", "map :type kwargs: kwargs :return: New named map object :rtype:", "info for the named map :param auth: The auth client", "png, mvt, ... :type x: int :type y: int :type", "elif layer_id is not None: url = urljoin(base_url, \"{template_id}/{layer}/{z}/{x}/{y}.{extension}\"). \\", "\"\"\" Base class for NamedMap and AnonymousMap \"\"\" def __init__(self,", "auth: :class:`carto.auth.APIKeyAuthClient` :return: :raise: CartoException \"\"\" try: endpoint = (self.Meta.collection_endpoint", "template_id=template_id, z=z, x=x, y=y, extension=extension) if hasattr(self, 'auth') and self.auth", "\"\"\" super(BaseMap, self).__init__(auth_client) def get_tile_url(self, x, y, z, layer_id=None, feature_id=None,", "map in CARTO. 
\"\"\" class Meta: collection_endpoint = NAMED_API_ENDPOINT.format( api_version=API_VERSION)", "instance :param auth_client: Auth client \"\"\" self.optional_fields = ['cdn_url', 'last_updated',", "k in self.fields + self.optional_fields: setattr(self, k, v) class NamedMapManager(Manager):", "BaseMap(Resource): \"\"\" Base class for NamedMap and AnonymousMap \"\"\" def", ":type x: int :type y: int :type z: int :type", "client :type params: dict :type auth: :class:`carto.auth.APIKeyAuthClient` :return: :raise: CartoException", "AttributeError: return super(NamedMap, self).__repr__() def __init__(self, auth_client): \"\"\" Initializes a", "\"layergroupid\", \"last_updated\"] super(NamedMap, self).__init__(auth_client) def instantiate(self, params, auth=None): \"\"\" Allows", "in attribute_dict: self.update_from_dict(attribute_dict['template']) setattr(self, self.Meta.id_field, attribute_dict['template']['name']) return try: for k,", "the named map. Specifically an attribute `template` must contain the", "map tiles of a created map :param params: The json", "except Exception as e: raise CartoException(e) def update_from_dict(self, attribute_dict): \"\"\"", "feature_id=None, filter=None, extension=\"png\"): \"\"\" Prepares a URL to get data", "# Optional fields can be assigned by some responses create,", "\\ format(auth_token=auth) self.send(endpoint, \"POST\", json=params) except CartoRateLimitException as e: raise", "separated \\ layer value as '0,1,2' :param feature_id: The id", "self).__init__(auth_client) def instantiate(self, params): \"\"\" Allows you to fetch the", "raise CartoException(e) def update_from_dict(self, attribute_dict): for k, v in attribute_dict.items():", "tile :param z: The zoom level :param layer_id: Can be", "filter is not None: url = urljoin(base_url, \"{template_id}/{filter}/{z}/{x}/{y}.{extension}\"). \\ format(template_id=template_id,", "the styling info for the named map :param auth: The", "[\"version\", \"name\", \"auth\", \"placeholders\", \"layergroup\", \"view\"] # Optional fields can", "\"\"\" self.fields = [\"version\", \"name\", \"auth\", \"placeholders\", \"layergroup\", \"view\"] #", "(referring to the # layer of your \\ map), all", "creating the named map. Specifically an attribute `template` must contain", "\\ format(template_id=template_id, layer=layer_id, feature_id=feature_id) elif layer_id is not None and", "Exception as e: raise CartoException(e) def update_from_dict(self, attribute_dict): for k,", "urljoin(base_url, \"{template_id}/{filter}/{z}/{x}/{y}.{extension}\"). \\ format(template_id=template_id, filter=filter, z=z, x=x, y=y, extension=extension) elif", "Resource from .exceptions import CartoException, CartoRateLimitException API_VERSION = \"v1\" NAMED_API_ENDPOINT", "raise CartoException(e) def update_from_dict(self, attribute_dict): \"\"\" Method overriden from the", "data (raster or vector) from a NamedMap or AnonymousMap :param", "\"\"\" Allows you to fetch the map tiles of a", "self.optional_fields: setattr(self, k, v) except Exception: setattr(self, self.Meta.id_field, attribute_dict) class", "value 1 To show all layers, enter the value 'all'", "str :type feature_id: str :type filter: str :type extension: str", "raise e except Exception as e: raise CartoException(e) def update_from_dict(self,", "client \"\"\" self.optional_fields = ['cdn_url', 'last_updated', 'layergroupid', 'metadata'] super(AnonymousMap, self).__init__(auth_client)", "show all layers, enter the value 'all' To show a", "map, or a list of layers. 
To show just the", "self.Meta.collection_endpoint template_id = self.template_id if hasattr(self, 'template_id') \\ else self.layergroupid", "layers, enter the comma separated \\ layer value as '0,1,2'", "<NAME> <<EMAIL>> \"\"\" try: from urllib.parse import urljoin except ImportError:", "CartoException \"\"\" base_url = self.client.base_url + self.Meta.collection_endpoint template_id = self.template_id", "and self.auth is not None \\ and len(self.auth['valid_tokens']) > 0:", "responses create, instantiate, # but are not saved to the", "for k, v in attribute_dict.items(): if k in self.fields +", "= \"api/{api_version}/map/\" class BaseMap(Resource): \"\"\" Base class for NamedMap and", "\"{template_id}/{z}/{x}/{y}.{extension}\"). \\ format( template_id=template_id, z=z, x=x, y=y, extension=extension) if hasattr(self,", "\"layergroup\", \"view\"] # Optional fields can be assigned by some", "enter the comma separated \\ layer value as '0,1,2' :param", "update_from_dict(self, attribute_dict): for k, v in attribute_dict.items(): if k in", "from a NamedMap or AnonymousMap :param x: The x tile", "named map :param kwargs: Attributes for creating the named map.", "from urllib.parse import urljoin except ImportError: from urlparse import urljoin", "from .exceptions import CartoException, CartoRateLimitException API_VERSION = \"v1\" NAMED_API_ENDPOINT =", "from urlparse import urljoin from pyrestcli.resources import Manager, Resource from", "\"api/{api_version}/map/\" class BaseMap(Resource): \"\"\" Base class for NamedMap and AnonymousMap", "create, instantiate, # but are not saved to the backend", "= ANONYMOUS_API_ENDPOINT.format( api_version=API_VERSION) def __init__(self, auth_client): \"\"\" Initializes an AnonymousMap", "your \\ map), all layers of your map, or a", ":type auth: :class:`carto.auth.APIKeyAuthClient` :return: :raise: CartoException \"\"\" try: endpoint =", "feature_id is not None: url = urljoin(base_url, \"{template_id}/{layer}/attributes/{feature_id}\"). \\ format(template_id=template_id,", "value 'all' To show a list of layers, enter the", "module:: carto.maps :platform: Unix, Windows :synopsis: Module for working with", "Auth client \"\"\" self.optional_fields = ['cdn_url', 'last_updated', 'layergroupid', 'metadata'] super(AnonymousMap,", "\"\"\" Initializes a BaseMap instance :param auth_client: Auth client \"\"\"", "or a list of layers. To show just the basemap", "layer=layer_id, z=z, x=x, y=y, extension=extension) else: url = urljoin(base_url, \"{template_id}/{z}/{x}/{y}.{extension}\").", "0: url = urljoin(url, \"?auth_token={auth_token}\"). \\ format(auth_token=self.auth['valid_tokens'][0]) return url class", "of your map, or a list of layers. To show", "your map, or a list of layers. To show just", "None: url = urljoin(base_url, \"{template_id}/{layer}/attributes/{feature_id}\"). \\ format(template_id=template_id, layer=layer_id, feature_id=feature_id) elif", "of the data to be retrieved: png, mvt, ... :type", "(raster or vector) from a NamedMap or AnonymousMap :param x:", "\"\"\" try: endpoint = (self.Meta.collection_endpoint + \"{template_id}\"). 
\\ format(template_id=self.template_id) if", "Base class for NamedMap and AnonymousMap \"\"\" def __init__(self, auth_client):", "json=params) except CartoRateLimitException as e: raise e except Exception as", "update_from_dict(self, attribute_dict): \"\"\" Method overriden from the base class \"\"\"", "get data (raster or vector) from a NamedMap or AnonymousMap", "\"\"\" Creates a named map :param kwargs: Attributes for creating", "anonymous maps .. module:: carto.maps :platform: Unix, Windows :synopsis: Module", "[\"template_id\", \"layergroupid\", \"last_updated\"] super(NamedMap, self).__init__(auth_client) def instantiate(self, params, auth=None): \"\"\"", "class NamedMap(BaseMap): \"\"\" Equivalent to creating a named map in", "value 0 To show the first layer, enter the value", "import urljoin except ImportError: from urlparse import urljoin from pyrestcli.resources", "instantiate, # but are not saved to the backend self.optional_fields", "# layer of your \\ map), all layers of your", "ImportError: from urlparse import urljoin from pyrestcli.resources import Manager, Resource", "id_field = \"template_id\" name_field = \"name\" def __str__(self): try: return", "\"\"\" try: from urllib.parse import urljoin except ImportError: from urlparse", "an anonymous map in CARTO. \"\"\" class Meta: collection_endpoint =", "of layers. To show just the basemap layer, enter the", "\"\"\" Manager for the NamedMap class \"\"\" resource_class = NamedMap", "tile :param y: The y tile :param z: The zoom", "None): endpoint = (endpoint + \"?auth_token={auth_token}\"). \\ format(auth_token=auth) self.send(endpoint, \"POST\",", "urlparse import urljoin from pyrestcli.resources import Manager, Resource from .exceptions", "kwargs :return: New named map object :rtype: NamedMap :raise: CartoException", "\"{template_id}\"). \\ format(template_id=self.template_id) if (auth is not None): endpoint =", "\"?auth_token={auth_token}\"). \\ format(auth_token=self.auth['valid_tokens'][0]) return url class NamedMap(BaseMap): \"\"\" Equivalent to", ":return: :raise: CartoException \"\"\" try: self.send(self.Meta.collection_endpoint, \"POST\", json=params) except CartoRateLimitException", "styling info for the named map :type params: dict :return:", "is not None: url = urljoin(base_url, \"{template_id}/{layer}/attributes/{feature_id}\"). \\ format(template_id=template_id, layer=layer_id,", "json with the styling info for the named map :param", "'template' in attribute_dict: self.update_from_dict(attribute_dict['template']) setattr(self, self.Meta.id_field, attribute_dict['template']['name']) return try: for", "import urljoin from pyrestcli.resources import Manager, Resource from .exceptions import", "> 0: url = urljoin(url, \"?auth_token={auth_token}\"). \\ format(auth_token=self.auth['valid_tokens'][0]) return url", "setattr(self, self.Meta.id_field, attribute_dict['template']['name']) return try: for k, v in attribute_dict.items():", ":type feature_id: str :type filter: str :type extension: str :return:", "extension: The format of the data to be retrieved: png,", "None: url = urljoin(base_url, \"{template_id}/{filter}/{z}/{x}/{y}.{extension}\"). 
\\ format(template_id=template_id, filter=filter, z=z, x=x,", "of a created map :param params: The json with the", "attribute `template` must contain the JSON object defining the named", "if layer_id is not None and feature_id is not None:", "AnonymousMap \"\"\" def __init__(self, auth_client): \"\"\" Initializes a BaseMap instance", "except ImportError: from urlparse import urljoin from pyrestcli.resources import Manager,", ":param auth_client: Auth client \"\"\" self.optional_fields = ['cdn_url', 'last_updated', 'layergroupid',", "basemap layer, enter the value 0 To show the first", "except AttributeError: return super(NamedMap, self).__repr__() def __init__(self, auth_client): \"\"\" Initializes", ":param auth: The auth client :type params: dict :type auth:", "layers of your map, or a list of layers. To", "map in CARTO. \"\"\" class Meta: collection_endpoint = ANONYMOUS_API_ENDPOINT.format( api_version=API_VERSION)", "in self.fields + self.optional_fields: setattr(self, k, v) except Exception: setattr(self,", "z=z, x=x, y=y, extension=extension) if hasattr(self, 'auth') and self.auth is", "be a number (referring to the # layer of your", "not None: url = urljoin(base_url, \"{template_id}/{filter}/{z}/{x}/{y}.{extension}\"). \\ format(template_id=template_id, filter=filter, z=z,", "carto.maps :platform: Unix, Windows :synopsis: Module for working with named", "created map :param params: The json with the styling info", "layer value as '0,1,2' :param feature_id: The id of the", "backend self.optional_fields = [\"template_id\", \"layergroupid\", \"last_updated\"] super(NamedMap, self).__init__(auth_client) def instantiate(self,", "url = urljoin(base_url, \"{template_id}/{layer}/{z}/{x}/{y}.{extension}\"). \\ format(template_id=template_id, layer=layer_id, z=z, x=x, y=y,", "self.template_id if hasattr(self, 'template_id') \\ else self.layergroupid if layer_id is", "= \"v1\" NAMED_API_ENDPOINT = \"api/{api_version}/map/named/\" ANONYMOUS_API_ENDPOINT = \"api/{api_version}/map/\" class BaseMap(Resource):", "format(template_id=self.template_id) if (auth is not None): endpoint = (endpoint +", "as e: raise CartoException(e) def update_from_dict(self, attribute_dict): for k, v", "AnonymousMap instance :param auth_client: Auth client \"\"\" self.optional_fields = ['cdn_url',", "layer_id is not None and filter is not None: url", ":type y: int :type z: int :type layer_id: str :type", "for working with named and anonymous maps .. module:: carto.maps", "api_version=API_VERSION) def __init__(self, auth_client): \"\"\" Initializes an AnonymousMap instance :param", "the first layer, enter the value 1 To show all", "not None: url = urljoin(base_url, \"{template_id}/{layer}/{z}/{x}/{y}.{extension}\"). \\ format(template_id=template_id, layer=layer_id, z=z,", "class AnonymousMap(BaseMap): \"\"\" Equivalent to creating an anonymous map in", "a NamedMap or AnonymousMap :param x: The x tile :param", "feature_id: The id of the feature :param filter: The filter", "in self.fields + self.optional_fields: setattr(self, k, v) class NamedMapManager(Manager): \"\"\"", "with the styling info for the named map :param auth:", "self).__init__(auth_client) def instantiate(self, params, auth=None): \"\"\" Allows you to fetch", "not None and filter is not None: url = urljoin(base_url,", "CARTO. 
\"\"\" class Meta: collection_endpoint = NAMED_API_ENDPOINT.format( api_version=API_VERSION) id_field =", "create(self, **kwargs): \"\"\" Creates a named map :param kwargs: Attributes", "with the styling info for the named map :type params:", "AnonymousMap(BaseMap): \"\"\" Equivalent to creating an anonymous map in CARTO.", "format( template_id=template_id, z=z, x=x, y=y, extension=extension) if hasattr(self, 'auth') and", "\"last_updated\"] super(NamedMap, self).__init__(auth_client) def instantiate(self, params, auth=None): \"\"\" Allows you", "to download data :rtype: str :raise: CartoException \"\"\" base_url =", "\\ format(template_id=self.template_id) if (auth is not None): endpoint = (endpoint", "'layergroupid', 'metadata'] super(AnonymousMap, self).__init__(auth_client) def instantiate(self, params): \"\"\" Allows you", "map :param auth: The auth client :type params: dict :type", "is not None and filter is not None: url =", "\\ layer value as '0,1,2' :param feature_id: The id of", "id of the feature :param filter: The filter to be", "filter: str :type extension: str :return: A URL to download", "a list of layers, enter the comma separated \\ layer", "resource_class = NamedMap json_collection_attribute = \"template_ids\" def create(self, **kwargs): \"\"\"", "show just the basemap layer, enter the value 0 To", ".. moduleauthor:: <NAME> <<EMAIL>> \"\"\" try: from urllib.parse import urljoin", "\"template_id\" name_field = \"name\" def __str__(self): try: return unicode(self.name).encode(\"utf-8\") except", "get_tile_url(self, x, y, z, layer_id=None, feature_id=None, filter=None, extension=\"png\"): \"\"\" Prepares", "and len(self.auth['valid_tokens']) > 0: url = urljoin(url, \"?auth_token={auth_token}\"). \\ format(auth_token=self.auth['valid_tokens'][0])", "defining the named map :type kwargs: kwargs :return: New named", "NamedMap class \"\"\" resource_class = NamedMap json_collection_attribute = \"template_ids\" def", "api_version=API_VERSION) id_field = \"template_id\" name_field = \"name\" def __str__(self): try:", "Initializes a BaseMap instance :param auth_client: Auth client \"\"\" super(BaseMap,", "assigned by some responses create, instantiate, # but are not", "the map tiles of a created map :param params: The", "setattr(self, self.Meta.id_field, attribute_dict) class AnonymousMap(BaseMap): \"\"\" Equivalent to creating an", "def instantiate(self, params, auth=None): \"\"\" Allows you to fetch the", ":class:`carto.auth.APIKeyAuthClient` :return: :raise: CartoException \"\"\" try: endpoint = (self.Meta.collection_endpoint +", "list of layers, enter the comma separated \\ layer value", "named map :param auth: The auth client :type params: dict", "Creates a named map :param kwargs: Attributes for creating the", "anonymous maps .. moduleauthor:: <NAME> <<EMAIL>> .. moduleauthor:: <NAME> <<EMAIL>>", "retrieved: png, mvt, ... :type x: int :type y: int", "Manager for the NamedMap class \"\"\" resource_class = NamedMap json_collection_attribute", ":param x: The x tile :param y: The y tile", "None and filter is not None: url = urljoin(base_url, \"{template_id}/{filter}/{z}/{x}/{y}.{extension}\").", "as e: raise e except Exception as e: raise CartoException(e)", "\\ format(template_id=template_id, layer=layer_id, z=z, x=x, y=y, extension=extension) else: url =", "a list of layers. 
To show just the basemap layer,", "comma separated \\ layer value as '0,1,2' :param feature_id: The", "not None \\ and len(self.auth['valid_tokens']) > 0: url = urljoin(url,", "\"placeholders\", \"layergroup\", \"view\"] # Optional fields can be assigned by", "class Meta: collection_endpoint = NAMED_API_ENDPOINT.format( api_version=API_VERSION) id_field = \"template_id\" name_field", "\"?auth_token={auth_token}\"). \\ format(auth_token=auth) self.send(endpoint, \"POST\", json=params) except CartoRateLimitException as e:", "to fetch the map tiles of a created map :param", "if k in self.fields + self.optional_fields: setattr(self, k, v) except", "class NamedMapManager(Manager): \"\"\" Manager for the NamedMap class \"\"\" resource_class", "# but are not saved to the backend self.optional_fields =", "+ \"{template_id}\"). \\ format(template_id=self.template_id) if (auth is not None): endpoint", ":type filter: str :type extension: str :return: A URL to", "endpoint = (self.Meta.collection_endpoint + \"{template_id}\"). \\ format(template_id=self.template_id) if (auth is", "client \"\"\" self.fields = [\"version\", \"name\", \"auth\", \"placeholders\", \"layergroup\", \"view\"]", "def __init__(self, auth_client): \"\"\" Initializes a BaseMap instance :param auth_client:", "format(template_id=template_id, layer=layer_id, z=z, x=x, y=y, extension=extension) else: url = urljoin(base_url,", "in CARTO. \"\"\" class Meta: collection_endpoint = ANONYMOUS_API_ENDPOINT.format( api_version=API_VERSION) def", "params, auth=None): \"\"\" Allows you to fetch the map tiles", "extension: str :return: A URL to download data :rtype: str", "e: raise CartoException(e) def update_from_dict(self, attribute_dict): \"\"\" Method overriden from", "from pyrestcli.resources import Manager, Resource from .exceptions import CartoException, CartoRateLimitException", "(endpoint + \"?auth_token={auth_token}\"). \\ format(auth_token=auth) self.send(endpoint, \"POST\", json=params) except CartoRateLimitException", ":param kwargs: Attributes for creating the named map. Specifically an", "enter the value 'all' To show a list of layers,", "y=y, extension=extension) if hasattr(self, 'auth') and self.auth is not None", "show the first layer, enter the value 1 To show", "Windows :synopsis: Module for working with named and anonymous maps", "named map :type kwargs: kwargs :return: New named map object", "= [\"template_id\", \"layergroupid\", \"last_updated\"] super(NamedMap, self).__init__(auth_client) def instantiate(self, params, auth=None):", "a number (referring to the # layer of your \\", "list of layers. To show just the basemap layer, enter", "y: int :type z: int :type layer_id: str :type feature_id:", "format(auth_token=self.auth['valid_tokens'][0]) return url class NamedMap(BaseMap): \"\"\" Equivalent to creating a", "contain the JSON object defining the named map :type kwargs:", "= urljoin(url, \"?auth_token={auth_token}\"). \\ format(auth_token=self.auth['valid_tokens'][0]) return url class NamedMap(BaseMap): \"\"\"", ":type kwargs: kwargs :return: New named map object :rtype: NamedMap", "to creating a named map in CARTO. \"\"\" class Meta:", "map :param kwargs: Attributes for creating the named map. Specifically", "['cdn_url', 'last_updated', 'layergroupid', 'metadata'] super(AnonymousMap, self).__init__(auth_client) def instantiate(self, params): \"\"\"", "anonymous map in CARTO. 
\"\"\" class Meta: collection_endpoint = ANONYMOUS_API_ENDPOINT.format(", "but are not saved to the backend self.optional_fields = [\"template_id\",", "moduleauthor:: <NAME> <<EMAIL>> \"\"\" try: from urllib.parse import urljoin except", "if hasattr(self, 'auth') and self.auth is not None \\ and", "styling info for the named map :param auth: The auth", "NamedMap json_collection_attribute = \"template_ids\" def create(self, **kwargs): \"\"\" Creates a", "format of the data to be retrieved: png, mvt, ...", "The x tile :param y: The y tile :param z:", "z=z, x=x, y=y, extension=extension) elif layer_id is not None: url", "v) except Exception: setattr(self, self.Meta.id_field, attribute_dict) class AnonymousMap(BaseMap): \"\"\" Equivalent", "attribute_dict: self.update_from_dict(attribute_dict['template']) setattr(self, self.Meta.id_field, attribute_dict['template']['name']) return try: for k, v", "self.optional_fields = [\"template_id\", \"layergroupid\", \"last_updated\"] super(NamedMap, self).__init__(auth_client) def instantiate(self, params,", "Auth client \"\"\" super(BaseMap, self).__init__(auth_client) def get_tile_url(self, x, y, z,", "and anonymous maps .. moduleauthor:: <NAME> <<EMAIL>> .. moduleauthor:: <NAME>", "z: int :type layer_id: str :type feature_id: str :type filter:", "A URL to download data :rtype: str :raise: CartoException \"\"\"", "x=x, y=y, extension=extension) if hasattr(self, 'auth') and self.auth is not", "= urljoin(base_url, \"{template_id}/{filter}/{z}/{x}/{y}.{extension}\"). \\ format(template_id=template_id, filter=filter, z=z, x=x, y=y, extension=extension)", "for creating the named map. Specifically an attribute `template` must", "= urljoin(base_url, \"{template_id}/{z}/{x}/{y}.{extension}\"). \\ format( template_id=template_id, z=z, x=x, y=y, extension=extension)", "layer, enter the value 0 To show the first layer,", "auth client :type params: dict :type auth: :class:`carto.auth.APIKeyAuthClient` :return: :raise:", "= urljoin(base_url, \"{template_id}/{layer}/{z}/{x}/{y}.{extension}\"). \\ format(template_id=template_id, layer=layer_id, z=z, x=x, y=y, extension=extension)", "fields can be assigned by some responses create, instantiate, #", "x: int :type y: int :type z: int :type layer_id:", "try: return unicode(self.name).encode(\"utf-8\") except AttributeError: return super(NamedMap, self).__repr__() def __init__(self,", "\"\"\" try: self.send(self.Meta.collection_endpoint, \"POST\", json=params) except CartoRateLimitException as e: raise", "the layer :param extension: The format of the data to", "super(NamedMap, self).__init__(auth_client) def instantiate(self, params, auth=None): \"\"\" Allows you to", "Module for working with named and anonymous maps .. moduleauthor::", "the base class \"\"\" if 'template' in attribute_dict: self.update_from_dict(attribute_dict['template']) setattr(self,", "with named and anonymous maps .. module:: carto.maps :platform: Unix,", "and anonymous maps .. 
module:: carto.maps :platform: Unix, Windows :synopsis:", "fetch the map tiles of a created map :param params:", "def instantiate(self, params): \"\"\" Allows you to fetch the map", "the value 'all' To show a list of layers, enter", "def get_tile_url(self, x, y, z, layer_id=None, feature_id=None, filter=None, extension=\"png\"): \"\"\"", "except CartoRateLimitException as e: raise e except Exception as e:", "the NamedMap class \"\"\" resource_class = NamedMap json_collection_attribute = \"template_ids\"", "the basemap layer, enter the value 0 To show the", "class for NamedMap and AnonymousMap \"\"\" def __init__(self, auth_client): \"\"\"", "None: url = urljoin(base_url, \"{template_id}/{layer}/{z}/{x}/{y}.{extension}\"). \\ format(template_id=template_id, layer=layer_id, z=z, x=x,", "The zoom level :param layer_id: Can be a number (referring", "urljoin from pyrestcli.resources import Manager, Resource from .exceptions import CartoException,", "for the named map :param auth: The auth client :type", "**kwargs): \"\"\" Creates a named map :param kwargs: Attributes for", "1 To show all layers, enter the value 'all' To", "not saved to the backend self.optional_fields = [\"template_id\", \"layergroupid\", \"last_updated\"]", ":platform: Unix, Windows :synopsis: Module for working with named and", "value as '0,1,2' :param feature_id: The id of the feature", "return super(NamedMap, self).__repr__() def __init__(self, auth_client): \"\"\" Initializes a NamedMap", "params: The json with the styling info for the named", "The format of the data to be retrieved: png, mvt,", "urllib.parse import urljoin except ImportError: from urlparse import urljoin from", "with named and anonymous maps .. moduleauthor:: <NAME> <<EMAIL>> ..", "is not None and feature_id is not None: url =", "\\ map), all layers of your map, or a list", "class BaseMap(Resource): \"\"\" Base class for NamedMap and AnonymousMap \"\"\"", "= \"api/{api_version}/map/named/\" ANONYMOUS_API_ENDPOINT = \"api/{api_version}/map/\" class BaseMap(Resource): \"\"\" Base class", "class \"\"\" if 'template' in attribute_dict: self.update_from_dict(attribute_dict['template']) setattr(self, self.Meta.id_field, attribute_dict['template']['name'])", "Unix, Windows :synopsis: Module for working with named and anonymous", "level :param layer_id: Can be a number (referring to the", "e: raise CartoException(e) def update_from_dict(self, attribute_dict): for k, v in", "layers, enter the value 'all' To show a list of", "\"name\" def __str__(self): try: return unicode(self.name).encode(\"utf-8\") except AttributeError: return super(NamedMap,", "CartoException, CartoRateLimitException API_VERSION = \"v1\" NAMED_API_ENDPOINT = \"api/{api_version}/map/named/\" ANONYMOUS_API_ENDPOINT =", "layer, enter the value 1 To show all layers, enter", "can be assigned by some responses create, instantiate, # but", "self).__repr__() def __init__(self, auth_client): \"\"\" Initializes a NamedMap instance :param", ":rtype: NamedMap :raise: CartoException \"\"\" resource = self.resource_class(self.client) resource.update_from_dict(kwargs['template']) resource.save(force_create=True)", "<NAME> <<EMAIL>> .. moduleauthor:: <NAME> <<EMAIL>> \"\"\" try: from urllib.parse", "z=z, x=x, y=y, extension=extension) else: url = urljoin(base_url, \"{template_id}/{z}/{x}/{y}.{extension}\"). 
\\", "unicode(self.name).encode(\"utf-8\") except AttributeError: return super(NamedMap, self).__repr__() def __init__(self, auth_client): \"\"\"", "setattr(self, k, v) except Exception: setattr(self, self.Meta.id_field, attribute_dict) class AnonymousMap(BaseMap):", "Exception: setattr(self, self.Meta.id_field, attribute_dict) class AnonymousMap(BaseMap): \"\"\" Equivalent to creating", "self.send(endpoint, \"POST\", json=params) except CartoRateLimitException as e: raise e except", "attribute_dict.items(): if k in self.fields + self.optional_fields: setattr(self, k, v)", "k, v) class NamedMapManager(Manager): \"\"\" Manager for the NamedMap class", "\\ format(template_id=template_id, filter=filter, z=z, x=x, y=y, extension=extension) elif layer_id is", "\\ and len(self.auth['valid_tokens']) > 0: url = urljoin(url, \"?auth_token={auth_token}\"). \\", "k, v in attribute_dict.items(): if k in self.fields + self.optional_fields:", "kwargs: Attributes for creating the named map. Specifically an attribute", "working with named and anonymous maps .. moduleauthor:: <NAME> <<EMAIL>>", "'auth') and self.auth is not None \\ and len(self.auth['valid_tokens']) >", "try: endpoint = (self.Meta.collection_endpoint + \"{template_id}\"). \\ format(template_id=self.template_id) if (auth", "self.client.base_url + self.Meta.collection_endpoint template_id = self.template_id if hasattr(self, 'template_id') \\", "\"v1\" NAMED_API_ENDPOINT = \"api/{api_version}/map/named/\" ANONYMOUS_API_ENDPOINT = \"api/{api_version}/map/\" class BaseMap(Resource): \"\"\"", "collection_endpoint = NAMED_API_ENDPOINT.format( api_version=API_VERSION) id_field = \"template_id\" name_field = \"name\"", "for the NamedMap class \"\"\" resource_class = NamedMap json_collection_attribute =", "if hasattr(self, 'template_id') \\ else self.layergroupid if layer_id is not", "= self.client.base_url + self.Meta.collection_endpoint template_id = self.template_id if hasattr(self, 'template_id')", "\"\"\" class Meta: collection_endpoint = ANONYMOUS_API_ENDPOINT.format( api_version=API_VERSION) def __init__(self, auth_client):", "Meta: collection_endpoint = NAMED_API_ENDPOINT.format( api_version=API_VERSION) id_field = \"template_id\" name_field =", "The id of the feature :param filter: The filter to", ":raise: CartoException \"\"\" try: endpoint = (self.Meta.collection_endpoint + \"{template_id}\"). \\", "the named map :type kwargs: kwargs :return: New named map", "= \"template_id\" name_field = \"name\" def __str__(self): try: return unicode(self.name).encode(\"utf-8\")", "auth_client: Auth client \"\"\" super(BaseMap, self).__init__(auth_client) def get_tile_url(self, x, y,", "\"{template_id}/{filter}/{z}/{x}/{y}.{extension}\"). \\ format(template_id=template_id, filter=filter, z=z, x=x, y=y, extension=extension) elif layer_id", ".. moduleauthor:: <NAME> <<EMAIL>> .. moduleauthor:: <NAME> <<EMAIL>> \"\"\" try:", ":param params: The json with the styling info for the", "or vector) from a NamedMap or AnonymousMap :param x: The", "kwargs: kwargs :return: New named map object :rtype: NamedMap :raise:", "to the backend self.optional_fields = [\"template_id\", \"layergroupid\", \"last_updated\"] super(NamedMap, self).__init__(auth_client)", "feature_id=feature_id) elif layer_id is not None and filter is not", "and AnonymousMap \"\"\" def __init__(self, auth_client): \"\"\" Initializes a BaseMap", "and feature_id is not None: url = urljoin(base_url, \"{template_id}/{layer}/attributes/{feature_id}\"). 
\\", "applied to the layer :param extension: The format of the", "y: The y tile :param z: The zoom level :param", "(auth is not None): endpoint = (endpoint + \"?auth_token={auth_token}\"). \\", "client \"\"\" super(BaseMap, self).__init__(auth_client) def get_tile_url(self, x, y, z, layer_id=None,", "endpoint = (endpoint + \"?auth_token={auth_token}\"). \\ format(auth_token=auth) self.send(endpoint, \"POST\", json=params)", "Attributes for creating the named map. Specifically an attribute `template`", "be retrieved: png, mvt, ... :type x: int :type y:", "attribute_dict): \"\"\" Method overriden from the base class \"\"\" if", "attribute_dict['template']['name']) return try: for k, v in attribute_dict.items(): if k", "show a list of layers, enter the comma separated \\", ":raise: CartoException \"\"\" base_url = self.client.base_url + self.Meta.collection_endpoint template_id =", "Manager, Resource from .exceptions import CartoException, CartoRateLimitException API_VERSION = \"v1\"", "\"{template_id}/{layer}/{z}/{x}/{y}.{extension}\"). \\ format(template_id=template_id, layer=layer_id, z=z, x=x, y=y, extension=extension) else: url", ":param auth_client: Auth client \"\"\" self.fields = [\"version\", \"name\", \"auth\",", ":param y: The y tile :param z: The zoom level", "NamedMapManager(Manager): \"\"\" Manager for the NamedMap class \"\"\" resource_class =", "super(AnonymousMap, self).__init__(auth_client) def instantiate(self, params): \"\"\" Allows you to fetch", "a NamedMap instance :param auth_client: Auth client \"\"\" self.fields =", "auth_client): \"\"\" Initializes a NamedMap instance :param auth_client: Auth client", "or AnonymousMap :param x: The x tile :param y: The", "return unicode(self.name).encode(\"utf-8\") except AttributeError: return super(NamedMap, self).__repr__() def __init__(self, auth_client):", "= NamedMap json_collection_attribute = \"template_ids\" def create(self, **kwargs): \"\"\" Creates", "__init__(self, auth_client): \"\"\" Initializes a BaseMap instance :param auth_client: Auth", "vector) from a NamedMap or AnonymousMap :param x: The x", "\"template_ids\" def create(self, **kwargs): \"\"\" Creates a named map :param", "hasattr(self, 'auth') and self.auth is not None \\ and len(self.auth['valid_tokens'])", "named and anonymous maps .. module:: carto.maps :platform: Unix, Windows", "format(template_id=template_id, filter=filter, z=z, x=x, y=y, extension=extension) elif layer_id is not", "if (auth is not None): endpoint = (endpoint + \"?auth_token={auth_token}\").", "self.update_from_dict(attribute_dict['template']) setattr(self, self.Meta.id_field, attribute_dict['template']['name']) return try: for k, v in", "maps .. moduleauthor:: <NAME> <<EMAIL>> .. moduleauthor:: <NAME> <<EMAIL>> \"\"\"", "+ self.optional_fields: setattr(self, k, v) class NamedMapManager(Manager): \"\"\" Manager for", "\"\"\" Prepares a URL to get data (raster or vector)", ":type params: dict :type auth: :class:`carto.auth.APIKeyAuthClient` :return: :raise: CartoException \"\"\"", "a created map :param params: The json with the styling", ":param extension: The format of the data to be retrieved:", "= [\"version\", \"name\", \"auth\", \"placeholders\", \"layergroup\", \"view\"] # Optional fields", "instantiate(self, params): \"\"\" Allows you to fetch the map tiles", "= (endpoint + \"?auth_token={auth_token}\"). 
\\ format(auth_token=auth) self.send(endpoint, \"POST\", json=params) except", "__str__(self): try: return unicode(self.name).encode(\"utf-8\") except AttributeError: return super(NamedMap, self).__repr__() def", "the JSON object defining the named map :type kwargs: kwargs", "instance :param auth_client: Auth client \"\"\" self.fields = [\"version\", \"name\",", "try: from urllib.parse import urljoin except ImportError: from urlparse import", "'all' To show a list of layers, enter the comma", "= self.template_id if hasattr(self, 'template_id') \\ else self.layergroupid if layer_id", "\"\"\" Method overriden from the base class \"\"\" if 'template'", "attribute_dict): for k, v in attribute_dict.items(): if k in self.fields" ]
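# --- Hedged usage sketch (not part of carto/maps.py above) -----------------
# A minimal illustration of how NamedMapManager, NamedMap.instantiate and
# BaseMap.get_tile_url fit together, based only on the signatures shown
# above. The APIKeyAuthClient constructor arguments, the "template.json"
# file and the instantiate parameters are assumptions for the example, not
# part of the module.
import json

from carto.auth import APIKeyAuthClient
from carto.maps import NamedMapManager

auth_client = APIKeyAuthClient(api_key="your-api-key",
                               base_url="https://your-user.carto.com/")

with open("template.json") as f:
    template = json.load(f)              # JSON object defining the named map

named_map_manager = NamedMapManager(auth_client)
named_map = named_map_manager.create(template=template)

# Fetch the map tiles (params follow the Maps API template placeholders)
# and build a tile URL for zoom 0, tile (0, 0), showing all layers.
named_map.instantiate({"color": "#ff6600"})
print(named_map.get_tile_url(x=0, y=0, z=0, layer_id="all"))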
[ "= KVClient(1, \"127.0.0.1\", 3456) kvSlave.start() if __name__ == \"__main__\": main()", "kvSlave = KVClient(1, \"127.0.0.1\", 3456) kvSlave.start() if __name__ == \"__main__\":", "from kv_client.kv_client import KVClient def main(): kvSlave = KVClient(1, \"127.0.0.1\",", "def main(): kvSlave = KVClient(1, \"127.0.0.1\", 3456) kvSlave.start() if __name__", "kv_client.kv_client import KVClient def main(): kvSlave = KVClient(1, \"127.0.0.1\", 3456)", "import KVClient def main(): kvSlave = KVClient(1, \"127.0.0.1\", 3456) kvSlave.start()", "KVClient def main(): kvSlave = KVClient(1, \"127.0.0.1\", 3456) kvSlave.start() if", "main(): kvSlave = KVClient(1, \"127.0.0.1\", 3456) kvSlave.start() if __name__ ==" ]
[ "1) self.input = torch.tensor(numpy.random.randint(0, embeddingbags, input_size), device=device).long() offsets = torch.LongTensor([offset],", "dim, mode, input_size, offset, sparse, include_last_offset, device): self.embedding = torch.nn.EmbeddingBag(", "init(self, embeddingbags, dim, mode, input_size, offset, sparse, include_last_offset, device): self.embedding", "torch.LongTensor([offset], device=device) self.offset = torch.cat((offsets, torch.tensor([self.input.size(0)], dtype=torch.long)), 0) self.set_module_name('embeddingbag') def", "32) - 1) self.input = torch.tensor(numpy.random.randint(0, embeddingbags, input_size), device=device).long() offsets", "class EmbeddingBagBenchmark(op_bench.TorchBenchmarkBase): def init(self, embeddingbags, dim, mode, input_size, offset, sparse,", "self.offset = torch.cat((offsets, torch.tensor([self.input.size(0)], dtype=torch.long)), 0) self.set_module_name('embeddingbag') def forward(self): return", "num_embeddings=embeddingbags, embedding_dim=dim, mode=mode, include_last_offset=include_last_offset, sparse=sparse).to(device=device) numpy.random.seed((1 << 32) - 1)", "import torch import numpy from . import configs \"\"\"EmbeddingBag Operator", "import operator_benchmark as op_bench import torch import numpy from .", "torch.tensor(numpy.random.randint(0, embeddingbags, input_size), device=device).long() offsets = torch.LongTensor([offset], device=device) self.offset =", "offset, sparse, include_last_offset, device): self.embedding = torch.nn.EmbeddingBag( num_embeddings=embeddingbags, embedding_dim=dim, mode=mode,", "embeddingbags, input_size), device=device).long() offsets = torch.LongTensor([offset], device=device) self.offset = torch.cat((offsets,", "numpy.random.seed((1 << 32) - 1) self.input = torch.tensor(numpy.random.randint(0, embeddingbags, input_size),", "self.embedding = torch.nn.EmbeddingBag( num_embeddings=embeddingbags, embedding_dim=dim, mode=mode, include_last_offset=include_last_offset, sparse=sparse).to(device=device) numpy.random.seed((1 <<", "device=device).long() offsets = torch.LongTensor([offset], device=device) self.offset = torch.cat((offsets, torch.tensor([self.input.size(0)], dtype=torch.long)),", "= torch.cat((offsets, torch.tensor([self.input.size(0)], dtype=torch.long)), 0) self.set_module_name('embeddingbag') def forward(self): return self.embedding(self.input,", "return self.embedding(self.input, self.offset) op_bench.generate_pt_test(configs.embeddingbag_short_configs, EmbeddingBagBenchmark) op_bench.generate_pt_gradient_test(configs.embeddingbag_short_configs, EmbeddingBagBenchmark) if __name__ ==", "torch import numpy from . import configs \"\"\"EmbeddingBag Operator Benchmark\"\"\"", "dtype=torch.long)), 0) self.set_module_name('embeddingbag') def forward(self): return self.embedding(self.input, self.offset) op_bench.generate_pt_test(configs.embeddingbag_short_configs, EmbeddingBagBenchmark)", "<< 32) - 1) self.input = torch.tensor(numpy.random.randint(0, embeddingbags, input_size), device=device).long()", "- 1) self.input = torch.tensor(numpy.random.randint(0, embeddingbags, input_size), device=device).long() offsets =", "from . 
import configs \"\"\"EmbeddingBag Operator Benchmark\"\"\" class EmbeddingBagBenchmark(op_bench.TorchBenchmarkBase): def", "embedding_dim=dim, mode=mode, include_last_offset=include_last_offset, sparse=sparse).to(device=device) numpy.random.seed((1 << 32) - 1) self.input", "mode=mode, include_last_offset=include_last_offset, sparse=sparse).to(device=device) numpy.random.seed((1 << 32) - 1) self.input =", "\"\"\"EmbeddingBag Operator Benchmark\"\"\" class EmbeddingBagBenchmark(op_bench.TorchBenchmarkBase): def init(self, embeddingbags, dim, mode,", "input_size), device=device).long() offsets = torch.LongTensor([offset], device=device) self.offset = torch.cat((offsets, torch.tensor([self.input.size(0)],", "self.set_module_name('embeddingbag') def forward(self): return self.embedding(self.input, self.offset) op_bench.generate_pt_test(configs.embeddingbag_short_configs, EmbeddingBagBenchmark) op_bench.generate_pt_gradient_test(configs.embeddingbag_short_configs, EmbeddingBagBenchmark)", "def forward(self): return self.embedding(self.input, self.offset) op_bench.generate_pt_test(configs.embeddingbag_short_configs, EmbeddingBagBenchmark) op_bench.generate_pt_gradient_test(configs.embeddingbag_short_configs, EmbeddingBagBenchmark) if", "offsets = torch.LongTensor([offset], device=device) self.offset = torch.cat((offsets, torch.tensor([self.input.size(0)], dtype=torch.long)), 0)", "Benchmark\"\"\" class EmbeddingBagBenchmark(op_bench.TorchBenchmarkBase): def init(self, embeddingbags, dim, mode, input_size, offset,", "numpy from . import configs \"\"\"EmbeddingBag Operator Benchmark\"\"\" class EmbeddingBagBenchmark(op_bench.TorchBenchmarkBase):", "device=device) self.offset = torch.cat((offsets, torch.tensor([self.input.size(0)], dtype=torch.long)), 0) self.set_module_name('embeddingbag') def forward(self):", "= torch.tensor(numpy.random.randint(0, embeddingbags, input_size), device=device).long() offsets = torch.LongTensor([offset], device=device) self.offset", "operator_benchmark as op_bench import torch import numpy from . 
import", "device): self.embedding = torch.nn.EmbeddingBag( num_embeddings=embeddingbags, embedding_dim=dim, mode=mode, include_last_offset=include_last_offset, sparse=sparse).to(device=device) numpy.random.seed((1", "torch.tensor([self.input.size(0)], dtype=torch.long)), 0) self.set_module_name('embeddingbag') def forward(self): return self.embedding(self.input, self.offset) op_bench.generate_pt_test(configs.embeddingbag_short_configs,", "<reponame>AndreasKaratzas/stonne<filename>pytorch-frontend/benchmarks/operator_benchmark/pt/embeddingbag_test.py import operator_benchmark as op_bench import torch import numpy from", "import configs \"\"\"EmbeddingBag Operator Benchmark\"\"\" class EmbeddingBagBenchmark(op_bench.TorchBenchmarkBase): def init(self, embeddingbags,", "def init(self, embeddingbags, dim, mode, input_size, offset, sparse, include_last_offset, device):", "self.offset) op_bench.generate_pt_test(configs.embeddingbag_short_configs, EmbeddingBagBenchmark) op_bench.generate_pt_gradient_test(configs.embeddingbag_short_configs, EmbeddingBagBenchmark) if __name__ == \"__main__\": op_bench.benchmark_runner.main()", "self.embedding(self.input, self.offset) op_bench.generate_pt_test(configs.embeddingbag_short_configs, EmbeddingBagBenchmark) op_bench.generate_pt_gradient_test(configs.embeddingbag_short_configs, EmbeddingBagBenchmark) if __name__ == \"__main__\":", "configs \"\"\"EmbeddingBag Operator Benchmark\"\"\" class EmbeddingBagBenchmark(op_bench.TorchBenchmarkBase): def init(self, embeddingbags, dim,", "0) self.set_module_name('embeddingbag') def forward(self): return self.embedding(self.input, self.offset) op_bench.generate_pt_test(configs.embeddingbag_short_configs, EmbeddingBagBenchmark) op_bench.generate_pt_gradient_test(configs.embeddingbag_short_configs,", "= torch.LongTensor([offset], device=device) self.offset = torch.cat((offsets, torch.tensor([self.input.size(0)], dtype=torch.long)), 0) self.set_module_name('embeddingbag')", "as op_bench import torch import numpy from . 
import configs", "sparse=sparse).to(device=device) numpy.random.seed((1 << 32) - 1) self.input = torch.tensor(numpy.random.randint(0, embeddingbags,", "= torch.nn.EmbeddingBag( num_embeddings=embeddingbags, embedding_dim=dim, mode=mode, include_last_offset=include_last_offset, sparse=sparse).to(device=device) numpy.random.seed((1 << 32)", "forward(self): return self.embedding(self.input, self.offset) op_bench.generate_pt_test(configs.embeddingbag_short_configs, EmbeddingBagBenchmark) op_bench.generate_pt_gradient_test(configs.embeddingbag_short_configs, EmbeddingBagBenchmark) if __name__", "include_last_offset, device): self.embedding = torch.nn.EmbeddingBag( num_embeddings=embeddingbags, embedding_dim=dim, mode=mode, include_last_offset=include_last_offset, sparse=sparse).to(device=device)", "torch.nn.EmbeddingBag( num_embeddings=embeddingbags, embedding_dim=dim, mode=mode, include_last_offset=include_last_offset, sparse=sparse).to(device=device) numpy.random.seed((1 << 32) -", "self.input = torch.tensor(numpy.random.randint(0, embeddingbags, input_size), device=device).long() offsets = torch.LongTensor([offset], device=device)", "input_size, offset, sparse, include_last_offset, device): self.embedding = torch.nn.EmbeddingBag( num_embeddings=embeddingbags, embedding_dim=dim,", "embeddingbags, dim, mode, input_size, offset, sparse, include_last_offset, device): self.embedding =", "sparse, include_last_offset, device): self.embedding = torch.nn.EmbeddingBag( num_embeddings=embeddingbags, embedding_dim=dim, mode=mode, include_last_offset=include_last_offset,", "torch.cat((offsets, torch.tensor([self.input.size(0)], dtype=torch.long)), 0) self.set_module_name('embeddingbag') def forward(self): return self.embedding(self.input, self.offset)", "Operator Benchmark\"\"\" class EmbeddingBagBenchmark(op_bench.TorchBenchmarkBase): def init(self, embeddingbags, dim, mode, input_size,", "EmbeddingBagBenchmark(op_bench.TorchBenchmarkBase): def init(self, embeddingbags, dim, mode, input_size, offset, sparse, include_last_offset,", "include_last_offset=include_last_offset, sparse=sparse).to(device=device) numpy.random.seed((1 << 32) - 1) self.input = torch.tensor(numpy.random.randint(0,", "op_bench import torch import numpy from . import configs \"\"\"EmbeddingBag", "mode, input_size, offset, sparse, include_last_offset, device): self.embedding = torch.nn.EmbeddingBag( num_embeddings=embeddingbags,", ". import configs \"\"\"EmbeddingBag Operator Benchmark\"\"\" class EmbeddingBagBenchmark(op_bench.TorchBenchmarkBase): def init(self,", "import numpy from . import configs \"\"\"EmbeddingBag Operator Benchmark\"\"\" class" ]
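# --- Hedged standalone sketch (not part of the benchmark above) ------------
# Illustrates the input/offsets convention the benchmark builds: EmbeddingBag
# consumes a flat 1-D index tensor plus an offsets tensor marking where each
# bag starts; with include_last_offset=True the final offset entry is the
# total number of indices, which is what the benchmark's self.offset
# construction mirrors. The shapes and values below are arbitrary examples.
import torch

bag = torch.nn.EmbeddingBag(num_embeddings=10, embedding_dim=4,
                            mode='sum', include_last_offset=True)
indices = torch.tensor([1, 2, 4, 5, 4, 3, 2, 9])   # 8 lookups in total
offsets = torch.tensor([0, 3, 8])                   # bag 0: [0, 3), bag 1: [3, 8)
out = bag(indices, offsets)                         # shape (2, 4): one row per bag
print(out.shape)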
[ "to sample in each batch. dist : tensor Pre-allocated tensor", "procedure keeps picking an unmarked vertex and matching it with", "int The number of points to sample in each batch.", "option for `unique` # function in backend for efficiency. if", "shape (N, ) for to-sample distance. start_idx : tensor of", "procedure of edge coarsening used in `Metis <http://cacs.usc.edu/education/cs653/Karypis-METIS-SIAMJSC98.pdf>`__ and `Graclus", "Returns ------- a 1-D tensor A vector with each element", ".._ffi.function import _init_api from .. import backend as F from", "its edge weight) until no match can be done. If", "for each vertex. The GPU implementation is based on `A", "from dgl._ffi.base import DGLError import numpy as np from .._ffi.function", "from .. import backend as F from .. import ndarray", "batch_size, sample_points, F.zerocopy_to_dgl_ndarray(dist), F.zerocopy_to_dgl_ndarray(start_idx), F.zerocopy_to_dgl_ndarray(result)) def _neighbor_matching(graph_idx, num_nodes, edge_weights=None, relabel_idx=True):", "<https://www.cs.utexas.edu/users/inderjit/public_papers/multilevel_pami.pdf>`__ for homogeneous graph coarsening. This procedure keeps picking an", "graph. edge_weight : tensor, optional The edge weight tensor holding", "batches in the ``data``. N should be divisible by batch_size.", "tensor Pre-allocated tensor of shape (N, ) for to-sample distance.", "sample_points * batch_size assert F.shape(data)[0] % batch_size == 0 _CAPI_FarthestPointSampler(F.zerocopy_to_dgl_ndarray(data),", "This procedure keeps picking an unmarked vertex and matching it", "point sampler.\"\"\" from dgl._ffi.base import DGLError import numpy as np", "TODO: actually we can add `return_inverse` option for `unique` #", "of int Pre-allocated tensor of shape (sample_points * batch_size, )", "(N, ) for to-sample distance. start_idx : tensor of int", "of shape (sample_points * batch_size, ) for the sampled index.", "is bi-directed. Parameters ---------- graph : HeteroGraphIndex The input homogeneous", "d) where N is the number of points and d", "node_label_np = F.zerocopy_to_numpy(node_label) _, node_label_np = np.unique(node_label_np, return_inverse=True) return F.tensor(node_label_np)", "node id # TODO: actually we can add `return_inverse` option", "tensor, optional The edge weight tensor holding non-negative scalar weight", ": int The number of nodes in this homogeneous graph.", "node labels to have consecutive node ids. default: :obj:`True` Returns", "default: :obj:`None` relabel_idx : bool, optional If true, relabel resulting", "batch_size, sample_points, dist, start_idx, result): r\"\"\"Farthest Point Sampler Parameters ----------", "weight for each edge. default: :obj:`None` relabel_idx : bool, optional", "each vertex. The GPU implementation is based on `A GPU", "we can add `return_inverse` option for `unique` # function in", "NOTE: The input graph must be bi-directed (undirected) graph. Call", "assert F.shape(data)[0] >= sample_points * batch_size assert F.shape(data)[0] % batch_size", "import DGLError import numpy as np from .._ffi.function import _init_api", "picking an unmarked vertex and matching it with one its", "can be done. 
If no edge weight is given, this", "is given, this procedure will randomly pick neighbor for each", "node_label_capi = F.zerocopy_to_dgl_ndarray_for_write(node_label) _CAPI_NeighborMatching(graph_idx, edge_weight_capi, node_label_capi) if F.reduce_sum(node_label < 0).item()", "<http://cacs.usc.edu/education/cs653/Karypis-METIS-SIAMJSC98.pdf>`__ and `Graclus <https://www.cs.utexas.edu/users/inderjit/public_papers/multilevel_pami.pdf>`__ for homogeneous graph coarsening. This procedure", "unmatched node\") # reorder node id # TODO: actually we", "for the sampled index. Returns ------- No return value. The", "_neighbor_matching(graph_idx, num_nodes, edge_weights=None, relabel_idx=True): \"\"\" Description ----------- The neighbor matching", "in each batch. dist : tensor Pre-allocated tensor of shape", "for efficiency. if relabel_idx: node_label_np = F.zerocopy_to_numpy(node_label) _, node_label_np =", "<http://www.staff.science.uu.nl/~bisse101/Articles/match12.pdf>`__ NOTE: The input graph must be bi-directed (undirected) graph.", "< 0).item() != 0: raise DGLError(\"Find unmatched node\") # reorder", "in this homogeneous graph. edge_weight : tensor, optional The edge", ":obj:`None` relabel_idx : bool, optional If true, relabel resulting node", "The edge weight tensor holding non-negative scalar weight for each", "one its unmarked neighbors (that maximizes its edge weight) until", "number of points to sample in each batch. dist :", "each edge. default: :obj:`None` relabel_idx : bool, optional If true,", "neighbors (that maximizes its edge weight) until no match can", "a vertex. \"\"\" edge_weight_capi = nd.NULL[\"int64\"] if edge_weights is not", "sure your graph is bi-directed. Parameters ---------- graph : HeteroGraphIndex", ") for the sampled index. Returns ------- No return value.", "indicates the cluster ID of a vertex. \"\"\" edge_weight_capi =", "of shape (N, d) where N is the number of", "with sampled indices. \"\"\" assert F.shape(data)[0] >= sample_points * batch_size", "vector with each element that indicates the cluster ID of", "shape (N, d) where N is the number of points", "overwriten with sampled indices. \"\"\" assert F.shape(data)[0] >= sample_points *", "import numpy as np from .._ffi.function import _init_api from ..", "is the number of points and d is the dimension.", "as np from .._ffi.function import _init_api from .. import backend", "(that maximizes its edge weight) until no match can be", "= nd.NULL[\"int64\"] if edge_weights is not None: edge_weight_capi = F.zerocopy_to_dgl_ndarray(edge_weights)", "The number of nodes in this homogeneous graph. edge_weight :", "efficiency. if relabel_idx: node_label_np = F.zerocopy_to_numpy(node_label) _, node_label_np = np.unique(node_label_np,", "The neighbor matching procedure of edge coarsening used in `Metis", "scalar weight for each edge. default: :obj:`None` relabel_idx : bool,", "# function in backend for efficiency. 
if relabel_idx: node_label_np =", "tensor of int Pre-allocated tensor of shape (batch_size, ) for", "= F.zerocopy_to_dgl_ndarray(edge_weights) node_label = F.full_1d( num_nodes, -1, getattr(F, graph_idx.dtype), F.to_backend_ctx(graph_idx.ctx))", "Pre-allocated tensor of shape (batch_size, ) for the starting sample", "neighbor matching procedure of edge coarsening used in `Metis <http://cacs.usc.edu/education/cs653/Karypis-METIS-SIAMJSC98.pdf>`__", "edge weight is given, this procedure will randomly pick neighbor", "graph_idx.dtype), F.to_backend_ctx(graph_idx.ctx)) node_label_capi = F.zerocopy_to_dgl_ndarray_for_write(node_label) _CAPI_NeighborMatching(graph_idx, edge_weight_capi, node_label_capi) if F.reduce_sum(node_label", "DGLError import numpy as np from .._ffi.function import _init_api from", "Call :obj:`dgl.to_bidirected` if you are not sure your graph is", ": HeteroGraphIndex The input homogeneous graph. num_nodes : int The", "Greedy Graph Matching <http://www.staff.science.uu.nl/~bisse101/Articles/match12.pdf>`__ NOTE: The input graph must be", "farthest point sampler.\"\"\" from dgl._ffi.base import DGLError import numpy as", "of shape (N, ) for to-sample distance. start_idx : tensor", "variable ``result`` will be overwriten with sampled indices. \"\"\" assert", "result): r\"\"\"Farthest Point Sampler Parameters ---------- data : tensor A", "index. Returns ------- No return value. The input variable ``result``", "and `Graclus <https://www.cs.utexas.edu/users/inderjit/public_papers/multilevel_pami.pdf>`__ for homogeneous graph coarsening. This procedure keeps", "graph : HeteroGraphIndex The input homogeneous graph. num_nodes : int", "r\"\"\"Farthest Point Sampler Parameters ---------- data : tensor A tensor", "result : tensor of int Pre-allocated tensor of shape (sample_points", "GPU Algorithm for Greedy Graph Matching <http://www.staff.science.uu.nl/~bisse101/Articles/match12.pdf>`__ NOTE: The input", "unmarked neighbors (that maximizes its edge weight) until no match", "sample_points : int The number of points to sample in", "---------- data : tensor A tensor of shape (N, d)", ": tensor Pre-allocated tensor of shape (N, ) for to-sample", "Pre-allocated tensor of shape (sample_points * batch_size, ) for the", "a 1-D tensor A vector with each element that indicates", "add `return_inverse` option for `unique` # function in backend for", "HeteroGraphIndex The input homogeneous graph. num_nodes : int The number", "(batch_size, ) for the starting sample in each batch. result", "are not sure your graph is bi-directed. Parameters ---------- graph", "F.reduce_sum(node_label < 0).item() != 0: raise DGLError(\"Find unmatched node\") #", "and d is the dimension. batch_size : int The number", "= F.full_1d( num_nodes, -1, getattr(F, graph_idx.dtype), F.to_backend_ctx(graph_idx.ctx)) node_label_capi = F.zerocopy_to_dgl_ndarray_for_write(node_label)", "its unmarked neighbors (that maximizes its edge weight) until no", "numpy as np from .._ffi.function import _init_api from .. import", "be overwriten with sampled indices. \"\"\" assert F.shape(data)[0] >= sample_points", "number of points and d is the dimension. batch_size :", "no match can be done. If no edge weight is", "with each element that indicates the cluster ID of a", "this procedure will randomly pick neighbor for each vertex. 
The", "node_label_capi) if F.reduce_sum(node_label < 0).item() != 0: raise DGLError(\"Find unmatched", "as nd def _farthest_point_sampler(data, batch_size, sample_points, dist, start_idx, result): r\"\"\"Farthest", "Point Sampler Parameters ---------- data : tensor A tensor of", "start_idx : tensor of int Pre-allocated tensor of shape (batch_size,", "an unmarked vertex and matching it with one its unmarked", "not sure your graph is bi-directed. Parameters ---------- graph :", "vertex. The GPU implementation is based on `A GPU Algorithm", "must be bi-directed (undirected) graph. Call :obj:`dgl.to_bidirected` if you are", "for the starting sample in each batch. result : tensor", "of a vertex. \"\"\" edge_weight_capi = nd.NULL[\"int64\"] if edge_weights is", "start_idx, result): r\"\"\"Farthest Point Sampler Parameters ---------- data : tensor", "# TODO: actually we can add `return_inverse` option for `unique`", "sample_points, F.zerocopy_to_dgl_ndarray(dist), F.zerocopy_to_dgl_ndarray(start_idx), F.zerocopy_to_dgl_ndarray(result)) def _neighbor_matching(graph_idx, num_nodes, edge_weights=None, relabel_idx=True): \"\"\"", "sample_points, dist, start_idx, result): r\"\"\"Farthest Point Sampler Parameters ---------- data", "implementation is based on `A GPU Algorithm for Greedy Graph", "your graph is bi-directed. Parameters ---------- graph : HeteroGraphIndex The", "relabel resulting node labels to have consecutive node ids. default:", "for `unique` # function in backend for efficiency. if relabel_idx:", "each batch. result : tensor of int Pre-allocated tensor of", "pick neighbor for each vertex. The GPU implementation is based", "_farthest_point_sampler(data, batch_size, sample_points, dist, start_idx, result): r\"\"\"Farthest Point Sampler Parameters", ".. import backend as F from .. import ndarray as", "None: edge_weight_capi = F.zerocopy_to_dgl_ndarray(edge_weights) node_label = F.full_1d( num_nodes, -1, getattr(F,", "int The number of nodes in this homogeneous graph. edge_weight", "The input graph must be bi-directed (undirected) graph. Call :obj:`dgl.to_bidirected`", "distance. start_idx : tensor of int Pre-allocated tensor of shape", "import _init_api from .. import backend as F from ..", "if F.reduce_sum(node_label < 0).item() != 0: raise DGLError(\"Find unmatched node\")", "homogeneous graph. num_nodes : int The number of nodes in", "`Graclus <https://www.cs.utexas.edu/users/inderjit/public_papers/multilevel_pami.pdf>`__ for homogeneous graph coarsening. This procedure keeps picking", "F.zerocopy_to_dgl_ndarray(edge_weights) node_label = F.full_1d( num_nodes, -1, getattr(F, graph_idx.dtype), F.to_backend_ctx(graph_idx.ctx)) node_label_capi", "of edge coarsening used in `Metis <http://cacs.usc.edu/education/cs653/Karypis-METIS-SIAMJSC98.pdf>`__ and `Graclus <https://www.cs.utexas.edu/users/inderjit/public_papers/multilevel_pami.pdf>`__", "should be divisible by batch_size. sample_points : int The number", "Parameters ---------- data : tensor A tensor of shape (N,", "with one its unmarked neighbors (that maximizes its edge weight)", "dgl._ffi.base import DGLError import numpy as np from .._ffi.function import", "for Greedy Graph Matching <http://www.staff.science.uu.nl/~bisse101/Articles/match12.pdf>`__ NOTE: The input graph must", "resulting node labels to have consecutive node ids. default: :obj:`True`", "element that indicates the cluster ID of a vertex. 
\"\"\"", "node_label_np = np.unique(node_label_np, return_inverse=True) return F.tensor(node_label_np) else: return node_label _init_api('dgl.geometry',", "shape (batch_size, ) for the starting sample in each batch.", "points and d is the dimension. batch_size : int The", ": tensor of int Pre-allocated tensor of shape (sample_points *", "GPU implementation is based on `A GPU Algorithm for Greedy", "for to-sample distance. start_idx : tensor of int Pre-allocated tensor", "edge weight tensor holding non-negative scalar weight for each edge.", "points to sample in each batch. dist : tensor Pre-allocated", "weight is given, this procedure will randomly pick neighbor for", "maximizes its edge weight) until no match can be done.", "neighbor for each vertex. The GPU implementation is based on", "\"\"\" edge_weight_capi = nd.NULL[\"int64\"] if edge_weights is not None: edge_weight_capi", "0: raise DGLError(\"Find unmatched node\") # reorder node id #", "backend for efficiency. if relabel_idx: node_label_np = F.zerocopy_to_numpy(node_label) _, node_label_np", ":obj:`dgl.to_bidirected` if you are not sure your graph is bi-directed.", ") for to-sample distance. start_idx : tensor of int Pre-allocated", "from .._ffi.function import _init_api from .. import backend as F", "the starting sample in each batch. result : tensor of", "------- a 1-D tensor A vector with each element that", "consecutive node ids. default: :obj:`True` Returns ------- a 1-D tensor", "of shape (batch_size, ) for the starting sample in each", "_CAPI_NeighborMatching(graph_idx, edge_weight_capi, node_label_capi) if F.reduce_sum(node_label < 0).item() != 0: raise", "dist : tensor Pre-allocated tensor of shape (N, ) for", "= F.zerocopy_to_numpy(node_label) _, node_label_np = np.unique(node_label_np, return_inverse=True) return F.tensor(node_label_np) else:", "== 0 _CAPI_FarthestPointSampler(F.zerocopy_to_dgl_ndarray(data), batch_size, sample_points, F.zerocopy_to_dgl_ndarray(dist), F.zerocopy_to_dgl_ndarray(start_idx), F.zerocopy_to_dgl_ndarray(result)) def _neighbor_matching(graph_idx,", "def _farthest_point_sampler(data, batch_size, sample_points, dist, start_idx, result): r\"\"\"Farthest Point Sampler", "procedure will randomly pick neighbor for each vertex. The GPU", "be done. If no edge weight is given, this procedure", "to DGL farthest point sampler.\"\"\" from dgl._ffi.base import DGLError import", "matching procedure of edge coarsening used in `Metis <http://cacs.usc.edu/education/cs653/Karypis-METIS-SIAMJSC98.pdf>`__ and", "dist, start_idx, result): r\"\"\"Farthest Point Sampler Parameters ---------- data :", "of points and d is the dimension. batch_size : int", "getattr(F, graph_idx.dtype), F.to_backend_ctx(graph_idx.ctx)) node_label_capi = F.zerocopy_to_dgl_ndarray_for_write(node_label) _CAPI_NeighborMatching(graph_idx, edge_weight_capi, node_label_capi) if", "is based on `A GPU Algorithm for Greedy Graph Matching", "Matching <http://www.staff.science.uu.nl/~bisse101/Articles/match12.pdf>`__ NOTE: The input graph must be bi-directed (undirected)", "batch_size. sample_points : int The number of points to sample", "nodes in this homogeneous graph. 
edge_weight : tensor, optional The", "_, node_label_np = np.unique(node_label_np, return_inverse=True) return F.tensor(node_label_np) else: return node_label", "actually we can add `return_inverse` option for `unique` # function", "# reorder node id # TODO: actually we can add", "true, relabel resulting node labels to have consecutive node ids.", "input graph must be bi-directed (undirected) graph. Call :obj:`dgl.to_bidirected` if", "The input variable ``result`` will be overwriten with sampled indices.", "F.shape(data)[0] % batch_size == 0 _CAPI_FarthestPointSampler(F.zerocopy_to_dgl_ndarray(data), batch_size, sample_points, F.zerocopy_to_dgl_ndarray(dist), F.zerocopy_to_dgl_ndarray(start_idx),", "tensor holding non-negative scalar weight for each edge. default: :obj:`None`", "on `A GPU Algorithm for Greedy Graph Matching <http://www.staff.science.uu.nl/~bisse101/Articles/match12.pdf>`__ NOTE:", "divisible by batch_size. sample_points : int The number of points", "`A GPU Algorithm for Greedy Graph Matching <http://www.staff.science.uu.nl/~bisse101/Articles/match12.pdf>`__ NOTE: The", "the cluster ID of a vertex. \"\"\" edge_weight_capi = nd.NULL[\"int64\"]", "nd def _farthest_point_sampler(data, batch_size, sample_points, dist, start_idx, result): r\"\"\"Farthest Point", "Graph Matching <http://www.staff.science.uu.nl/~bisse101/Articles/match12.pdf>`__ NOTE: The input graph must be bi-directed", "batch_size : int The number of batches in the ``data``.", "import ndarray as nd def _farthest_point_sampler(data, batch_size, sample_points, dist, start_idx,", "reorder node id # TODO: actually we can add `return_inverse`", "will be overwriten with sampled indices. \"\"\" assert F.shape(data)[0] >=", "you are not sure your graph is bi-directed. Parameters ----------", "in `Metis <http://cacs.usc.edu/education/cs653/Karypis-METIS-SIAMJSC98.pdf>`__ and `Graclus <https://www.cs.utexas.edu/users/inderjit/public_papers/multilevel_pami.pdf>`__ for homogeneous graph coarsening.", "import backend as F from .. import ndarray as nd", "If no edge weight is given, this procedure will randomly", "_CAPI_FarthestPointSampler(F.zerocopy_to_dgl_ndarray(data), batch_size, sample_points, F.zerocopy_to_dgl_ndarray(dist), F.zerocopy_to_dgl_ndarray(start_idx), F.zerocopy_to_dgl_ndarray(result)) def _neighbor_matching(graph_idx, num_nodes, edge_weights=None,", "edge_weight_capi = F.zerocopy_to_dgl_ndarray(edge_weights) node_label = F.full_1d( num_nodes, -1, getattr(F, graph_idx.dtype),", "edge_weight_capi, node_label_capi) if F.reduce_sum(node_label < 0).item() != 0: raise DGLError(\"Find", ": tensor A tensor of shape (N, d) where N", "(undirected) graph. Call :obj:`dgl.to_bidirected` if you are not sure your", "\"\"\" assert F.shape(data)[0] >= sample_points * batch_size assert F.shape(data)[0] %", "cluster ID of a vertex. \"\"\" edge_weight_capi = nd.NULL[\"int64\"] if", "tensor A vector with each element that indicates the cluster", "match can be done. If no edge weight is given,", "_init_api from .. import backend as F from .. import", "``result`` will be overwriten with sampled indices. \"\"\" assert F.shape(data)[0]", "return value. The input variable ``result`` will be overwriten with", "F.zerocopy_to_dgl_ndarray(dist), F.zerocopy_to_dgl_ndarray(start_idx), F.zerocopy_to_dgl_ndarray(result)) def _neighbor_matching(graph_idx, num_nodes, edge_weights=None, relabel_idx=True): \"\"\" Description", "edge_weight : tensor, optional The edge weight tensor holding non-negative", "labels to have consecutive node ids. 
default: :obj:`True` Returns -------", "---------- graph : HeteroGraphIndex The input homogeneous graph. num_nodes :", "d is the dimension. batch_size : int The number of", "number of batches in the ``data``. N should be divisible", "of int Pre-allocated tensor of shape (batch_size, ) for the", "non-negative scalar weight for each edge. default: :obj:`None` relabel_idx :", "(sample_points * batch_size, ) for the sampled index. Returns -------", "keeps picking an unmarked vertex and matching it with one", "is the dimension. batch_size : int The number of batches", "input homogeneous graph. num_nodes : int The number of nodes", "until no match can be done. If no edge weight", "F.to_backend_ctx(graph_idx.ctx)) node_label_capi = F.zerocopy_to_dgl_ndarray_for_write(node_label) _CAPI_NeighborMatching(graph_idx, edge_weight_capi, node_label_capi) if F.reduce_sum(node_label <", "data : tensor A tensor of shape (N, d) where", "No return value. The input variable ``result`` will be overwriten", "indices. \"\"\" assert F.shape(data)[0] >= sample_points * batch_size assert F.shape(data)[0]", "------- No return value. The input variable ``result`` will be", "no edge weight is given, this procedure will randomly pick", "tensor of shape (sample_points * batch_size, ) for the sampled", "weight tensor holding non-negative scalar weight for each edge. default:", "coarsening used in `Metis <http://cacs.usc.edu/education/cs653/Karypis-METIS-SIAMJSC98.pdf>`__ and `Graclus <https://www.cs.utexas.edu/users/inderjit/public_papers/multilevel_pami.pdf>`__ for homogeneous", ":obj:`True` Returns ------- a 1-D tensor A vector with each", "in the ``data``. N should be divisible by batch_size. sample_points", "number of nodes in this homogeneous graph. edge_weight : tensor,", "= np.unique(node_label_np, return_inverse=True) return F.tensor(node_label_np) else: return node_label _init_api('dgl.geometry', __name__)", "`unique` # function in backend for efficiency. if relabel_idx: node_label_np", "not None: edge_weight_capi = F.zerocopy_to_dgl_ndarray(edge_weights) node_label = F.full_1d( num_nodes, -1,", "Parameters ---------- graph : HeteroGraphIndex The input homogeneous graph. num_nodes", "= F.zerocopy_to_dgl_ndarray_for_write(node_label) _CAPI_NeighborMatching(graph_idx, edge_weight_capi, node_label_capi) if F.reduce_sum(node_label < 0).item() !=", "A tensor of shape (N, d) where N is the", "dimension. batch_size : int The number of batches in the", "The number of points to sample in each batch. dist", "the number of points and d is the dimension. batch_size", "bi-directed. Parameters ---------- graph : HeteroGraphIndex The input homogeneous graph.", "to-sample distance. start_idx : tensor of int Pre-allocated tensor of", "id # TODO: actually we can add `return_inverse` option for", "based on `A GPU Algorithm for Greedy Graph Matching <http://www.staff.science.uu.nl/~bisse101/Articles/match12.pdf>`__", "int The number of batches in the ``data``. N should", "homogeneous graph coarsening. This procedure keeps picking an unmarked vertex", "N should be divisible by batch_size. sample_points : int The", "The number of batches in the ``data``. N should be", "batch. dist : tensor Pre-allocated tensor of shape (N, )", "holding non-negative scalar weight for each edge. default: :obj:`None` relabel_idx", "edge. default: :obj:`None` relabel_idx : bool, optional If true, relabel", "in backend for efficiency. 
if relabel_idx: node_label_np = F.zerocopy_to_numpy(node_label) _,", "tensor of shape (N, d) where N is the number", "relabel_idx : bool, optional If true, relabel resulting node labels", "where N is the number of points and d is", "edge_weights is not None: edge_weight_capi = F.zerocopy_to_dgl_ndarray(edge_weights) node_label = F.full_1d(", "for homogeneous graph coarsening. This procedure keeps picking an unmarked", ": bool, optional If true, relabel resulting node labels to", ": tensor, optional The edge weight tensor holding non-negative scalar", "num_nodes : int The number of nodes in this homogeneous", ": tensor of int Pre-allocated tensor of shape (batch_size, )", "batch_size == 0 _CAPI_FarthestPointSampler(F.zerocopy_to_dgl_ndarray(data), batch_size, sample_points, F.zerocopy_to_dgl_ndarray(dist), F.zerocopy_to_dgl_ndarray(start_idx), F.zerocopy_to_dgl_ndarray(result)) def", "graph coarsening. This procedure keeps picking an unmarked vertex and", "of nodes in this homogeneous graph. edge_weight : tensor, optional", "this homogeneous graph. edge_weight : tensor, optional The edge weight", "default: :obj:`True` Returns ------- a 1-D tensor A vector with", "of points to sample in each batch. dist : tensor", "starting sample in each batch. result : tensor of int", "interfaces to DGL farthest point sampler.\"\"\" from dgl._ffi.base import DGLError", "the sampled index. Returns ------- No return value. The input", "optional If true, relabel resulting node labels to have consecutive", "optional The edge weight tensor holding non-negative scalar weight for", "for each edge. default: :obj:`None` relabel_idx : bool, optional If", "ID of a vertex. \"\"\" edge_weight_capi = nd.NULL[\"int64\"] if edge_weights", "``data``. N should be divisible by batch_size. sample_points : int", "\"\"\" Description ----------- The neighbor matching procedure of edge coarsening", "graph is bi-directed. Parameters ---------- graph : HeteroGraphIndex The input", "graph. num_nodes : int The number of nodes in this", "tensor of shape (N, ) for to-sample distance. start_idx :", "batch. result : tensor of int Pre-allocated tensor of shape", "if you are not sure your graph is bi-directed. Parameters", "`Metis <http://cacs.usc.edu/education/cs653/Karypis-METIS-SIAMJSC98.pdf>`__ and `Graclus <https://www.cs.utexas.edu/users/inderjit/public_papers/multilevel_pami.pdf>`__ for homogeneous graph coarsening. This", "graph must be bi-directed (undirected) graph. Call :obj:`dgl.to_bidirected` if you", "by batch_size. sample_points : int The number of points to", "graph. Call :obj:`dgl.to_bidirected` if you are not sure your graph", "relabel_idx: node_label_np = F.zerocopy_to_numpy(node_label) _, node_label_np = np.unique(node_label_np, return_inverse=True) return", "shape (sample_points * batch_size, ) for the sampled index. Returns", "def _neighbor_matching(graph_idx, num_nodes, edge_weights=None, relabel_idx=True): \"\"\" Description ----------- The neighbor", "if relabel_idx: node_label_np = F.zerocopy_to_numpy(node_label) _, node_label_np = np.unique(node_label_np, return_inverse=True)", "node\") # reorder node id # TODO: actually we can", "-1, getattr(F, graph_idx.dtype), F.to_backend_ctx(graph_idx.ctx)) node_label_capi = F.zerocopy_to_dgl_ndarray_for_write(node_label) _CAPI_NeighborMatching(graph_idx, edge_weight_capi, node_label_capi)", "tensor A tensor of shape (N, d) where N is", "weight) until no match can be done. 
If no edge", "int Pre-allocated tensor of shape (batch_size, ) for the starting", "sample in each batch. result : tensor of int Pre-allocated", "If true, relabel resulting node labels to have consecutive node", "bi-directed (undirected) graph. Call :obj:`dgl.to_bidirected` if you are not sure", "F.full_1d( num_nodes, -1, getattr(F, graph_idx.dtype), F.to_backend_ctx(graph_idx.ctx)) node_label_capi = F.zerocopy_to_dgl_ndarray_for_write(node_label) _CAPI_NeighborMatching(graph_idx,", "int Pre-allocated tensor of shape (sample_points * batch_size, ) for", "ndarray as nd def _farthest_point_sampler(data, batch_size, sample_points, dist, start_idx, result):", "have consecutive node ids. default: :obj:`True` Returns ------- a 1-D", "sampled index. Returns ------- No return value. The input variable", "assert F.shape(data)[0] % batch_size == 0 _CAPI_FarthestPointSampler(F.zerocopy_to_dgl_ndarray(data), batch_size, sample_points, F.zerocopy_to_dgl_ndarray(dist),", "the dimension. batch_size : int The number of batches in", ".. import ndarray as nd def _farthest_point_sampler(data, batch_size, sample_points, dist,", "done. If no edge weight is given, this procedure will", "DGL farthest point sampler.\"\"\" from dgl._ffi.base import DGLError import numpy", "* batch_size assert F.shape(data)[0] % batch_size == 0 _CAPI_FarthestPointSampler(F.zerocopy_to_dgl_ndarray(data), batch_size,", "sampler.\"\"\" from dgl._ffi.base import DGLError import numpy as np from", "each batch. dist : tensor Pre-allocated tensor of shape (N,", "% batch_size == 0 _CAPI_FarthestPointSampler(F.zerocopy_to_dgl_ndarray(data), batch_size, sample_points, F.zerocopy_to_dgl_ndarray(dist), F.zerocopy_to_dgl_ndarray(start_idx), F.zerocopy_to_dgl_ndarray(result))", "randomly pick neighbor for each vertex. The GPU implementation is", "(N, d) where N is the number of points and", "given, this procedure will randomly pick neighbor for each vertex.", "batch_size assert F.shape(data)[0] % batch_size == 0 _CAPI_FarthestPointSampler(F.zerocopy_to_dgl_ndarray(data), batch_size, sample_points,", "!= 0: raise DGLError(\"Find unmatched node\") # reorder node id", "used in `Metis <http://cacs.usc.edu/education/cs653/Karypis-METIS-SIAMJSC98.pdf>`__ and `Graclus <https://www.cs.utexas.edu/users/inderjit/public_papers/multilevel_pami.pdf>`__ for homogeneous graph", "edge coarsening used in `Metis <http://cacs.usc.edu/education/cs653/Karypis-METIS-SIAMJSC98.pdf>`__ and `Graclus <https://www.cs.utexas.edu/users/inderjit/public_papers/multilevel_pami.pdf>`__ for", "raise DGLError(\"Find unmatched node\") # reorder node id # TODO:", "node ids. default: :obj:`True` Returns ------- a 1-D tensor A", "bool, optional If true, relabel resulting node labels to have", "and matching it with one its unmarked neighbors (that maximizes", "is not None: edge_weight_capi = F.zerocopy_to_dgl_ndarray(edge_weights) node_label = F.full_1d( num_nodes,", "from .. import ndarray as nd def _farthest_point_sampler(data, batch_size, sample_points,", "sampled indices. \"\"\" assert F.shape(data)[0] >= sample_points * batch_size assert", ">= sample_points * batch_size assert F.shape(data)[0] % batch_size == 0", "edge weight) until no match can be done. If no", "np from .._ffi.function import _init_api from .. import backend as", "The input homogeneous graph. 
num_nodes : int The number of", "0).item() != 0: raise DGLError(\"Find unmatched node\") # reorder node", "tensor of shape (batch_size, ) for the starting sample in", "can add `return_inverse` option for `unique` # function in backend", "function in backend for efficiency. if relabel_idx: node_label_np = F.zerocopy_to_numpy(node_label)", ": int The number of batches in the ``data``. N", "matching it with one its unmarked neighbors (that maximizes its", "* batch_size, ) for the sampled index. Returns ------- No", "it with one its unmarked neighbors (that maximizes its edge", "if edge_weights is not None: edge_weight_capi = F.zerocopy_to_dgl_ndarray(edge_weights) node_label =", "\"\"\"Python interfaces to DGL farthest point sampler.\"\"\" from dgl._ffi.base import", "relabel_idx=True): \"\"\" Description ----------- The neighbor matching procedure of edge", "Sampler Parameters ---------- data : tensor A tensor of shape", "unmarked vertex and matching it with one its unmarked neighbors", "node_label = F.full_1d( num_nodes, -1, getattr(F, graph_idx.dtype), F.to_backend_ctx(graph_idx.ctx)) node_label_capi =", "will randomly pick neighbor for each vertex. The GPU implementation", ": int The number of points to sample in each", "edge_weight_capi = nd.NULL[\"int64\"] if edge_weights is not None: edge_weight_capi =", "sample in each batch. dist : tensor Pre-allocated tensor of", "0 _CAPI_FarthestPointSampler(F.zerocopy_to_dgl_ndarray(data), batch_size, sample_points, F.zerocopy_to_dgl_ndarray(dist), F.zerocopy_to_dgl_ndarray(start_idx), F.zerocopy_to_dgl_ndarray(result)) def _neighbor_matching(graph_idx, num_nodes,", "Algorithm for Greedy Graph Matching <http://www.staff.science.uu.nl/~bisse101/Articles/match12.pdf>`__ NOTE: The input graph", "backend as F from .. import ndarray as nd def", "Pre-allocated tensor of shape (N, ) for to-sample distance. start_idx", "F.zerocopy_to_numpy(node_label) _, node_label_np = np.unique(node_label_np, return_inverse=True) return F.tensor(node_label_np) else: return", "tensor of int Pre-allocated tensor of shape (sample_points * batch_size,", "F.shape(data)[0] >= sample_points * batch_size assert F.shape(data)[0] % batch_size ==", "ids. default: :obj:`True` Returns ------- a 1-D tensor A vector", "of batches in the ``data``. N should be divisible by", "be bi-directed (undirected) graph. Call :obj:`dgl.to_bidirected` if you are not", "F.zerocopy_to_dgl_ndarray(result)) def _neighbor_matching(graph_idx, num_nodes, edge_weights=None, relabel_idx=True): \"\"\" Description ----------- The", "----------- The neighbor matching procedure of edge coarsening used in", ") for the starting sample in each batch. result :", "DGLError(\"Find unmatched node\") # reorder node id # TODO: actually", "Description ----------- The neighbor matching procedure of edge coarsening used", "be divisible by batch_size. sample_points : int The number of", "`return_inverse` option for `unique` # function in backend for efficiency.", "the ``data``. N should be divisible by batch_size. sample_points :", "in each batch. result : tensor of int Pre-allocated tensor", "batch_size, ) for the sampled index. Returns ------- No return", "1-D tensor A vector with each element that indicates the", "A vector with each element that indicates the cluster ID", "value. The input variable ``result`` will be overwriten with sampled", "that indicates the cluster ID of a vertex. \"\"\" edge_weight_capi", "vertex. \"\"\" edge_weight_capi = nd.NULL[\"int64\"] if edge_weights is not None:", "homogeneous graph. 
edge_weight : tensor, optional The edge weight tensor", "coarsening. This procedure keeps picking an unmarked vertex and matching", "F.zerocopy_to_dgl_ndarray_for_write(node_label) _CAPI_NeighborMatching(graph_idx, edge_weight_capi, node_label_capi) if F.reduce_sum(node_label < 0).item() != 0:", "F.zerocopy_to_dgl_ndarray(start_idx), F.zerocopy_to_dgl_ndarray(result)) def _neighbor_matching(graph_idx, num_nodes, edge_weights=None, relabel_idx=True): \"\"\" Description -----------", "Returns ------- No return value. The input variable ``result`` will", "edge_weights=None, relabel_idx=True): \"\"\" Description ----------- The neighbor matching procedure of", "each element that indicates the cluster ID of a vertex.", "num_nodes, -1, getattr(F, graph_idx.dtype), F.to_backend_ctx(graph_idx.ctx)) node_label_capi = F.zerocopy_to_dgl_ndarray_for_write(node_label) _CAPI_NeighborMatching(graph_idx, edge_weight_capi,", "N is the number of points and d is the", "as F from .. import ndarray as nd def _farthest_point_sampler(data,", "to have consecutive node ids. default: :obj:`True` Returns ------- a", "F from .. import ndarray as nd def _farthest_point_sampler(data, batch_size,", "nd.NULL[\"int64\"] if edge_weights is not None: edge_weight_capi = F.zerocopy_to_dgl_ndarray(edge_weights) node_label", "input variable ``result`` will be overwriten with sampled indices. \"\"\"", "num_nodes, edge_weights=None, relabel_idx=True): \"\"\" Description ----------- The neighbor matching procedure", "vertex and matching it with one its unmarked neighbors (that", "The GPU implementation is based on `A GPU Algorithm for" ]
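# Illustrative sketch only: a minimal single-batch NumPy version of the greedy
# farthest point sampling loop that _farthest_point_sampler above delegates to
# the C API for. The function name `naive_fps` and the single-batch
# simplification are assumptions made for this example, not part of the module.
import numpy as np

def naive_fps(points, sample_points, start_idx=0):
    """Greedily pick `sample_points` indices, each time taking the point
    farthest from the set of points already selected."""
    n = points.shape[0]
    dist = np.full(n, np.inf)                  # distance to the closest selected point
    selected = np.empty(sample_points, dtype=np.int64)
    current = start_idx
    for i in range(sample_points):
        selected[i] = current
        # Update every point's distance to the selected set.
        d = np.sum((points - points[current]) ** 2, axis=1)
        dist = np.minimum(dist, d)
        # The farthest remaining point becomes the next sample.
        current = int(np.argmax(dist))
    return selected

# Example: indices = naive_fps(np.random.rand(100, 3), 4)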
[ "self.metadata['sha1-timestamp'] = datetime.now().timestamp() return result else: return self.metadata['sha1'] def sha256(self,", "str(dst_path)) def __calc_hash__(self, h, buffer_size: int = 131072): if not", "getmtime(str(self.path)) if require_update \\ or 'sha256' not in self.metadata \\", "self.init_metadata() self.init_properties() @property def database(self): return self.__bucket__.db @property def db(self):", "self.__data_name__ @property def name(self) -> str: return self.__data_name__ @property def", "new_name def reader(self, binary: bool = False, **kwargs) -> [IO,", "= result self.metadata['sha256-timestamp'] = datetime.now().timestamp() return result else: return self.metadata['sha256']", "'' return open(str(self.path), mode=mode, **kwargs) def writer(self, binary: bool =", "False) -> [str, None]: if not self.path.exists(): return None last_modified_time", "Path], allow_overwrite=False, confirm=True, feedback=False): if self.path.exists() and not allow_overwrite: return", "+= 'b' if binary else '' return open(str(self.path), mode=mode, **kwargs)", "in self.__parent__.metadata: self.__parent__.metadata[self.__data_name__] = dict() def init_properties(self): if self.__data_name__ not", "'x' mode += 'b' if binary else '' return open(str(self.path),", "\\ or 'sha256-timestamp' not in self.metadata \\ or self.metadata['sha256-timestamp'] <", "metadata def set_properties(self, properties: Union[None, dict], merge: bool = True):", "file_reader: while True: data = file_reader.read(buffer_size) if not data: break", "\\ or 'md5-timestamp' not in self.metadata \\ or self.metadata['md5-timestamp'] <", "not feedback: return shutil.copyfile(str(src_path), str(self.path)) def export_file(self, dst_path: [str, Path],", "src_path: [str, Path], allow_overwrite=False, confirm=True, feedback=False): if self.path.exists() and not", "self.metadata \\ or self.metadata['md5-timestamp'] < last_modified_time: result = self.__calc_hash__(hashlib.md5(), buffer_size)", "= datetime.now().timestamp() return result else: return self.metadata['sha1'] def sha256(self, buffer_size:", "db(self): return self.__bucket__.db @property def bucket(self): return self.__bucket__ def init_metadata(self):", "or 'md5-timestamp' not in self.metadata \\ or self.metadata['md5-timestamp'] < last_modified_time:", "mode=mode, **kwargs) def __repr__(self): return f\"Data('{self.__data_name__}')\" def import_file(self, src_path: [str,", "True): if metadata is None: return if merge: metadata =", "'md5-timestamp' not in self.metadata \\ or self.metadata['md5-timestamp'] < last_modified_time: result", "merge: bool = True): if properties is None: return if", "self.metadata['sha256'] = result self.metadata['sha256-timestamp'] = datetime.now().timestamp() return result else: return", "in self.metadata \\ or 'md5-timestamp' not in self.metadata \\ or", "parent self.__bucket__ = bucket self.__protected_parent_methods__ = protected_parent_methods self.__protected_parent_methods__['increase_data_count']() self.init_metadata() self.init_properties()", "-> ObservableDict: return self.__parent__.properties[self.__data_name__] def rename(self, new_name: str): shutil.move(str(self.path), str(self.__parent__.path", "def set_metadata(self, metadata: Union[None, dict], merge: bool = True): if", "allow_overwrite: bool = False, confirm: bool = True, feedback: bool", "\\ or 'sha256' not in self.metadata \\ or 'sha256-timestamp' not", "= dict() def init_properties(self): if self.__data_name__ not in self.__parent__.properties: 
self.__parent__.properties[self.__data_name__]", "return None last_modified_time = getmtime(str(self.path)) if require_update \\ or 'sha256'", "and not append: raise PermissionError('Trying to overwrite existed data.') if", "or self.metadata['sha256-timestamp'] < last_modified_time: result = self.__calc_hash__(hashlib.sha256(), buffer_size) self.metadata['sha256'] =", "name(self) -> str: return self.__data_name__ @property def metadata(self) -> ObservableDict:", "BinaryIO, TextIO, None]: if not allow_overwrite and not append: raise", "getmtime(str(self.path)) if require_update \\ or 'md5' not in self.metadata \\", "self.metadata['md5'] def sha1(self, buffer_size: int = 131072, require_update: bool =", "def rename(self, new_name: str): shutil.move(str(self.path), str(self.__parent__.path / new_name)) self.__data_name__ =", "def bucket(self): return self.__bucket__ def init_metadata(self): if self.__data_name__ not in", "last_modified_time = getmtime(str(self.path)) if require_update \\ or 'sha1' not in", "dict], merge: bool = True): if metadata is None: return", "bool = False, append: bool = True, allow_overwrite: bool =", "bucket(self): return self.__bucket__ def init_metadata(self): if self.__data_name__ not in self.__parent__.metadata:", "self.path.exists(): return None with open(str(self.path), 'rb') as file_reader: while True:", "-> [str, None]: if not self.path.exists(): return None last_modified_time =", "self.metadata \\ or self.metadata['sha256-timestamp'] < last_modified_time: result = self.__calc_hash__(hashlib.sha256(), buffer_size)", "Path(dst_path).exists() and not allow_overwrite: return shutil.copyfile(str(self.path), str(dst_path)) def __calc_hash__(self, h,", "h, buffer_size: int = 131072): if not self.path.exists(): return None", "break h.update(data) return h.hexdigest() def md5(self, buffer_size: int = 131072,", "last_modified_time: result = self.__calc_hash__(hashlib.md5(), buffer_size) self.metadata['md5'] = result self.metadata['md5-timestamp'] =", "str, parent, bucket, protected_parent_methods: Union[None, dict] = None): self.__data_name__ =", "= 'a' if append else 'w' mode += 'b' if", "self.metadata['sha256-timestamp'] < last_modified_time: result = self.__calc_hash__(hashlib.sha256(), buffer_size) self.metadata['sha256'] = result", "confirm=True, feedback=False): if self.path.exists() and not allow_overwrite: return if confirm", "self.__parent__.metadata[self.__data_name__] = metadata def set_properties(self, properties: Union[None, dict], merge: bool", "'md5' not in self.metadata \\ or 'md5-timestamp' not in self.metadata", "return open(str(self.path), mode=mode, **kwargs) def writer(self, binary: bool = False,", "= properties @property def parent(self): return self.__parent__ @property def path(self)", "= False, append: bool = True, allow_overwrite: bool = False,", "shutil import hashlib from pathlib import Path from typing import", "[IO, BinaryIO, TextIO, None]: if not allow_overwrite and not append:", "metadata(self) -> ObservableDict: return self.__parent__.metadata[self.__data_name__] @property def properties(self) -> ObservableDict:", "= getmtime(str(self.path)) if require_update \\ or 'md5' not in self.metadata", "bool = False) -> [str, None]: if not self.path.exists(): return", "import hashlib from pathlib import Path from typing import TextIO,", "import_file(self, src_path: [str, Path], allow_overwrite=False, confirm=True, feedback=False): if self.path.exists() and", "protected_parent_methods self.__protected_parent_methods__['increase_data_count']() 
self.init_metadata() self.init_properties() @property def database(self): return self.__bucket__.db @property", "ObservableDict: return self.__parent__.properties[self.__data_name__] def rename(self, new_name: str): shutil.move(str(self.path), str(self.__parent__.path /", "buffer_size: int = 131072, require_update: bool = False) -> [str,", "if binary else '' return open(str(self.path), mode=mode, **kwargs) def creator(self,", "result = self.__calc_hash__(hashlib.sha1(), buffer_size) self.metadata['sha1'] = result self.metadata['sha1-timestamp'] = datetime.now().timestamp()", "self.init_properties() @property def database(self): return self.__bucket__.db @property def db(self): return", "binary else '' return open(str(self.path), mode=mode, **kwargs) def writer(self, binary:", "not feedback: return None mode = 'x' mode += 'b'", "else: return self.metadata['sha1'] def sha256(self, buffer_size: int = 131072, require_update:", "return self.__bucket__.db @property def db(self): return self.__bucket__.db @property def bucket(self):", "True: data = file_reader.read(buffer_size) if not data: break h.update(data) return", "file_reader.read(buffer_size) if not data: break h.update(data) return h.hexdigest() def md5(self,", "self.__calc_hash__(hashlib.md5(), buffer_size) self.metadata['md5'] = result self.metadata['md5-timestamp'] = datetime.now().timestamp() return result", "'sha256-timestamp' not in self.metadata \\ or self.metadata['sha256-timestamp'] < last_modified_time: result", "def import_file(self, src_path: [str, Path], allow_overwrite=False, confirm=True, feedback=False): if self.path.exists()", "= self.__calc_hash__(hashlib.sha256(), buffer_size) self.metadata['sha256'] = result self.metadata['sha256-timestamp'] = datetime.now().timestamp() return", "not in self.__parent__.properties: self.__parent__.properties[self.__data_name__] = dict() def set_metadata(self, metadata: Union[None,", "path(self) -> Path: return self.__parent__.path / self.__data_name__ @property def name(self)", "not in self.metadata \\ or self.metadata['sha256-timestamp'] < last_modified_time: result =", "if not data: break h.update(data) return h.hexdigest() def md5(self, buffer_size:", "typing import TextIO, BinaryIO, IO, Union from datetime import datetime", "or self.metadata['sha1-timestamp'] < last_modified_time: result = self.__calc_hash__(hashlib.sha1(), buffer_size) self.metadata['sha1'] =", "or 'sha1-timestamp' not in self.metadata \\ or self.metadata['sha1-timestamp'] < last_modified_time:", "return if confirm and not feedback: return shutil.copyfile(str(src_path), str(self.path)) def", "else '' return open(str(self.path), mode=mode, **kwargs) def __repr__(self): return f\"Data('{self.__data_name__}')\"", "binary else '' return open(str(self.path), mode=mode, **kwargs) def creator(self, binary:", "= True, allow_overwrite: bool = False, confirm: bool = True,", "merge: metadata = {**self.metadata, **metadata} self.__parent__.metadata[self.__data_name__] = metadata def set_properties(self,", "TextIO, None]: if not allow_overwrite and not append: raise PermissionError('Trying", "not allow_overwrite: return shutil.copyfile(str(self.path), str(dst_path)) def __calc_hash__(self, h, buffer_size: int", "self.metadata \\ or 'md5-timestamp' not in self.metadata \\ or self.metadata['md5-timestamp']", "Union[None, dict] = None): self.__data_name__ = data_name self.__parent__ = parent", "with open(str(self.path), 'rb') as file_reader: while True: data = file_reader.read(buffer_size)", "def path(self) -> Path: return 
self.__parent__.path / self.__data_name__ @property def", "self.__bucket__ def init_metadata(self): if self.__data_name__ not in self.__parent__.metadata: self.__parent__.metadata[self.__data_name__] =", "bool = True): if properties is None: return if merge:", "= getmtime(str(self.path)) if require_update \\ or 'sha1' not in self.metadata", "return open(str(self.path), mode=mode, **kwargs) def __repr__(self): return f\"Data('{self.__data_name__}')\" def import_file(self,", "self.metadata \\ or 'sha256-timestamp' not in self.metadata \\ or self.metadata['sha256-timestamp']", "if require_update \\ or 'sha1' not in self.metadata \\ or", "def metadata(self) -> ObservableDict: return self.__parent__.metadata[self.__data_name__] @property def properties(self) ->", "def database(self): return self.__bucket__.db @property def db(self): return self.__bucket__.db @property", "return result else: return self.metadata['md5'] def sha1(self, buffer_size: int =", "= False, **kwargs) -> [IO, BinaryIO, TextIO, None]: if not", "allow_overwrite: return shutil.copyfile(str(self.path), str(dst_path)) def __calc_hash__(self, h, buffer_size: int =", "datetime import datetime from os.path import getmtime from .low import", "data_name self.__parent__ = parent self.__bucket__ = bucket self.__protected_parent_methods__ = protected_parent_methods", "h.update(data) return h.hexdigest() def md5(self, buffer_size: int = 131072, require_update:", "or 'sha1' not in self.metadata \\ or 'sha1-timestamp' not in", "def parent(self): return self.__parent__ @property def path(self) -> Path: return", "feedback: bool = False, **kwargs) -> [IO, BinaryIO, TextIO, None]:", "self.__parent__.properties[self.__data_name__] def rename(self, new_name: str): shutil.move(str(self.path), str(self.__parent__.path / new_name)) self.__data_name__", "raise PermissionError('Trying to overwrite existed data.') if confirm and not", "open(str(self.path), mode=mode, **kwargs) def creator(self, binary: bool = False, confirm:", "data_name: str, parent, bucket, protected_parent_methods: Union[None, dict] = None): self.__data_name__", "h.hexdigest() def md5(self, buffer_size: int = 131072, require_update: bool =", "from pathlib import Path from typing import TextIO, BinaryIO, IO,", "def init_metadata(self): if self.__data_name__ not in self.__parent__.metadata: self.__parent__.metadata[self.__data_name__] = dict()", "@property def name(self) -> str: return self.__data_name__ @property def metadata(self)", "False, confirm: bool = False, feedback: bool = False, **kwargs)", "def properties(self) -> ObservableDict: return self.__parent__.properties[self.__data_name__] def rename(self, new_name: str):", "False, confirm: bool = True, feedback: bool = False, **kwargs)", "class Data: def __init__(self, data_name: str, parent, bucket, protected_parent_methods: Union[None,", "= file_reader.read(buffer_size) if not data: break h.update(data) return h.hexdigest() def", "if self.path.exists() and not allow_overwrite: return if confirm and not", "not self.path.exists(): return None with open(str(self.path), 'rb') as file_reader: while", "not in self.metadata \\ or self.metadata['sha1-timestamp'] < last_modified_time: result =", "self.__parent__ = parent self.__bucket__ = bucket self.__protected_parent_methods__ = protected_parent_methods self.__protected_parent_methods__['increase_data_count']()", "in self.metadata \\ or 'sha256-timestamp' not in self.metadata \\ or", "bucket self.__protected_parent_methods__ = protected_parent_methods 
self.__protected_parent_methods__['increase_data_count']() self.init_metadata() self.init_properties() @property def database(self):", "def reader(self, binary: bool = False, **kwargs) -> [IO, BinaryIO,", "return mode = 'a' if append else 'w' mode +=", "None): self.__data_name__ = data_name self.__parent__ = parent self.__bucket__ = bucket", "self.__parent__.properties[self.__data_name__] = dict() def set_metadata(self, metadata: Union[None, dict], merge: bool", "self.__parent__.metadata[self.__data_name__] @property def properties(self) -> ObservableDict: return self.__parent__.properties[self.__data_name__] def rename(self,", "writer(self, binary: bool = False, append: bool = True, allow_overwrite:", "not feedback: return mode = 'a' if append else 'w'", "__calc_hash__(self, h, buffer_size: int = 131072): if not self.path.exists(): return", "= False, confirm: bool = False, feedback: bool = False,", "and not feedback: return None mode = 'x' mode +=", "bool = False, **kwargs) -> [IO, BinaryIO, TextIO, None]: mode", "not in self.metadata \\ or 'sha256-timestamp' not in self.metadata \\", "'sha1-timestamp' not in self.metadata \\ or self.metadata['sha1-timestamp'] < last_modified_time: result", "protected_parent_methods: Union[None, dict] = None): self.__data_name__ = data_name self.__parent__ =", "result self.metadata['md5-timestamp'] = datetime.now().timestamp() return result else: return self.metadata['md5'] def", "self.metadata['sha1'] def sha256(self, buffer_size: int = 131072, require_update: bool =", "os.path import getmtime from .low import ObservableDict class Data: def", "def name(self) -> str: return self.__data_name__ @property def metadata(self) ->", "int = 131072, require_update: bool = False) -> [str, None]:", "def sha256(self, buffer_size: int = 131072, require_update: bool = False)", "Path from typing import TextIO, BinaryIO, IO, Union from datetime", "= result self.metadata['sha1-timestamp'] = datetime.now().timestamp() return result else: return self.metadata['sha1']", "None]: if not self.path.exists(): return None last_modified_time = getmtime(str(self.path)) if", "return None last_modified_time = getmtime(str(self.path)) if require_update \\ or 'md5'", "metadata is None: return if merge: metadata = {**self.metadata, **metadata}", "-> Path: return self.__parent__.path / self.__data_name__ @property def name(self) ->", "self.__parent__.properties[self.__data_name__] = properties @property def parent(self): return self.__parent__ @property def", "not in self.__parent__.metadata: self.__parent__.metadata[self.__data_name__] = dict() def init_properties(self): if self.__data_name__", "data = file_reader.read(buffer_size) if not data: break h.update(data) return h.hexdigest()", "self.__data_name__ = data_name self.__parent__ = parent self.__bucket__ = bucket self.__protected_parent_methods__", "bucket, protected_parent_methods: Union[None, dict] = None): self.__data_name__ = data_name self.__parent__", "bool = False, confirm: bool = False, feedback: bool =", "to overwrite existed data.') if confirm and not feedback: return", "Union[None, dict], merge: bool = True): if properties is None:", "creator(self, binary: bool = False, confirm: bool = False, feedback:", "def __init__(self, data_name: str, parent, bucket, protected_parent_methods: Union[None, dict] =", "\\ or self.metadata['md5-timestamp'] < last_modified_time: result = self.__calc_hash__(hashlib.md5(), buffer_size) self.metadata['md5']", "True): if properties is None: return if merge: properties =", "data.') if confirm 
and not feedback: return mode = 'a'", "return self.metadata['md5'] def sha1(self, buffer_size: int = 131072, require_update: bool", "return if merge: properties = {**self.properties, **properties} self.__parent__.properties[self.__data_name__] = properties", "False, **kwargs) -> [IO, BinaryIO, TextIO, None]: if confirm and", "buffer_size) self.metadata['sha1'] = result self.metadata['sha1-timestamp'] = datetime.now().timestamp() return result else:", "else: return self.metadata['md5'] def sha1(self, buffer_size: int = 131072, require_update:", "if binary else '' return open(str(self.path), mode=mode, **kwargs) def __repr__(self):", "-> [IO, BinaryIO, TextIO, None]: if confirm and not feedback:", "parent, bucket, protected_parent_methods: Union[None, dict] = None): self.__data_name__ = data_name", "= 131072, require_update: bool = False) -> [str, None]: if", "return None last_modified_time = getmtime(str(self.path)) if require_update \\ or 'sha1'", "BinaryIO, IO, Union from datetime import datetime from os.path import", "mode = 'a' if append else 'w' mode += 'b'", "binary else '' return open(str(self.path), mode=mode, **kwargs) def __repr__(self): return", "def __calc_hash__(self, h, buffer_size: int = 131072): if not self.path.exists():", "getmtime from .low import ObservableDict class Data: def __init__(self, data_name:", "buffer_size) self.metadata['md5'] = result self.metadata['md5-timestamp'] = datetime.now().timestamp() return result else:", "= metadata def set_properties(self, properties: Union[None, dict], merge: bool =", "= False) -> [str, None]: if not self.path.exists(): return None", "require_update \\ or 'sha256' not in self.metadata \\ or 'sha256-timestamp'", "datetime from os.path import getmtime from .low import ObservableDict class", "if properties is None: return if merge: properties = {**self.properties,", "not append: raise PermissionError('Trying to overwrite existed data.') if confirm", "datetime.now().timestamp() return result else: return self.metadata['sha1'] def sha256(self, buffer_size: int", "as file_reader: while True: data = file_reader.read(buffer_size) if not data:", "BinaryIO, TextIO, None]: if confirm and not feedback: return None", "if self.__data_name__ not in self.__parent__.metadata: self.__parent__.metadata[self.__data_name__] = dict() def init_properties(self):", "if Path(dst_path).exists() and not allow_overwrite: return shutil.copyfile(str(self.path), str(dst_path)) def __calc_hash__(self,", "properties @property def parent(self): return self.__parent__ @property def path(self) ->", "binary: bool = False, confirm: bool = False, feedback: bool", "open(str(self.path), mode=mode, **kwargs) def writer(self, binary: bool = False, append:", "if merge: metadata = {**self.metadata, **metadata} self.__parent__.metadata[self.__data_name__] = metadata def", "= data_name self.__parent__ = parent self.__bucket__ = bucket self.__protected_parent_methods__ =", "def init_properties(self): if self.__data_name__ not in self.__parent__.properties: self.__parent__.properties[self.__data_name__] = dict()", "in self.__parent__.properties: self.__parent__.properties[self.__data_name__] = dict() def set_metadata(self, metadata: Union[None, dict],", "'b' if binary else '' return open(str(self.path), mode=mode, **kwargs) def", "allow_overwrite=False, confirm=True, feedback=False): if self.path.exists() and not allow_overwrite: return if", "bool = False, **kwargs) -> [IO, BinaryIO, TextIO, None]: if", "TextIO, BinaryIO, IO, Union from datetime import datetime from os.path", 
"None]: mode = 'r' mode += 'b' if binary else", "def db(self): return self.__bucket__.db @property def bucket(self): return self.__bucket__ def", "open(str(self.path), 'rb') as file_reader: while True: data = file_reader.read(buffer_size) if", "def writer(self, binary: bool = False, append: bool = True,", "confirm: bool = True, feedback: bool = False, **kwargs) ->", "sha256(self, buffer_size: int = 131072, require_update: bool = False) ->", "None: return if merge: metadata = {**self.metadata, **metadata} self.__parent__.metadata[self.__data_name__] =", "import TextIO, BinaryIO, IO, Union from datetime import datetime from", "confirm and not feedback: return None mode = 'x' mode", "int = 131072): if not self.path.exists(): return None with open(str(self.path),", "[IO, BinaryIO, TextIO, None]: mode = 'r' mode += 'b'", "existed data.') if confirm and not feedback: return mode =", "mode = 'x' mode += 'b' if binary else ''", "131072): if not self.path.exists(): return None with open(str(self.path), 'rb') as", "bool = False, feedback: bool = False, **kwargs) -> [IO,", "= self.__calc_hash__(hashlib.md5(), buffer_size) self.metadata['md5'] = result self.metadata['md5-timestamp'] = datetime.now().timestamp() return", "= 'r' mode += 'b' if binary else '' return", "< last_modified_time: result = self.__calc_hash__(hashlib.sha256(), buffer_size) self.metadata['sha256'] = result self.metadata['sha256-timestamp']", "**kwargs) def __repr__(self): return f\"Data('{self.__data_name__}')\" def import_file(self, src_path: [str, Path],", "None: return if merge: properties = {**self.properties, **properties} self.__parent__.properties[self.__data_name__] =", "dst_path: [str, Path], allow_overwrite=False): if Path(dst_path).exists() and not allow_overwrite: return", "require_update \\ or 'sha1' not in self.metadata \\ or 'sha1-timestamp'", "if not allow_overwrite and not append: raise PermissionError('Trying to overwrite", "self.__data_name__ = new_name def reader(self, binary: bool = False, **kwargs)", "def __repr__(self): return f\"Data('{self.__data_name__}')\" def import_file(self, src_path: [str, Path], allow_overwrite=False,", "from datetime import datetime from os.path import getmtime from .low", "last_modified_time: result = self.__calc_hash__(hashlib.sha256(), buffer_size) self.metadata['sha256'] = result self.metadata['sha256-timestamp'] =", "self.__parent__.properties: self.__parent__.properties[self.__data_name__] = dict() def set_metadata(self, metadata: Union[None, dict], merge:", "@property def parent(self): return self.__parent__ @property def path(self) -> Path:", "= False, feedback: bool = False, **kwargs) -> [IO, BinaryIO,", "and not allow_overwrite: return if confirm and not feedback: return", "getmtime(str(self.path)) if require_update \\ or 'sha1' not in self.metadata \\", "**kwargs) def creator(self, binary: bool = False, confirm: bool =", "not in self.metadata \\ or 'md5-timestamp' not in self.metadata \\", "import getmtime from .low import ObservableDict class Data: def __init__(self,", "self.__calc_hash__(hashlib.sha1(), buffer_size) self.metadata['sha1'] = result self.metadata['sha1-timestamp'] = datetime.now().timestamp() return result", "**properties} self.__parent__.properties[self.__data_name__] = properties @property def parent(self): return self.__parent__ @property", "return self.__parent__.path / self.__data_name__ @property def name(self) -> str: return", "result self.metadata['sha1-timestamp'] = datetime.now().timestamp() return result else: return self.metadata['sha1'] 
def", "append: bool = True, allow_overwrite: bool = False, confirm: bool", "__repr__(self): return f\"Data('{self.__data_name__}')\" def import_file(self, src_path: [str, Path], allow_overwrite=False, confirm=True,", "return open(str(self.path), mode=mode, **kwargs) def creator(self, binary: bool = False,", "Union from datetime import datetime from os.path import getmtime from", "else 'w' mode += 'b' if binary else '' return", "append else 'w' mode += 'b' if binary else ''", "[str, None]: if not self.path.exists(): return None last_modified_time = getmtime(str(self.path))", "PermissionError('Trying to overwrite existed data.') if confirm and not feedback:", "\\ or 'md5' not in self.metadata \\ or 'md5-timestamp' not", "confirm and not feedback: return mode = 'a' if append", "Union[None, dict], merge: bool = True): if metadata is None:", "'rb') as file_reader: while True: data = file_reader.read(buffer_size) if not", "/ new_name)) self.__data_name__ = new_name def reader(self, binary: bool =", "= getmtime(str(self.path)) if require_update \\ or 'sha256' not in self.metadata", "< last_modified_time: result = self.__calc_hash__(hashlib.md5(), buffer_size) self.metadata['md5'] = result self.metadata['md5-timestamp']", "None last_modified_time = getmtime(str(self.path)) if require_update \\ or 'sha1' not", "TextIO, None]: mode = 'r' mode += 'b' if binary", "allow_overwrite and not append: raise PermissionError('Trying to overwrite existed data.')", "while True: data = file_reader.read(buffer_size) if not data: break h.update(data)", "**kwargs) -> [IO, BinaryIO, TextIO, None]: if not allow_overwrite and", "not allow_overwrite and not append: raise PermissionError('Trying to overwrite existed", "return if merge: metadata = {**self.metadata, **metadata} self.__parent__.metadata[self.__data_name__] = metadata", "Path], allow_overwrite=False): if Path(dst_path).exists() and not allow_overwrite: return shutil.copyfile(str(self.path), str(dst_path))", "merge: bool = True): if metadata is None: return if", "= None): self.__data_name__ = data_name self.__parent__ = parent self.__bucket__ =", "if confirm and not feedback: return shutil.copyfile(str(src_path), str(self.path)) def export_file(self,", "confirm and not feedback: return shutil.copyfile(str(src_path), str(self.path)) def export_file(self, dst_path:", "= self.__calc_hash__(hashlib.sha1(), buffer_size) self.metadata['sha1'] = result self.metadata['sha1-timestamp'] = datetime.now().timestamp() return", "**kwargs) def writer(self, binary: bool = False, append: bool =", "return self.__bucket__.db @property def bucket(self): return self.__bucket__ def init_metadata(self): if", "= False, confirm: bool = True, feedback: bool = False,", "{**self.metadata, **metadata} self.__parent__.metadata[self.__data_name__] = metadata def set_properties(self, properties: Union[None, dict],", "and not allow_overwrite: return shutil.copyfile(str(self.path), str(dst_path)) def __calc_hash__(self, h, buffer_size:", "@property def bucket(self): return self.__bucket__ def init_metadata(self): if self.__data_name__ not", "if require_update \\ or 'md5' not in self.metadata \\ or", "if confirm and not feedback: return None mode = 'x'", "'sha1' not in self.metadata \\ or 'sha1-timestamp' not in self.metadata", "set_properties(self, properties: Union[None, dict], merge: bool = True): if properties", "self.metadata['md5-timestamp'] = datetime.now().timestamp() return result else: return self.metadata['md5'] def sha1(self,", "export_file(self, dst_path: [str, Path], 
allow_overwrite=False): if Path(dst_path).exists() and not allow_overwrite:", "-> ObservableDict: return self.__parent__.metadata[self.__data_name__] @property def properties(self) -> ObservableDict: return", "dict() def set_metadata(self, metadata: Union[None, dict], merge: bool = True):", "allow_overwrite=False): if Path(dst_path).exists() and not allow_overwrite: return shutil.copyfile(str(self.path), str(dst_path)) def", "/ self.__data_name__ @property def name(self) -> str: return self.__data_name__ @property", "str(self.__parent__.path / new_name)) self.__data_name__ = new_name def reader(self, binary: bool", "not self.path.exists(): return None last_modified_time = getmtime(str(self.path)) if require_update \\", "**metadata} self.__parent__.metadata[self.__data_name__] = metadata def set_properties(self, properties: Union[None, dict], merge:", "str): shutil.move(str(self.path), str(self.__parent__.path / new_name)) self.__data_name__ = new_name def reader(self,", "bool = True): if metadata is None: return if merge:", "**kwargs) -> [IO, BinaryIO, TextIO, None]: if confirm and not", "and not feedback: return mode = 'a' if append else", "def md5(self, buffer_size: int = 131072, require_update: bool = False)", "open(str(self.path), mode=mode, **kwargs) def __repr__(self): return f\"Data('{self.__data_name__}')\" def import_file(self, src_path:", "from .low import ObservableDict class Data: def __init__(self, data_name: str,", "'sha256' not in self.metadata \\ or 'sha256-timestamp' not in self.metadata", "ObservableDict class Data: def __init__(self, data_name: str, parent, bucket, protected_parent_methods:", "append: raise PermissionError('Trying to overwrite existed data.') if confirm and", "False, feedback: bool = False, **kwargs) -> [IO, BinaryIO, TextIO,", "feedback: return mode = 'a' if append else 'w' mode", "def creator(self, binary: bool = False, confirm: bool = False,", "None last_modified_time = getmtime(str(self.path)) if require_update \\ or 'md5' not", "last_modified_time = getmtime(str(self.path)) if require_update \\ or 'md5' not in", "self.path.exists() and not allow_overwrite: return if confirm and not feedback:", "not allow_overwrite: return if confirm and not feedback: return shutil.copyfile(str(src_path),", "@property def db(self): return self.__bucket__.db @property def bucket(self): return self.__bucket__", "= False, **kwargs) -> [IO, BinaryIO, TextIO, None]: if confirm", "buffer_size: int = 131072): if not self.path.exists(): return None with", "require_update: bool = False) -> [str, None]: if not self.path.exists():", "= 131072): if not self.path.exists(): return None with open(str(self.path), 'rb')", "shutil.copyfile(str(src_path), str(self.path)) def export_file(self, dst_path: [str, Path], allow_overwrite=False): if Path(dst_path).exists()", "dict] = None): self.__data_name__ = data_name self.__parent__ = parent self.__bucket__", "= result self.metadata['md5-timestamp'] = datetime.now().timestamp() return result else: return self.metadata['md5']", "hashlib from pathlib import Path from typing import TextIO, BinaryIO,", "\\ or self.metadata['sha256-timestamp'] < last_modified_time: result = self.__calc_hash__(hashlib.sha256(), buffer_size) self.metadata['sha256']", "= {**self.metadata, **metadata} self.__parent__.metadata[self.__data_name__] = metadata def set_properties(self, properties: Union[None,", "-> str: return self.__data_name__ @property def metadata(self) -> ObservableDict: return", "self.metadata \\ or self.metadata['sha1-timestamp'] < 
last_modified_time: result = self.__calc_hash__(hashlib.sha1(), buffer_size)", "dict], merge: bool = True): if properties is None: return", "None]: if not allow_overwrite and not append: raise PermissionError('Trying to", "bool = True, allow_overwrite: bool = False, confirm: bool =", "self.__data_name__ not in self.__parent__.metadata: self.__parent__.metadata[self.__data_name__] = dict() def init_properties(self): if", "[str, Path], allow_overwrite=False, confirm=True, feedback=False): if self.path.exists() and not allow_overwrite:", "self.__data_name__ not in self.__parent__.properties: self.__parent__.properties[self.__data_name__] = dict() def set_metadata(self, metadata:", "@property def properties(self) -> ObservableDict: return self.__parent__.properties[self.__data_name__] def rename(self, new_name:", "bool = False, confirm: bool = True, feedback: bool =", "return result else: return self.metadata['sha1'] def sha256(self, buffer_size: int =", "import ObservableDict class Data: def __init__(self, data_name: str, parent, bucket,", "= parent self.__bucket__ = bucket self.__protected_parent_methods__ = protected_parent_methods self.__protected_parent_methods__['increase_data_count']() self.init_metadata()", "mode += 'b' if binary else '' return open(str(self.path), mode=mode,", "BinaryIO, TextIO, None]: mode = 'r' mode += 'b' if", "feedback: return None mode = 'x' mode += 'b' if", "properties: Union[None, dict], merge: bool = True): if properties is", "\\ or 'sha1' not in self.metadata \\ or 'sha1-timestamp' not", "self.__parent__ @property def path(self) -> Path: return self.__parent__.path / self.__data_name__", "self.metadata['sha1-timestamp'] < last_modified_time: result = self.__calc_hash__(hashlib.sha1(), buffer_size) self.metadata['sha1'] = result", "and not feedback: return shutil.copyfile(str(src_path), str(self.path)) def export_file(self, dst_path: [str,", "last_modified_time: result = self.__calc_hash__(hashlib.sha1(), buffer_size) self.metadata['sha1'] = result self.metadata['sha1-timestamp'] =", "'' return open(str(self.path), mode=mode, **kwargs) def __repr__(self): return f\"Data('{self.__data_name__}')\" def", "True, allow_overwrite: bool = False, confirm: bool = True, feedback:", "return self.metadata['sha1'] def sha256(self, buffer_size: int = 131072, require_update: bool", "= True, feedback: bool = False, **kwargs) -> [IO, BinaryIO,", "False, append: bool = True, allow_overwrite: bool = False, confirm:", "if require_update \\ or 'sha256' not in self.metadata \\ or", "if binary else '' return open(str(self.path), mode=mode, **kwargs) def writer(self,", "if confirm and not feedback: return mode = 'a' if", "str: return self.__data_name__ @property def metadata(self) -> ObservableDict: return self.__parent__.metadata[self.__data_name__]", "feedback: return shutil.copyfile(str(src_path), str(self.path)) def export_file(self, dst_path: [str, Path], allow_overwrite=False):", "not in self.metadata \\ or self.metadata['md5-timestamp'] < last_modified_time: result =", "None]: if confirm and not feedback: return None mode =", "False, **kwargs) -> [IO, BinaryIO, TextIO, None]: if not allow_overwrite", "return shutil.copyfile(str(self.path), str(dst_path)) def __calc_hash__(self, h, buffer_size: int = 131072):", "confirm: bool = False, feedback: bool = False, **kwargs) ->", "self.__data_name__ @property def metadata(self) -> ObservableDict: return self.__parent__.metadata[self.__data_name__] @property def", "new_name)) self.__data_name__ = new_name def reader(self, binary: bool 
= False,", "self.__bucket__ = bucket self.__protected_parent_methods__ = protected_parent_methods self.__protected_parent_methods__['increase_data_count']() self.init_metadata() self.init_properties() @property", "if metadata is None: return if merge: metadata = {**self.metadata,", "properties(self) -> ObservableDict: return self.__parent__.properties[self.__data_name__] def rename(self, new_name: str): shutil.move(str(self.path),", "feedback=False): if self.path.exists() and not allow_overwrite: return if confirm and", "init_properties(self): if self.__data_name__ not in self.__parent__.properties: self.__parent__.properties[self.__data_name__] = dict() def", "dict() def init_properties(self): if self.__data_name__ not in self.__parent__.properties: self.__parent__.properties[self.__data_name__] =", "result else: return self.metadata['md5'] def sha1(self, buffer_size: int = 131072,", "overwrite existed data.') if confirm and not feedback: return mode", "return None mode = 'x' mode += 'b' if binary", "buffer_size) self.metadata['sha256'] = result self.metadata['sha256-timestamp'] = datetime.now().timestamp() return result else:", "return shutil.copyfile(str(src_path), str(self.path)) def export_file(self, dst_path: [str, Path], allow_overwrite=False): if", "self.__parent__.path / self.__data_name__ @property def name(self) -> str: return self.__data_name__", "import Path from typing import TextIO, BinaryIO, IO, Union from", "properties is None: return if merge: properties = {**self.properties, **properties}", "init_metadata(self): if self.__data_name__ not in self.__parent__.metadata: self.__parent__.metadata[self.__data_name__] = dict() def", "[str, Path], allow_overwrite=False): if Path(dst_path).exists() and not allow_overwrite: return shutil.copyfile(str(self.path),", "self.__protected_parent_methods__['increase_data_count']() self.init_metadata() self.init_properties() @property def database(self): return self.__bucket__.db @property def", "data: break h.update(data) return h.hexdigest() def md5(self, buffer_size: int =", "else '' return open(str(self.path), mode=mode, **kwargs) def creator(self, binary: bool", "result else: return self.metadata['sha1'] def sha256(self, buffer_size: int = 131072,", "or 'sha256-timestamp' not in self.metadata \\ or self.metadata['sha256-timestamp'] < last_modified_time:", "= protected_parent_methods self.__protected_parent_methods__['increase_data_count']() self.init_metadata() self.init_properties() @property def database(self): return self.__bucket__.db", "self.metadata['md5'] = result self.metadata['md5-timestamp'] = datetime.now().timestamp() return result else: return", "{**self.properties, **properties} self.__parent__.properties[self.__data_name__] = properties @property def parent(self): return self.__parent__", "import shutil import hashlib from pathlib import Path from typing", "self.metadata['md5-timestamp'] < last_modified_time: result = self.__calc_hash__(hashlib.md5(), buffer_size) self.metadata['md5'] = result", "True, feedback: bool = False, **kwargs) -> [IO, BinaryIO, TextIO,", "return f\"Data('{self.__data_name__}')\" def import_file(self, src_path: [str, Path], allow_overwrite=False, confirm=True, feedback=False):", "allow_overwrite: return if confirm and not feedback: return shutil.copyfile(str(src_path), str(self.path))", "f\"Data('{self.__data_name__}')\" def import_file(self, src_path: [str, Path], allow_overwrite=False, confirm=True, feedback=False): if", "return self.__parent__.properties[self.__data_name__] def rename(self, 
new_name: str): shutil.move(str(self.path), str(self.__parent__.path / new_name))", "= new_name def reader(self, binary: bool = False, **kwargs) ->", "self.__calc_hash__(hashlib.sha256(), buffer_size) self.metadata['sha256'] = result self.metadata['sha256-timestamp'] = datetime.now().timestamp() return result", "is None: return if merge: metadata = {**self.metadata, **metadata} self.__parent__.metadata[self.__data_name__]", "or 'sha256' not in self.metadata \\ or 'sha256-timestamp' not in", "in self.metadata \\ or self.metadata['md5-timestamp'] < last_modified_time: result = self.__calc_hash__(hashlib.md5(),", "or self.metadata['md5-timestamp'] < last_modified_time: result = self.__calc_hash__(hashlib.md5(), buffer_size) self.metadata['md5'] =", "def set_properties(self, properties: Union[None, dict], merge: bool = True): if", "-> [IO, BinaryIO, TextIO, None]: mode = 'r' mode +=", "< last_modified_time: result = self.__calc_hash__(hashlib.sha1(), buffer_size) self.metadata['sha1'] = result self.metadata['sha1-timestamp']", "mode=mode, **kwargs) def writer(self, binary: bool = False, append: bool", "IO, Union from datetime import datetime from os.path import getmtime", "if not self.path.exists(): return None last_modified_time = getmtime(str(self.path)) if require_update", ".low import ObservableDict class Data: def __init__(self, data_name: str, parent,", "if not self.path.exists(): return None with open(str(self.path), 'rb') as file_reader:", "return self.__parent__.metadata[self.__data_name__] @property def properties(self) -> ObservableDict: return self.__parent__.properties[self.__data_name__] def", "mode = 'r' mode += 'b' if binary else ''", "set_metadata(self, metadata: Union[None, dict], merge: bool = True): if metadata", "not in self.metadata \\ or 'sha1-timestamp' not in self.metadata \\", "@property def metadata(self) -> ObservableDict: return self.__parent__.metadata[self.__data_name__] @property def properties(self)", "return self.__data_name__ @property def metadata(self) -> ObservableDict: return self.__parent__.metadata[self.__data_name__] @property", "return self.__bucket__ def init_metadata(self): if self.__data_name__ not in self.__parent__.metadata: self.__parent__.metadata[self.__data_name__]", "[IO, BinaryIO, TextIO, None]: if confirm and not feedback: return", "= True): if metadata is None: return if merge: metadata", "self.__bucket__.db @property def bucket(self): return self.__bucket__ def init_metadata(self): if self.__data_name__", "binary: bool = False, append: bool = True, allow_overwrite: bool", "__init__(self, data_name: str, parent, bucket, protected_parent_methods: Union[None, dict] = None):", "= 'x' mode += 'b' if binary else '' return", "def export_file(self, dst_path: [str, Path], allow_overwrite=False): if Path(dst_path).exists() and not", "binary: bool = False, **kwargs) -> [IO, BinaryIO, TextIO, None]:", "metadata: Union[None, dict], merge: bool = True): if metadata is", "= {**self.properties, **properties} self.__parent__.properties[self.__data_name__] = properties @property def parent(self): return", "self.metadata['sha1'] = result self.metadata['sha1-timestamp'] = datetime.now().timestamp() return result else: return", "def sha1(self, buffer_size: int = 131072, require_update: bool = False)", "md5(self, buffer_size: int = 131072, require_update: bool = False) ->", "from typing import TextIO, BinaryIO, IO, Union from datetime import", "'w' mode += 'b' if binary else '' return open(str(self.path),", "database(self): return self.__bucket__.db @property 
def db(self): return self.__bucket__.db @property def", "None last_modified_time = getmtime(str(self.path)) if require_update \\ or 'sha256' not", "last_modified_time = getmtime(str(self.path)) if require_update \\ or 'sha256' not in", "'' return open(str(self.path), mode=mode, **kwargs) def creator(self, binary: bool =", "rename(self, new_name: str): shutil.move(str(self.path), str(self.__parent__.path / new_name)) self.__data_name__ = new_name", "@property def path(self) -> Path: return self.__parent__.path / self.__data_name__ @property", "require_update \\ or 'md5' not in self.metadata \\ or 'md5-timestamp'", "if append else 'w' mode += 'b' if binary else", "131072, require_update: bool = False) -> [str, None]: if not", "new_name: str): shutil.move(str(self.path), str(self.__parent__.path / new_name)) self.__data_name__ = new_name def", "result = self.__calc_hash__(hashlib.sha256(), buffer_size) self.metadata['sha256'] = result self.metadata['sha256-timestamp'] = datetime.now().timestamp()", "or 'md5' not in self.metadata \\ or 'md5-timestamp' not in", "\\ or self.metadata['sha1-timestamp'] < last_modified_time: result = self.__calc_hash__(hashlib.sha1(), buffer_size) self.metadata['sha1']", "False, **kwargs) -> [IO, BinaryIO, TextIO, None]: mode = 'r'", "merge: properties = {**self.properties, **properties} self.__parent__.properties[self.__data_name__] = properties @property def", "in self.metadata \\ or self.metadata['sha1-timestamp'] < last_modified_time: result = self.__calc_hash__(hashlib.sha1(),", "mode=mode, **kwargs) def creator(self, binary: bool = False, confirm: bool", "shutil.copyfile(str(self.path), str(dst_path)) def __calc_hash__(self, h, buffer_size: int = 131072): if", "in self.metadata \\ or 'sha1-timestamp' not in self.metadata \\ or", "= bucket self.__protected_parent_methods__ = protected_parent_methods self.__protected_parent_methods__['increase_data_count']() self.init_metadata() self.init_properties() @property def", "bool = True, feedback: bool = False, **kwargs) -> [IO,", "= True): if properties is None: return if merge: properties", "not data: break h.update(data) return h.hexdigest() def md5(self, buffer_size: int", "\\ or 'sha1-timestamp' not in self.metadata \\ or self.metadata['sha1-timestamp'] <", "self.__bucket__.db @property def db(self): return self.__bucket__.db @property def bucket(self): return", "ObservableDict: return self.__parent__.metadata[self.__data_name__] @property def properties(self) -> ObservableDict: return self.__parent__.properties[self.__data_name__]", "@property def database(self): return self.__bucket__.db @property def db(self): return self.__bucket__.db", "TextIO, None]: if confirm and not feedback: return None mode", "return None with open(str(self.path), 'rb') as file_reader: while True: data", "self.path.exists(): return None last_modified_time = getmtime(str(self.path)) if require_update \\ or", "if merge: properties = {**self.properties, **properties} self.__parent__.properties[self.__data_name__] = properties @property", "Path: return self.__parent__.path / self.__data_name__ @property def name(self) -> str:", "return h.hexdigest() def md5(self, buffer_size: int = 131072, require_update: bool", "'r' mode += 'b' if binary else '' return open(str(self.path),", "datetime.now().timestamp() return result else: return self.metadata['md5'] def sha1(self, buffer_size: int", "= False, **kwargs) -> [IO, BinaryIO, TextIO, None]: mode =", "sha1(self, buffer_size: int = 131072, require_update: bool = False) ->", "None with 
open(str(self.path), 'rb') as file_reader: while True: data =", "result = self.__calc_hash__(hashlib.md5(), buffer_size) self.metadata['md5'] = result self.metadata['md5-timestamp'] = datetime.now().timestamp()", "-> [IO, BinaryIO, TextIO, None]: if not allow_overwrite and not", "metadata = {**self.metadata, **metadata} self.__parent__.metadata[self.__data_name__] = metadata def set_properties(self, properties:", "= dict() def set_metadata(self, metadata: Union[None, dict], merge: bool =", "self.__protected_parent_methods__ = protected_parent_methods self.__protected_parent_methods__['increase_data_count']() self.init_metadata() self.init_properties() @property def database(self): return", "shutil.move(str(self.path), str(self.__parent__.path / new_name)) self.__data_name__ = new_name def reader(self, binary:", "None mode = 'x' mode += 'b' if binary else", "from os.path import getmtime from .low import ObservableDict class Data:", "Data: def __init__(self, data_name: str, parent, bucket, protected_parent_methods: Union[None, dict]", "self.__parent__.metadata[self.__data_name__] = dict() def init_properties(self): if self.__data_name__ not in self.__parent__.properties:", "str(self.path)) def export_file(self, dst_path: [str, Path], allow_overwrite=False): if Path(dst_path).exists() and", "return self.__parent__ @property def path(self) -> Path: return self.__parent__.path /", "self.__parent__.metadata: self.__parent__.metadata[self.__data_name__] = dict() def init_properties(self): if self.__data_name__ not in", "**kwargs) -> [IO, BinaryIO, TextIO, None]: mode = 'r' mode", "properties = {**self.properties, **properties} self.__parent__.properties[self.__data_name__] = properties @property def parent(self):", "reader(self, binary: bool = False, **kwargs) -> [IO, BinaryIO, TextIO,", "= datetime.now().timestamp() return result else: return self.metadata['md5'] def sha1(self, buffer_size:", "pathlib import Path from typing import TextIO, BinaryIO, IO, Union", "is None: return if merge: properties = {**self.properties, **properties} self.__parent__.properties[self.__data_name__]", "in self.metadata \\ or self.metadata['sha256-timestamp'] < last_modified_time: result = self.__calc_hash__(hashlib.sha256(),", "else '' return open(str(self.path), mode=mode, **kwargs) def writer(self, binary: bool", "'a' if append else 'w' mode += 'b' if binary", "if self.__data_name__ not in self.__parent__.properties: self.__parent__.properties[self.__data_name__] = dict() def set_metadata(self,", "import datetime from os.path import getmtime from .low import ObservableDict", "parent(self): return self.__parent__ @property def path(self) -> Path: return self.__parent__.path", "self.metadata \\ or 'sha1-timestamp' not in self.metadata \\ or self.metadata['sha1-timestamp']" ]
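# Illustrative standalone sketch (hypothetical names, not part of the class above):
# the same chunked-hash / mtime-cache pattern used by Data.md5/sha1/sha256, without
# the bucket machinery. The digest is recomputed only when the file is newer than
# the cached timestamp.
import hashlib
from os.path import getmtime
from datetime import datetime

def cached_file_hash(path: str, cache: dict, algo: str = 'md5',
                     buffer_size: int = 131072) -> str:
    last_modified_time = getmtime(path)
    ts_key = algo + '-timestamp'
    if algo not in cache or ts_key not in cache or cache[ts_key] < last_modified_time:
        h = hashlib.new(algo)
        with open(path, 'rb') as file_reader:
            while True:
                data = file_reader.read(buffer_size)
                if not data:
                    break
                h.update(data)
        cache[algo] = h.hexdigest()
        cache[ts_key] = datetime.now().timestamp()
    return cache[algo]

# Usage: meta = {}; digest = cached_file_hash('some_file.bin', meta)
# A second call on an unmodified file returns the cached digest.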
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Version 2 of class Optimizer."""
# pylint: disable=g-bad-name

from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

import abc
import functools

import six

from tensorflow.python.distribute import distribution_strategy_context as distribute_ctx
from tensorflow.python.distribute import reduce_util as ds_reduce_util
from tensorflow.python.eager import backprop
from tensorflow.python.eager import context
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_util
from tensorflow.python.keras import backend
from tensorflow.python.keras import initializers
from tensorflow.python.keras.engine import base_layer_utils
from tensorflow.python.keras.optimizer_v2 import learning_rate_schedule
from tensorflow.python.keras.utils import tf_utils
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import clip_ops
from tensorflow.python.ops import gradients
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import resource_variable_ops
from tensorflow.python.ops import variables as tf_variables
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.saved_model import revived_types
from tensorflow.python.training.tracking import base as trackable
from tensorflow.python.util import nest
from tensorflow.python.util.tf_export import keras_export


def _deduplicate_indexed_slices(values, indices):
  """Sums `values` associated with any non-unique `indices`.

  Args:
    values: A `Tensor` with rank >= 1.
    indices: A one-dimensional integer `Tensor`, indexing into the first
      dimension of `values` (as in an IndexedSlices object).

  Returns:
    A tuple of (`summed_values`, `unique_indices`) where `unique_indices` is a
    de-duplicated version of `indices` and `summed_values` contains the sum of
    `values` slices associated with each unique index.
  """
  unique_indices, new_index_positions = array_ops.unique(indices)
  summed_values = math_ops.unsorted_segment_sum(
      values, new_index_positions, array_ops.shape(unique_indices)[0])
  return (summed_values, unique_indices)


@six.add_metaclass(abc.ABCMeta)
@keras_export("keras.optimizers.Optimizer")
class OptimizerV2(trackable.Trackable):
  """Updated base class for optimizers.

  This class defines the API to add Ops to train a model. You never use this
  class directly, but instead instantiate one of its subclasses such as
  `tf.keras.optimizers.SGD` or `tf.keras.optimizers.Adam`.

  ### Usage

  ```python
  # Create an optimizer with the desired parameters.
  opt = tf.keras.optimizers.SGD(learning_rate=0.1)
  # `loss` is a callable that takes no argument and returns the value
  # to minimize.
  loss = lambda: 3 * var1 * var1 + 2 * var2 * var2
  # In graph mode, returns op that minimizes the loss by updating the listed
  # variables.
  opt_op = opt.minimize(loss, var_list=[var1, var2])
  opt_op.run()
  # In eager mode, simply call minimize to update the list of variables.
  opt.minimize(loss, var_list=[var1, var2])
  ```

  ### Custom training loop with Keras models

  In Keras models, sometimes variables are created when the model is first
  called, instead of at construction time. Examples include 1) sequential
  models without input shape pre-defined, or 2) subclassed models. Pass
  var_list as callable in these cases.

  Example:
  ```python
  opt = tf.keras.optimizers.SGD(learning_rate=0.1)
  model = tf.keras.Sequential()
  model.add(tf.keras.layers.Dense(num_hidden, activation='relu'))
  model.add(tf.keras.layers.Dense(num_classes, activation='sigmoid'))
  loss_fn = lambda: tf.keras.losses.mse(model(input), output)
  var_list_fn = lambda: model.trainable_weights
  for input, output in data:
    opt.minimize(loss_fn, var_list_fn)
  ```

  ### Processing gradients before applying them.

  Calling `minimize()` takes care of both computing the gradients and applying
  them to the variables. If you want to process the gradients before applying
  them you can instead use the optimizer in three steps:

  1. Compute the gradients with `tf.GradientTape`.
  2. Process the gradients as you wish.
  3. Apply the processed gradients with `apply_gradients()`.

  Example:

  ```python
  # Create an optimizer.
  opt = tf.keras.optimizers.SGD(learning_rate=0.1)

  # Compute the gradients for a list of variables.
  with tf.GradientTape() as tape:
    loss = <call_loss_function>
  vars = <list_of_variables>
  grads = tape.gradient(loss, vars)
  processed_grads = [process_gradient(g) for g in grads]
  grads_and_vars = zip(processed_grads, var_list)

  # grads_and_vars is a list of tuples (gradient, variable).  Do whatever you
  # need to the 'gradient' part, for example cap them, etc.
  capped_grads_and_vars = [(MyCapper(gv[0]), gv[1]) for gv in grads_and_vars]

  # Ask the optimizer to apply the capped gradients.
  opt.apply_gradients(capped_grads_and_vars)
  ```

  ### Use with `tf.distribute.Strategy`.

  This optimizer class is `tf.distribute.Strategy` aware, which means it
  automatically sums gradients across all replicas. To average gradients, you
  divide your loss by the global batch size, which is done automatically if you
  use `tf.keras` built-in training or evaluation loops. See the `reduction`
  argument of your loss which should be set to
  `tf.keras.losses.Reduction.SUM_OVER_BATCH_SIZE` for averaging or
  `tf.keras.losses.Reduction.SUM` for not.

  If you are not using these and you want to average gradients, you should use
  `tf.math.reduce_sum` to add up your per-example losses and then divide by the
  global batch size. Note that when using `tf.distribute.Strategy`, the first
  component of a tensor's shape is the *replica-local* batch size, which is off
  by a factor equal to the number of replicas being used to compute a single
  step. As a result, using `tf.math.reduce_mean` will give the wrong answer,
  resulting in gradients that can be many times too big.

  ### Variable Constraint

  All Keras optimizers respect variable constraints. If a constraint function
  is passed to any variable, the constraint will be applied to the variable
  after the gradient has been applied to the variable.
  Important: If gradient is a sparse tensor, variable constraint is not
  supported.

  ### Thread Compatibility

  The entire optimizer is currently thread compatible, not thread-safe. The
  user needs to perform synchronization if necessary.

  ### Slots

  Many optimizer subclasses, such as `Adam` and `Adagrad`, allocate and manage
  additional variables associated with the variables to train. These are
  called <i>Slots</i>. Slots have names and you can ask the optimizer for the
  names of the slots that it uses. Once you have a slot name you can ask the
  optimizer for the variable it created to hold the slot value.

  This can be useful if you want to log debug a training algorithm, report
  stats about the slots, etc.

  ### Hyper parameters

  These are arguments passed to the optimizer subclass constructor (the
  `__init__` method), and then passed to `self._set_hyper()`. They can be
  either regular Python values (like 1.0), tensors, or callables. If they are
  callable, the callable will be called during `apply_gradients()` to get the
  value for the hyper parameter.

  Hyper parameters can be overwritten through user code:

  Example:

  ```python
  # Create an optimizer with the desired parameters.
  opt = tf.keras.optimizers.SGD(learning_rate=0.1)
  # `loss` is a callable that takes no argument and returns the value
  # to minimize.
  loss = lambda: 3 * var1 + 2 * var2
  # In eager mode, simply call minimize to update the list of variables.
  opt.minimize(loss, var_list=[var1, var2])
  # update learning rate
  opt.learning_rate = 0.05
  opt.minimize(loss, var_list=[var1, var2])
  ```

  ### Write a customized optimizer.

  If you intend to create your own optimization algorithm, simply inherit from
  this class and override the following methods:

    - resource_apply_dense (update variable given gradient tensor is dense)
    - resource_apply_sparse (update variable given gradient tensor is sparse)
    - create_slots (if your optimizer algorithm requires additional variables)
    - get_config (serialization of the optimizer, include all hyper parameters)
  """

  def __init__(self, name, **kwargs):
    """Create a new Optimizer.

    This must be called by the constructors of subclasses. Note that Optimizer
    instances should not bind to a single graph, and so shouldn't keep Tensors
    as member variables. Generally you should be able to use the
    _set_hyper()/state.get_hyper() facility instead. This class is stateful and
    thread-compatible.

    Args:
      name: A non-empty string. The name to use for accumulators created for
        the optimizer.
      **kwargs: keyword arguments. Allowed to be {`clipnorm`, `clipvalue`,
        `lr`, `decay`}. `clipnorm` is clip gradients by norm; `clipvalue` is
        clip gradients by value, `decay` is included for backward compatibility
        to allow time inverse decay of learning rate. `lr` is included for
        backward compatibility, recommended to use `learning_rate` instead.

    Raises:
      ValueError: If name is malformed.
      RuntimeError: If _create_slots has been overridden instead of
        _create_vars.
    """
    allowed_kwargs = {"clipnorm", "clipvalue", "lr", "decay"}
    for k in kwargs:
      if k not in allowed_kwargs:
        raise TypeError("Unexpected keyword argument "
                        "passed to optimizer: " + str(k))
      # checks that all keyword arguments are non-negative.
      if kwargs[k] < 0:
        raise ValueError("Expected {} >= 0, received: {}".format(k, kwargs[k]))

    self._use_locking = True
    self._name = name
    self._hyper = {}
    # dict: {variable name : {slot name : variable}}
    self._slots = {}
    self._slot_names = []
    self._weights = []
    self._iterations = None

    # For implementing Trackable. Stores information about how to restore
    # slot variables which have not yet been created
    # (trackable._CheckpointPosition objects).
    #  {slot_name :
    #      {_var_key(variable_to_train): [checkpoint_position, ... ], ... },
    #   ... }
    self._deferred_slot_restorations = {}

    decay = kwargs.pop("decay", 0.0)
    if decay < 0.:
      raise ValueError("decay cannot be less than 0: {}".format(decay))
    self._initial_decay = decay
    if "clipnorm" in kwargs:
      self.clipnorm = kwargs.pop("clipnorm")
    if "clipvalue" in kwargs:
      self.clipvalue = kwargs.pop("clipvalue")

    self._hypers_created = False

  def minimize(self, loss, var_list, grad_loss=None, name=None):
    """Minimize `loss` by updating `var_list`.

    This method simply computes gradient using `tf.GradientTape` and calls
    `apply_gradients()`. If you want to process the gradient before applying
    then call `tf.GradientTape` and `apply_gradients()` explicitly instead of
    using this function.

    Args:
      loss: A callable taking no arguments which returns the value to minimize.
      var_list: list or tuple of `Variable` objects to update to minimize
        `loss`, or a callable returning the list or tuple of `Variable`
        objects. Use callable when the variable list would otherwise be
        incomplete before `minimize` and the variables are created at the first
        time when `loss` is called.
      grad_loss: Optional. A `Tensor` holding the gradient computed for `loss`.
      name: Optional name for the returned operation.

    Returns:
      An Operation that updates the variables in `var_list`.

    Raises:
      ValueError: If some of the variables are not `Variable` objects.
    """
    grads_and_vars = self._compute_gradients(
        loss, var_list=var_list, grad_loss=grad_loss)
    return self.apply_gradients(grads_and_vars, name=name)

  # The remaining members of OptimizerV2 -- _compute_gradients, get_gradients,
  # apply_gradients and _distributed_apply, get_updates, the hyperparameter
  # machinery (_set_hyper/_get_hyper, __getattribute__/__setattr__,
  # _serialize_hyperparameter), slot handling (add_slot, get_slot,
  # _restore_slot_variable, _create_or_restore_slot_variable), weights
  # (add_weight, get_weights/set_weights, variables), iterations, _decayed_lr,
  # get_config/from_config, and the _resource_apply_* / _resource_scatter_*
  # hooks -- follow here.


def _filter_grads(grads_and_vars):
  """Filter out iterable with grad equal to None."""
  grads_and_vars = tuple(grads_and_vars)
  if not grads_and_vars:
    return grads_and_vars
  filtered = []
  vars_with_empty_grads = []
  for grad, var in grads_and_vars:
    if grad is None:
      vars_with_empty_grads.append(var)
    else:
      filtered.append((grad, var))
  filtered = tuple(filtered)
  if not filtered:
    raise ValueError("No gradients provided for any variable: %s." %
                     ([v.name for _, v in grads_and_vars],))
  if vars_with_empty_grads:
    logging.warning(
        ("Gradients do not exist for variables %s when minimizing the loss."),
        ([v.name for v in vars_with_empty_grads]))
  return filtered


def _var_key(var):
  """Key for representing a primary variable, for looking up slots.

  In graph mode the name is derived from the var shared name.
  In eager mode the name is derived from the var unique id.
  If distribution strategy exists, get the primary variable first.

  Args:
    var: the variable.

  Returns:
    the unique name of the variable.
  """
  # pylint: disable=protected-access
  # Get the distributed variable if it exists.
  if getattr(var, "_distributed_container", None) is not None:
    var = var._distributed_container()
  if var._in_graph_mode:
    return var._shared_name
  return var._unique_id


def _get_slot_key_from_var(var, slot_name):
  """Get the slot key for the variable: var_name/slot_name."""
  name = _var_key(var)
  return name + "/" + slot_name


class _RestoredOptimizer(OptimizerV2):
  """A non-functional Optimizer implementation for checkpoint compatibility.

  Holds slot variables and hyperparameters when an optimizer is restored from a
  SavedModel. These variables may be referenced in functions along with ops
  created by the original optimizer, but currently we do not support using the
  optimizer object itself (e.g. through `apply_gradients`).
  """
  # TODO(allenl): Make the restored optimizer functional by tracing its apply
  # methods.

  def __init__(self):
    super(_RestoredOptimizer, self).__init__("_RestoredOptimizer")
    self._hypers_created = True

  def get_config(self):
    # TODO(allenl): Save and restore the Optimizer's config
    raise NotImplementedError(
        "Restoring functional Optimizers from SavedModels is not currently "
        "supported. Please file a feature request if this limitation bothers "
        "you.")


revived_types.register_revived_type(
    "optimizer",
    lambda obj: isinstance(obj, OptimizerV2),
    versions=[revived_types.VersionedTypeRegistration(
        object_factory=lambda proto: _RestoredOptimizer(),
        version=1,
        min_producer_version=1,
        min_consumer_version=1,
        setter=_RestoredOptimizer._set_hyper  # pylint: disable=protected-access
    )])
opt.apply_gradients(capped_grads_and_vars) ``` ### Use with `tf.distribute.Strategy`. This optimizer", "the loss.\"), ([v.name for v in vars_with_empty_grads])) return filtered def", "deal correctly with non-unique indices may instead override `_resource_apply_sparse_duplicate_indices` to", "`handle`, with repeated indices. Optimizers which override this method must", "params: List of variables. Returns: List of gradient tensors. Raises:", "= value else: backend.set_value(self._hyper[name], value) def _get_hyper(self, name, dtype=None): if", "bothers \" \"you.\") revived_types.register_revived_type( \"optimizer\", lambda obj: isinstance(obj, OptimizerV2), versions=[revived_types.VersionedTypeRegistration(", "is sparse tensor, variable constraint is not supported. ### Thread", "a model. You never use this class directly, but instead", "to update to minimize `loss`, or a callable returning the", "be either regular Python values (like 1.0), tensors, or callables.", "Optional. A `Tensor` holding the gradient computed for `loss`. name:", "raise e def __setattr__(self, name, value): \"\"\"Override setattr to support", "in self._slot_names: self._slot_names.append(slot_name) var_key = _var_key(var) slot_dict = self._slots.setdefault(var_key, {})", "highest restore # UID in case slot variables have their", "Args: var: the variable. Returns: the unique name of the", "Ask the optimizer to apply the capped gradients. opt.apply_gradients(capped_grads_and_vars) ```", "Note that Optimizer instances should not bind to a single", "that scopes intended to catch # `variable` also catch its", "variables(self): \"\"\"Returns variables of this Optimizer based on the order", "= {} decay = kwargs.pop(\"decay\", 0.0) if decay < 0.:", "since the variables are created at the first time `loss`", "# context. (eager updates execute immediately) with ops._get_graph_from_inputs(update_ops).as_default(): # pylint:", "\"\"\"Key for representing a primary variable, for looking up slots.", "be restored). Args: slot_variable_position: A `trackable._CheckpointPosition` object indicating the slot", "This can be useful if you want to log debug", "or isinstance(prev_value, (ops.Tensor, int, float, learning_rate_schedule.LearningRateSchedule)) or isinstance(value, learning_rate_schedule.LearningRateSchedule)): self._hyper[name]", "the gradient before applying then call `tf.GradientTape` and `apply_gradients()` explicitly", "= nest.flatten(var_list) grads = tape.gradient(loss_value, var_list, grad_loss) if hasattr(self, \"clipnorm\"):", "without the optimizer; # it's a dependency hypergraph with edges", "the model is first called, instead of construction time. Examples", "to use for accumulators created for the optimizer. **kwargs: keyword", "@abc.abstractmethod def get_config(self): \"\"\"Returns the config of the optimimizer. An", "gradients.gradients(loss, params) for grad, param in zip(grads, params): if grad", "the var shared name. In eager mode the name is", "variable if it exists. if getattr(var, \"_distributed_container\", None) is not", "+ \"...\") if not params: return weight_value_tuples = [] param_values", "instead of construction time. Examples include 1) sequential models without", "graph, and so shouldn't keep Tensors as member variables. 
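
  # Illustrative note (not original documentation): the call chain for one
  # training step implemented below is roughly
  #
  #   minimize() -> _compute_gradients() -> apply_gradients()
  #     -> _distributed_apply() -> _resource_apply_dense() or
  #        _resource_apply_sparse_duplicate_indices()
  #          -> _resource_apply_sparse()
  #
  # Subclasses normally implement only the `_resource_apply_*` leaves, plus
  # `_create_slots` and `get_config`; the rest is shared bookkeeping.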
Generally", "(update variable given gradient tensor is dense) - resource_apply_sparse (update", "to a single graph, and so shouldn't keep Tensors as", "you should use `tf.math.reduce_sum` to add up your per-example losses", "g, v in grads_and_vars if g is not None and", "set to \" \"VariableSynchronization.ON_READ only for non-trainable variables. \" \"You", "is graph mode or any of the update ops are", "__setattr__(self, name, value): \"\"\"Override setattr to support dynamic hyperparameter setting.\"\"\"", "arguments. Allowed to be {`clipnorm`, `clipvalue`, `lr`, `decay`}. `clipnorm` is", "k in kwargs: if k not in allowed_kwargs: raise TypeError(\"Unexpected", "this Optimizer based on the order created.\"\"\" return self._weights @property", "else: return value def __getattribute__(self, name): \"\"\"Overridden to support hyperparameter", "is a Python dictionary (serializable) containing the configuration of an", "for k in kwargs: if k not in allowed_kwargs: raise", "for the variable it created to hold the slot value.", "restoring initializer. No new variables are created when graph building.", "`learning_rate` instead. Raises: ValueError: If name is malformed. RuntimeError: If", "value to minimize. var_list: list or tuple of `Variable` objects", "Unless required by applicable law or agreed to in writing,", "the highest restore # UID in case slot variables have", "+ str(pv.shape) + \" not compatible with \" \"provided weight", "for the variables in `var_list`. This is the first part", "get the primary variable first. Args: var: the variable. Returns:", "time inverse decay of learning rate. `lr` is included for", "vars_with_empty_grads.append(var) else: filtered.append((grad, var)) filtered = tuple(filtered) if not filtered:", "[(MyCapper(gv[0]), gv[1]) for gv in grads_and_vars] # Ask the optimizer", "indices and their associated gradients, is enforced by first pre-processing", "dimension of `values` (as in an IndexedSlices object). Returns: A", "to `params`. Arguments: loss: Loss tensor. params: List of variables.", "x, i, v): with ops.control_dependencies( [resource_variable_ops.resource_scatter_add(x.handle, i, v)]): return x.value()", "variables which have not yet been created # (trackable._CheckpointPosition objects).", "`global_step` was not `None`, that operation also increments `global_step`. Raises:", "function used for a hyperparameter. Returns: An optimizer instance. \"\"\"", "derived from the var unique id. If distribution strategy exists,", "save the slot variable if the optimizer is saved without", "the specific language governing permissions and # limitations under the", "to None.\"\"\" grads_and_vars = tuple(grads_and_vars) if not grads_and_vars: return grads_and_vars", "The name of this `Optimizer`'s slot to restore into. variable:", "the second part of `minimize()`. It returns an `Operation` that", "to \" \"VariableSynchronization.ON_READ only for non-trainable variables. \" \"You have", "for g in grads ] grads_and_vars = list(zip(grads, var_list)) self._assert_valid_dtypes([", "unique_indices = _deduplicate_indexed_slices( values=grad, indices=indices) return self._resource_apply_sparse(summed_grad, handle, unique_indices) def", "None: var = var._distributed_container() if var._in_graph_mode: return var._shared_name return var._unique_id", "integer `Tensor`, indexing into the first dimension of `values` (as", "self._get_hyper(\"decay\", var_dtype) lr_t = lr_t / (1. 
+ decay_t *", "self._hyper: return self._get_hyper(name) raise e def __setattr__(self, name, value): \"\"\"Override", "base class for optimizers. This class defines the API to", "gradient can be `None`. Raises: TypeError: If `var_list` contains anything", "slot variable for `var`.\"\"\" if slot_name not in self._slot_names: self._slot_names.append(slot_name)", "@property def weights(self): \"\"\"Returns variables of this Optimizer based on", "update ops are # symbolic then the step update should", "created to hold the slot value. This can be useful", "from its config. This method is the reverse of `get_config`,", "`reduction` argument of your loss which should be set to", "as trackable from tensorflow.python.util import nest from tensorflow.python.util.tf_export import keras_export", "backend.get_value(value) return value def variables(self): \"\"\"Returns variables of this Optimizer", "for representing a primary variable, for looking up slots. In", "variables. Generally you should be able to use the _set_hyper()/state.get_hyper()", "from tensorflow.python.ops import variables as tf_variables from tensorflow.python.platform import tf_logging", "arguments passed to the optimizer subclass constructor (the `__init__` method),", "of assignments. deferred_restorations.sort(key=lambda position: position.restore_uid, reverse=True) for checkpoint_position in deferred_restorations:", "class Optimizer.\"\"\" # pylint: disable=g-bad-name from __future__ import absolute_import from", "for any variable: %s.\" % ([v.name for _, v in", "None, that operation also increments `global_step`. Raises: TypeError: If `grads_and_vars`", "math_ops.cast(lr_t(local_step), var_dtype) if self._initial_decay > 0.: local_step = math_ops.cast(self.iterations, var_dtype)", "facility instead. This class in stateful and thread-compatible. Args: name:", "value else: prev_value = self._hyper[name] if (callable(prev_value) or isinstance(prev_value, (ops.Tensor,", "primary variable, for looking up slots. In graph mode the", "are arguments passed to the optimizer subclass constructor (the `__init__`", "is the *replica-local* batch size, which is off by a", "import ops from tensorflow.python.framework import tensor_util from tensorflow.python.keras import backend", "into the first dimension of `values` (as in an IndexedSlices", "variable it created to hold the slot value. This can", "for (_, v) in grads_and_vars] # Create iteration if necessary.", "self).__init__(\"_RestoredOptimizer\") self._hypers_created = True def get_config(self): # TODO(allenl): Save and", "float types. Returns: Valid types for loss, variables and gradients.", "currently \" \"supported. Please file a feature request if this", "applied to the variable. Important: If gradient is sparse tensor,", "name, value): \"\"\"Override setattr to support dynamic hyperparameter setting.\"\"\" #", "= tf.keras.optimizers.SGD(learning_rate=0.1) # `loss` is a callable that takes no", "from tensorflow.python.eager import context from tensorflow.python.framework import dtypes from tensorflow.python.framework", "_get_hyper(self, name, dtype=None): if not self._hypers_created: self._create_hypers() value = self._hyper[name]", "learning_rate_schedule from tensorflow.python.keras.utils import tf_utils from tensorflow.python.ops import array_ops from", "to minimize. 
var_list: list or tuple of `Variable` objects to", "current context is graph mode or any of the update", "dependency hypergraph with edges of the form (optimizer, non-slot #", "_set_hyper(self, name, value): \"\"\"set hyper `name` to value. value can", "be callable, tensor, numeric.\"\"\" if isinstance(value, trackable.Trackable): self._track_trackable(value, name, overwrite=True)", "should be set to `tf.keras.losses.Reduction.SUM_OVER_BATCH_SIZE` for averaging or `tf.keras.losses.Reduction.SUM` for", "for averaging or `tf.keras.losses.Reduction.SUM` for not. If you are not", "\"\"\"Create a new Optimizer. This must be called by the", "dictionary. \"\"\" config = {\"name\": self._name} if hasattr(self, \"clipnorm\"): config[\"clipnorm\"]", "distribution_strategy_context as distribute_ctx from tensorflow.python.distribute import reduce_util as ds_reduce_util from", "with backend.name_scope(\"update\" + scope_name): update_ops.extend( distribution.extended.update( var, apply_grad_to_update_var, args=(grad,), group=False))", "gradients and applying them to the variables. If you want", "Please file a feature request if this limitation bothers \"", "= tuple(filtered) if not filtered: raise ValueError(\"No gradients provided for", "# variable, variable)). So we don't _track_ slot variables anywhere,", "be computed (e.g. if gradient function not implemented). \"\"\" params", "a new slot variable for `var`.\"\"\" if slot_name not in", "= [] self._iterations = None # For implementing Trackable. Stores", "objects). # {slot_name : # {_var_key(variable_to_train): [checkpoint_position, ... ], ...", "set_weights(self, weights): params = self.weights if len(params) != len(weights): raise", "to minimize the number # of assignments. deferred_restorations.sort(key=lambda position: position.restore_uid,", "tensorflow.python.ops import resource_variable_ops from tensorflow.python.ops import variables as tf_variables from", "`Tensor` with rank >= 1. indices: A one-dimensional integer `Tensor`,", "\"...\") if not params: return weight_value_tuples = [] param_values =", "self._name = name self._hyper = {} # dict: {variable name", "number # of assignments. deferred_restorations.sort(key=lambda position: position.restore_uid, reverse=True) for checkpoint_position", "shape=var.shape, dtype=var.dtype) else: initial_value = initializer strategy = distribute_ctx.get_strategy() with", "_restore_slot_variable catches these after normal creation and adds restore ops", "or callable(initializer): initializer = initializers.get(initializer) if synchronization == tf_variables.VariableSynchronization.ON_READ: if", "_get_slot_key_from_var(var, slot_name): \"\"\"Get the slot key for the variable: var_name/slot_name.\"\"\"", "without the non-slot # variable, or if the non-slot variable", "variables. opt_op = opt.minimize(loss, var_list=[var1, var2]) opt_op.run() # In eager", "aware, which means it automatically sums gradients across all replicas.", "that when using `tf.distribute.Strategy`, the first component of a tensor's", "six from tensorflow.python.distribute import distribution_strategy_context as distribute_ctx from tensorflow.python.distribute import", "is to be synced on read. trainable = False elif", "nonzero. Indices are unique. Returns: An `Operation` which updates the", "is the second part of `minimize()`. It returns an `Operation`", "or callables. If they are callable, the callable will be", "as you wish. 3. Apply the processed gradients with `apply_gradients()`.", "indices for which the gradient is nonzero. 
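
  # Illustrative note (sketch): with `clipnorm` each gradient is clipped
  # independently by its own norm (`tf.clip_by_norm` scales g to
  # g * clipnorm / max(||g||, clipnorm)), while `clipvalue` clips elementwise
  # into [-clipvalue, clipvalue]. Both run here and in `get_gradients`,
  # before any `_resource_apply_*` update is built.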

  def get_gradients(self, loss, params):
    """Returns gradients of `loss` with respect to `params`.

    Arguments:
      loss: Loss tensor.
      params: List of variables.

    Returns:
      List of gradient tensors.

    Raises:
      ValueError: In case any gradient cannot be computed (e.g. if gradient
        function not implemented).
    """
    params = nest.flatten(params)
    with backend.get_graph().as_default():
      grads = gradients.gradients(loss, params)
    for grad, param in zip(grads, params):
      if grad is None:
        raise ValueError("Variable {} has `None` for gradient. "
                         "Please make sure that all of your ops have a "
                         "gradient defined (i.e. are differentiable). "
                         "Common ops without gradient: "
                         "K.argmax, K.round, K.eval.".format(param))
    if hasattr(self, "clipnorm"):
      grads = [clip_ops.clip_by_norm(g, self.clipnorm) for g in grads]
    if hasattr(self, "clipvalue"):
      grads = [
          clip_ops.clip_by_value(g, -self.clipvalue, self.clipvalue)
          for g in grads
      ]
    return grads

  def apply_gradients(self, grads_and_vars, name=None):
    """Apply gradients to variables.

    This is the second part of `minimize()`. It returns an `Operation` that
    applies gradients.

    Args:
      grads_and_vars: List of (gradient, variable) pairs.
      name: Optional name for the returned operation. Default to the name
        passed to the `Optimizer` constructor.

    Returns:
      An `Operation` that applies the specified gradients.

    Raises:
      TypeError: If `grads_and_vars` is malformed.
      ValueError: If none of the variables have gradients.
    """
    grads_and_vars = _filter_grads(grads_and_vars)
    var_list = [v for (_, v) in grads_and_vars]

    # Create iteration if necessary.
    with ops.init_scope():
      _ = self.iterations
      self._create_hypers()
      self._create_slots(var_list)

    self._prepare(var_list)

    return distribute_ctx.get_replica_context().merge_call(
        self._distributed_apply, args=(grads_and_vars,), kwargs={"name": name})
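
  # Illustrative sketch (hypothetical names): `apply_gradients` is the usual
  # entry point for custom training loops, e.g.
  #
  #   with tf.GradientTape() as tape:
  #     loss = loss_fn(model(x), y)
  #   grads = tape.gradient(loss, model.trainable_weights)
  #   opt.apply_gradients(zip(grads, model.trainable_weights))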

  def _distributed_apply(self, distribution, grads_and_vars, name):
    """`apply_gradients` using a `DistributionStrategy`."""
    reduced_grads = distribution.extended.batch_reduce_to(
        ds_reduce_util.ReduceOp.SUM, grads_and_vars)
    var_list = [v for _, v in grads_and_vars]
    grads_and_vars = zip(reduced_grads, var_list)

    def apply_grad_to_update_var(var, grad):
      """Apply gradient to variable."""
      if isinstance(var, ops.Tensor):
        raise NotImplementedError("Trying to update a Tensor ", var)
      if isinstance(grad, ops.IndexedSlices):
        if var.constraint is not None:
          raise RuntimeError(
              "Cannot use a constraint function on a sparse variable.")
        return self._resource_apply_sparse_duplicate_indices(
            grad.values, var, grad.indices)
      update_op = self._resource_apply_dense(grad, var)
      if var.constraint is not None:
        with ops.control_dependencies([update_op]):
          return var.assign(var.constraint(var))
      else:
        return update_op

    update_ops = []
    with backend.name_scope(name or self._name):
      for grad, var in grads_and_vars:
        scope_name = ("" if ops.executing_eagerly_outside_functions() else
                      "_" + var.op.name)
        with backend.name_scope("update" + scope_name):
          update_ops.extend(
              distribution.extended.update(
                  var, apply_grad_to_update_var, args=(grad,), group=False))

      any_symbolic = any(
          isinstance(i, ops.Operation) or tf_utils.is_symbolic_tensor(i)
          for i in update_ops)
      if not context.executing_eagerly() or any_symbolic:
        # If the current context is graph mode or any of the update ops are
        # symbolic then the step update should be carried out under a graph
        # context. (eager updates execute immediately)
        with ops._get_graph_from_inputs(update_ops).as_default():  # pylint: disable=protected-access
          with ops.control_dependencies(update_ops):
            return self._iterations.assign_add(1).op

      return self._iterations.assign_add(1)

  def get_updates(self, loss, params):
    grads = self.get_gradients(loss, params)
    grads_and_vars = list(zip(grads, params))
    self._assert_valid_dtypes([
        v for g, v in grads_and_vars
        if g is not None and v.dtype != dtypes.resource
    ])
    return [self.apply_gradients(grads_and_vars)]
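
  # Illustrative note (sketch): `_distributed_apply` reduces per-replica
  # gradients with ReduceOp.SUM before updating variables, which is why the
  # class docstring asks you to divide the loss by the *global* batch size
  # instead of taking a per-replica mean.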

  def _set_hyper(self, name, value):
    """set hyper `name` to value. value can be callable, tensor, numeric."""
    if isinstance(value, trackable.Trackable):
      self._track_trackable(value, name, overwrite=True)
    if name not in self._hyper:
      self._hyper[name] = value
    else:
      prev_value = self._hyper[name]
      if (callable(prev_value)
          or isinstance(prev_value,
                        (ops.Tensor, int, float,
                         learning_rate_schedule.LearningRateSchedule))
          or isinstance(value, learning_rate_schedule.LearningRateSchedule)):
        self._hyper[name] = value
      else:
        backend.set_value(self._hyper[name], value)

  def _get_hyper(self, name, dtype=None):
    if not self._hypers_created:
      self._create_hypers()
    value = self._hyper[name]
    if isinstance(value, learning_rate_schedule.LearningRateSchedule):
      return value
    if callable(value):
      value = value()
    if dtype:
      return math_ops.cast(value, dtype)
    else:
      return value

  def __getattribute__(self, name):
    """Overridden to support hyperparameter access."""
    try:
      return super(OptimizerV2, self).__getattribute__(name)
    except AttributeError as e:
      # Needed to avoid infinite recursion with __setattr__.
      if name == "_hyper":
        raise e
      # Backwards compatibility with Keras optimizers.
      if name == "lr":
        name = "learning_rate"
      if name in self._hyper:
        return self._get_hyper(name)
      raise e

  def __setattr__(self, name, value):
    """Override setattr to support dynamic hyperparameter setting."""
    # Backwards compatibility with Keras optimizers.
    if name == "lr":
      name = "learning_rate"
    if hasattr(self, "_hyper") and name in self._hyper:
      self._set_hyper(name, value)
    else:
      super(OptimizerV2, self).__setattr__(name, value)

  def get_slot_names(self):
    """A list of names for this optimizer's slots."""
    return self._slot_names

  def add_slot(self, var, slot_name, initializer="zeros"):
    """Add a new slot variable for `var`."""
    if slot_name not in self._slot_names:
      self._slot_names.append(slot_name)
    var_key = _var_key(var)
    slot_dict = self._slots.setdefault(var_key, {})
    weight = slot_dict.get(slot_name, None)
    if weight is None:
      if isinstance(initializer, six.string_types) or callable(initializer):
        initializer = initializers.get(initializer)
        initial_value = functools.partial(
            initializer, shape=var.shape, dtype=var.dtype)
      else:
        initial_value = initializer
      strategy = distribute_ctx.get_strategy()
      with strategy.extended.colocate_vars_with(var):
        weight = tf_variables.Variable(
            name="%s/%s" % (var._shared_name, slot_name),  # pylint: disable=protected-access
            dtype=var.dtype,
            trainable=False,
            initial_value=initial_value)
      backend.track_variable(weight)
      slot_dict[slot_name] = weight
      self._restore_slot_variable(
          slot_name=slot_name, variable=var,
          slot_variable=weight)
      self._weights.append(weight)
    return weight

  def get_slot(self, var, slot_name):
    var_key = _var_key(var)
    slot_dict = self._slots[var_key]
    return slot_dict[slot_name]
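
  # Illustrative sketch (hypothetical slot name): optimizer state can be
  # inspected through the slot API, e.g.
  #
  #   for name in opt.get_slot_names():   # e.g. ["momentum"]
  #     slot = opt.get_slot(var, name)    # a tf.Variable shaped like `var`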

  def _prepare(self, var_list):
    pass

  def _create_slots(self, var_list):
    pass

  def _create_hypers(self):
    if self._hypers_created:
      return
    # Iterate hyper values deterministically.
    for name, value in sorted(self._hyper.items()):
      if isinstance(value, ops.Tensor) or callable(value):
        continue
      else:
        self._hyper[name] = self.add_weight(
            name,
            shape=[],
            trainable=False,
            initializer=value,
            aggregation=tf_variables.VariableAggregation.ONLY_FIRST_REPLICA)
    self._hypers_created = True

  @property
  def iterations(self):
    """Variable. The number of training steps this Optimizer has run."""
    if self._iterations is None:
      self._iterations = self.add_weight(
          "iter",
          shape=[],
          dtype=dtypes.int64,
          trainable=False,
          aggregation=tf_variables.VariableAggregation.ONLY_FIRST_REPLICA)
      self._weights.append(self._iterations)
    return self._iterations

  @iterations.setter
  def iterations(self, variable):
    if self._iterations is not None:
      raise RuntimeError("Cannot set `iterations` to a new Variable after "
                         "the Optimizer weights have been created")
    self._iterations = variable
    self._weights.append(self._iterations)

  def _decayed_lr(self, var_dtype):
    """Get decayed learning rate as a Tensor with dtype=var_dtype."""
    lr_t = self._get_hyper("learning_rate", var_dtype)
    if isinstance(lr_t, learning_rate_schedule.LearningRateSchedule):
      local_step = math_ops.cast(self.iterations, var_dtype)
      lr_t = math_ops.cast(lr_t(local_step), var_dtype)
    if self._initial_decay > 0.:
      local_step = math_ops.cast(self.iterations, var_dtype)
      decay_t = self._get_hyper("decay", var_dtype)
      lr_t = lr_t / (1. + decay_t * local_step)
    return lr_t
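
  # Worked example (assumed values): with learning_rate=0.1 and decay=0.5,
  # `_decayed_lr` yields 0.1 / (1 + 0.5 * 0) = 0.1 at iteration 0 and
  # 0.1 / (1 + 0.5 * 4) = 0.0333... at iteration 4, i.e. inverse time decay
  # driven by `self.iterations`.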

  @abc.abstractmethod
  def get_config(self):
    """Returns the config of the optimizer.

    An optimizer config is a Python dictionary (serializable) containing the
    configuration of an optimizer. The same optimizer can be reinstantiated
    later (without any saved state) from this configuration.

    Returns:
      Python dictionary.
    """
    config = {"name": self._name}
    if hasattr(self, "clipnorm"):
      config["clipnorm"] = self.clipnorm
    if hasattr(self, "clipvalue"):
      config["clipvalue"] = self.clipvalue
    return config

  @classmethod
  def from_config(cls, config, custom_objects=None):
    """Creates an optimizer from its config.

    This method is the reverse of `get_config`, capable of instantiating the
    same optimizer from the config dictionary.

    Arguments:
      config: A Python dictionary, typically the output of get_config.
      custom_objects: A Python dictionary mapping names to additional Python
        objects used to create this optimizer, such as a function used for a
        hyperparameter.

    Returns:
      An optimizer instance.
    """
    if "lr" in config:
      config["learning_rate"] = config.pop("lr")
    if "learning_rate" in config:
      if isinstance(config["learning_rate"], dict):
        config["learning_rate"] = learning_rate_schedule.deserialize(
            config["learning_rate"], custom_objects=custom_objects)
    return cls(**config)

  def _serialize_hyperparameter(self, hyperparameter_name):
    """Serialize a hyperparameter that can be a float, callable, or Tensor."""
    value = self._hyper[hyperparameter_name]
    if isinstance(value, learning_rate_schedule.LearningRateSchedule):
      return learning_rate_schedule.serialize(value)
    if callable(value):
      return value()
    if tensor_util.is_tensor(value):
      return backend.get_value(value)
    return value

  def variables(self):
    """Returns variables of this Optimizer based on the order created."""
    return self._weights

  @property
  def weights(self):
    """Returns variables of this Optimizer based on the order created."""
    return self._weights

  def get_weights(self):
    params = self.weights
    return backend.batch_get_value(params)

  # TODO(tanzheny): Maybe share this logic with base_layer.
  def set_weights(self, weights):
    params = self.weights
    if len(params) != len(weights):
      raise ValueError(
          "You called `set_weights(weights)` on optimizer " + self._name +
          " with a weight list of length " + str(len(weights)) +
          ", but the optimizer was expecting " + str(len(params)) +
          " weights. Provided weights: " + str(weights)[:50] + "...")
    if not params:
      return
    weight_value_tuples = []
    param_values = backend.batch_get_value(params)
    for pv, p, w in zip(param_values, params, weights):
      if pv.shape != w.shape:
        raise ValueError("Optimizer weight shape " + str(pv.shape) +
                         " not compatible with "
                         "provided weight shape " + str(w.shape))
      weight_value_tuples.append((p, w))
    backend.batch_set_value(weight_value_tuples)
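
  # Illustrative note (sketch): `get_config`/`from_config` round-trip only
  # the constructor arguments, not slot or iteration state, e.g.
  #
  #   opt2 = type(opt).from_config(opt.get_config())
  #
  # restores the hyperparameters but starts with fresh slots; use
  # `get_weights`/`set_weights` above to copy the mutable state as well.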
### Usage ```python # Create an optimizer with the", "to allow other float types. Returns: Valid types for loss,", "self._resource_apply_dense(grad, var) if var.constraint is not None: with ops.control_dependencies([update_op]): return", "_restore_slot_variable(self, slot_name, variable, slot_variable): \"\"\"Restore a newly created slot variable's", "operation also increments `global_step`. Raises: ValueError: If some of the", "\"\"\" if \"lr\" in config: config[\"learning_rate\"] = config.pop(\"lr\") if \"learning_rate\"", "\"\"\"Get the slot key for the variable: var_name/slot_name.\"\"\" name =", "If _create_slots has been overridden instead of _create_vars. \"\"\" allowed_kwargs", "e: # Needed to avoid infinite recursion with __setattr__. if", "we don't want to # save the slot variable if", "variable, or if the non-slot variable is saved without the", "abc import functools import six from tensorflow.python.distribute import distribution_strategy_context as", "grad: a `Tensor` representing the gradient. handle: a `Tensor` of", "if dtype: return math_ops.cast(value, dtype) else: return value def __getattribute__(self,", "agreed to in writing, software # distributed under the License", "first time when `loss` is called. grad_loss: Optional. A `Tensor`", "it exists. if getattr(var, \"_distributed_container\", None) is not None: var", "to apply the capped gradients. opt.apply_gradients(capped_grads_and_vars) ``` ### Use with", "thread-compatible. Args: name: A non-empty string. The name to use", "invalid, or var_list is None. \"\"\" # TODO(josh11b): Test that", "list of (gradient, variable) pairs where \"gradient\" is the gradient", "distributed under the License is distributed on an \"AS IS\"", "object). Returns: A tuple of (`summed_values`, `unique_indices`) where `unique_indices` is", "= slot_dict.get(slot_name, None) if weight is None: if isinstance(initializer, six.string_types)", "that updates the variables in `var_list`. If `global_step` was not", "overhead of summing. Args: grad: a `Tensor` representing the gradient", "for. \"\"\" variable_key = _var_key(variable) slot_dict = self._slots.get(variable_key, {}) slot_variable", "malformed. RuntimeError: If _create_slots has been overridden instead of _create_vars.", "return grads def apply_gradients(self, grads_and_vars, name=None): \"\"\"Apply gradients to variables.", "special-case this dependency and otherwise pretend it's a normal #", "and you want to average gradients, you should use `tf.math.reduce_sum`", "Deferring is mostly harmless # (aside from double initialization), and", "the affected indices. handle: a `Tensor` of dtype `resource` which", "allowed_kwargs: raise TypeError(\"Unexpected keyword argument \" \"passed to optimizer: \"", "value for the hyper parameter. Hyper parameters can be overwritten", "if not callable(var_list): tape.watch(var_list) loss_value = loss() if callable(var_list): var_list", "True @property def iterations(self): \"\"\"Variable. The number of training steps", "var, slot_name, initializer=\"zeros\"): \"\"\"Add a new slot variable for `var`.\"\"\"", "are created when graph building. Instead, _restore_slot_variable catches these after", "the License. # ============================================================================== \"\"\"Version 2 of class Optimizer.\"\"\" #", "If name is malformed. RuntimeError: If _create_slots has been overridden", "In Keras models, sometimes variables are created when the model", "initializer. No new variables are created when graph building. 
### Processing gradients before applying them

Calling `minimize()` takes care of both computing the gradients and applying
them to the variables. If you want to process the gradients before applying
them, you can instead use the optimizer in three steps:

1. Compute the gradients with `tf.GradientTape`.
2. Process the gradients as you wish.
3. Apply the processed gradients with `apply_gradients()`.

Example:
\"\"\" grads_and_vars = self._compute_gradients( loss, var_list=var_list, grad_loss=grad_loss)", "the optimizer subclass constructor (the `__init__` method), and then passed", "hyper parameters) \"\"\" def __init__(self, name, **kwargs): \"\"\"Create a new", "cls(**config) def _serialize_hyperparameter(self, hyperparameter_name): \"\"\"Serialize a hyperparameter that can be", "def _call_if_callable(self, param): \"\"\"Call the function if param is callable.\"\"\"", "variable, slot_variable): \"\"\"Restore a newly created slot variable's value.\"\"\" variable_key", "creating it. Called when a variable which has an associated", "3. Apply the processed gradients with `apply_gradients()`. Example: ```python #", "to the number of replicas being used to compute a", "Compute the gradients for a list of variables. with tf.GradientTape()", "gradient has been applied to the variable. Important: If gradient", "have a slot name you can ask the optimizer for", "your own optimization algorithm, simply inherit from this class and", "on the order created.\"\"\" return self._weights @property def weights(self): \"\"\"Returns", "= _var_key(variable) slot_dict = self._slots.get(variable_key, {}) slot_variable = slot_dict.get(slot_name, None)", "ops created by the original optimizer, but currently we do", "is malformed. ValueError: If none of the variables have gradients.", "it's a normal # graph. if slot_variable is not None:", "but currently we do not support using the optimizer object", "- get_config (serialization of the optimizer, include all hyper parameters)", "by a factor equal to the number of replicas being", "apply the capped gradients. opt.apply_gradients(capped_grads_and_vars) ``` ### Use with `tf.distribute.Strategy`.", "representing a primary variable, for looking up slots. In graph", "_prepare(self, var_list): pass def _create_hypers(self): if self._hypers_created: return # Iterate", "`tf.keras.losses.Reduction.SUM` for not. If you are not using these and", "this function. Args: loss: A callable taking no arguments which", "value of the variable. \"\"\" raise NotImplementedError() def _resource_apply_sparse_duplicate_indices(self, grad,", "= var_list() var_list = nest.flatten(var_list) grads = tape.gradient(loss_value, var_list, grad_loss)", "`tf.distribute.Strategy` aware, which means it automatically sums gradients across all", "A `Tensor` holding the gradient computed for `loss`. name: Optional", "the gradients for a list of variables. with tf.GradientTape() as", "hasattr(self, \"clipvalue\"): grads = [ clip_ops.clip_by_value(g, -self.clipvalue, self.clipvalue) for g", "pylint: disable=protected-access dtype=var.dtype, trainable=False, initial_value=initial_value) backend.track_variable(weight) slot_dict[slot_name] = weight self._restore_slot_variable(", "def apply_gradients(self, grads_and_vars, name=None): \"\"\"Apply gradients to variables. This is", "kwargs[k])) self._use_locking = True self._name = name self._hyper = {}", "(causing us to realize that the slot variable needs to", "or if we've pulled out an # existing slot variable,", "value() if dtype: return math_ops.cast(value, dtype) else: return value def", "math_ops.cast(self.iterations, var_dtype) lr_t = math_ops.cast(lr_t(local_step), var_dtype) if self._initial_decay > 0.:", "or 2) subclassed models. Pass var_list as callable in these", "list of tuples (gradient, variable). 
Do whatever you # need", "This class defines the API to add Ops to train", "deal correctly with duplicate indices may instead override this method", "def _resource_scatter_update(self, x, i, v): with ops.control_dependencies( [resource_variable_ops.resource_scatter_update(x.handle, i, v)]):", "'gradient' part, for example cap them, etc. capped_grads_and_vars = [(MyCapper(gv[0]),", "shape \" + str(pv.shape) + \" not compatible with \"", "self._hypers_created = False def minimize(self, loss, var_list, grad_loss=None, name=None): \"\"\"Minimize", "isinstance(initializer, six.string_types) or callable(initializer): initializer = initializers.get(initializer) if synchronization ==", "can be either regular Python values (like 1.0), tensors, or", "grads_and_vars) var_list = [v for _, v in grads_and_vars] grads_and_vars", "or `None` if there is no gradient for the given", "correct behavior, to sum non-unique indices and their associated gradients,", "when the variable list would otherwise be incomplete before `minimize`", "_var_key(var) slot_dict = self._slots.setdefault(var_key, {}) weight = slot_dict.get(slot_name, None) if", "tensorflow.python.training.tracking import base as trackable from tensorflow.python.util import nest from", "config dictionary. Arguments: config: A Python dictionary, typically the output", "= zip(processed_grads, var_list) # grads_and_vars is a list of tuples", "first component of a tensor's shape is the *replica-local* batch", "[resource_variable_ops.resource_scatter_update(x.handle, i, v)]): return x.value() # --------------- # For implementing", "grads_and_vars = list(zip(grads, params)) self._assert_valid_dtypes([ v for g, v in", "sum of `values` slices associated with each unique index. \"\"\"", "return value if callable(value): value = value() if dtype: return", "in grads ] grads_and_vars = list(zip(grads, var_list)) self._assert_valid_dtypes([ v for", "OR CONDITIONS OF ANY KIND, either express or implied. #", "be called by the constructors of subclasses. Note that Optimizer", "accumulators created for the optimizer. **kwargs: keyword arguments. Allowed to", "slot_name, variable): \"\"\"Restore a slot variable's value, possibly creating it.", "from tensorflow.python.framework import tensor_util from tensorflow.python.keras import backend from tensorflow.python.keras", "the License is distributed on an \"AS IS\" BASIS, #", "in case slot variables have their own dependencies, in which", "slot variable. Defer restoring until it gets created # normally.", "`Optimizer`'s slot to restore into. variable: The variable object this", "= trackable.CheckpointInitialValue( checkpoint_position=slot_variable_position) slot_variable = self.add_slot( var=variable, initializer=initializer, slot_name=slot_name) #", "get_slot(self, var, slot_name): var_key = _var_key(var) slot_dict = self._slots[var_key] return", "the indices for which the gradient is nonzero. Indices are", "add a dependency on # a slot variable if not", "and override the following methods: - resource_apply_dense (update variable given", "opt.minimize(loss, var_list=[var1, var2]) ``` ### Write a customized optimizer. If", "for a list of variables. 
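`MyCapper`, `<call_loss_function>`, and `<list_of_variables>` above are
placeholders. A minimal concrete sketch, using elementwise value clipping as
the "capper" (the variables and loss here are assumptions for illustration):

```python
import tensorflow as tf

var1 = tf.Variable(5.0)
var2 = tf.Variable(-3.0)
opt = tf.keras.optimizers.SGD(learning_rate=0.1)

with tf.GradientTape() as tape:
  loss = 3 * var1 * var1 + 2 * var2 * var2
grads = tape.gradient(loss, [var1, var2])

# Cap each gradient to the range [-1, 1] before applying it.
capped = [(tf.clip_by_value(g, -1.0, 1.0), v)
          for g, v in zip(grads, [var1, var2])]
opt.apply_gradients(capped)
```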
### Use with `tf.distribute.Strategy`

This optimizer class is `tf.distribute.Strategy` aware, which means it
automatically sums gradients across all replicas. To average gradients, you
divide your loss by the global batch size, which is done automatically if you
use `tf.keras` built-in training or evaluation loops. See the `reduction`
argument of your loss, which should be set to
`tf.keras.losses.Reduction.SUM_OVER_BATCH_SIZE` for averaging or
`tf.keras.losses.Reduction.SUM` for not.

If you are not using these and you want to average gradients, you should use
`tf.math.reduce_sum` to add up your per-example losses and then divide by the
global batch size.
Note that when using `tf.distribute.Strategy`, the first component of a
tensor's shape is the *replica-local* batch size, which is off by a factor
equal to the number of replicas being used to compute a single step. As a
result, using `tf.math.reduce_mean` will give the wrong answer, resulting in
gradients that can be many times too big.
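A sketch of the recommended manual scaling; `GLOBAL_BATCH_SIZE` is assumed to
be the total batch size summed across all replicas:

```python
import tensorflow as tf

GLOBAL_BATCH_SIZE = 64  # assumption: total batch size across all replicas

def compute_loss(labels, predictions):
  # Per-example losses (no reduction), so we control the scaling ourselves.
  per_example_loss = tf.keras.losses.mse(labels, predictions)
  # Sum, then divide by the *global* batch size, not the replica-local one.
  return tf.reduce_sum(per_example_loss) / GLOBAL_BATCH_SIZE
```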
### Variable Constraint

All Keras optimizers respect variable constraints. If a constraint function is
provided for any variable, the constraint will be applied to the variable
after the gradient has been applied to it. Important: if the gradient is a
sparse tensor, variable constraint is not supported.

### Thread Compatibility

The entire optimizer is currently thread compatible, not thread-safe. The user
needs to perform synchronization if necessary.

### Slots

Many optimizer subclasses, such as `Adam` and `Adagrad`, allocate and manage
additional variables associated with the variables to train. These are called
<i>Slots</i>. Slots have names and you can ask the optimizer for the names of
the slots that it uses. Once you have a slot name you can ask the optimizer
for the variable it created to hold the slot value.

This can be useful if you want to log or debug a training algorithm, report
stats about the slots, etc.
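A short sketch of querying slots; the `'momentum'` slot name applies to `SGD`
with momentum, and other optimizers use different names:

```python
import tensorflow as tf

var = tf.Variable(1.0)
opt = tf.keras.optimizers.SGD(learning_rate=0.1, momentum=0.9)
opt.minimize(lambda: var * var, var_list=[var])  # creates the slots

print(opt.get_slot_names())              # e.g. ['momentum']
momentum_var = opt.get_slot(var, 'momentum')
print(momentum_var.numpy())
```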
### Hyper parameters

These are arguments passed to the optimizer subclass constructor (the
`__init__` method), and then passed to `self._set_hyper()`. They can be either
regular Python values (like 1.0), tensors, or callables. If they are callable,
the callable will be called during `apply_gradients()` to get the value for
the hyper parameter.

Hyper parameters can be overwritten through user code:

```python
# Create an optimizer with the desired parameters.
opt = tf.keras.optimizers.SGD(learning_rate=0.1)
# `loss` is a callable that takes no argument and returns the value
# to minimize.
loss = lambda: 3 * var1 + 2 * var2
# In eager mode, simply call minimize to update the list of variables.
opt.minimize(loss, var_list=[var1, var2])
# update learning rate
opt.learning_rate = 0.05
opt.minimize(loss, var_list=[var1, var2])
```
### Write a customized optimizer

If you intend to create your own optimization algorithm, simply inherit from
this class and override the following methods (a sketch follows this list):

- `_resource_apply_dense` (update variable given gradient tensor is dense)
- `_resource_apply_sparse` (update variable given gradient tensor is sparse)
- `_create_slots` (if your optimizer algorithm requires additional variables)
- `get_config` (serialization of the optimizer; include all hyper parameters)
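A minimal sketch of such a subclass, implementing plain gradient descent. It
is an illustration of the override points, not the library's own `SGD`; it
leans on the private `_set_hyper`/`_get_hyper`/`_serialize_hyperparameter`
helpers described below, so it is tied to this version of the base class, and
slots and sparse support are omitted:

```python
import tensorflow as tf

class MyGradientDescent(tf.keras.optimizers.Optimizer):
  """Plain gradient descent written against the OptimizerV2 override points."""

  def __init__(self, learning_rate=0.01, name="MyGradientDescent", **kwargs):
    super(MyGradientDescent, self).__init__(name, **kwargs)
    self._set_hyper("learning_rate", learning_rate)

  def _resource_apply_dense(self, grad, handle):
    # `handle` is the variable to update; apply var <- var - lr * grad.
    lr = self._get_hyper("learning_rate", handle.dtype.base_dtype)
    return handle.assign_sub(lr * grad)

  def _resource_apply_sparse(self, grad, handle, indices):
    raise NotImplementedError("Sparse gradients not supported in this sketch.")

  def get_config(self):
    config = super(MyGradientDescent, self).get_config()
    config.update(
        {"learning_rate": self._serialize_hyperparameter("learning_rate")})
    return config
```

With this in place, `MyGradientDescent(0.1).minimize(loss, var_list=[...])`
behaves like any built-in optimizer.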
### Keras models

In Keras models, sometimes variables are created when the model is first
called, instead of at construction time. Examples include 1) sequential models
without input shape pre-defined, or 2) subclassed models. Pass `var_list` as a
callable in these cases:

```python
opt = tf.keras.optimizers.SGD(learning_rate=0.1)
model = tf.keras.Sequential()
model.add(tf.keras.layers.Dense(num_hidden, activation='relu'))
model.add(tf.keras.layers.Dense(num_classes, activation='sigmoid'))
loss_fn = lambda: tf.keras.losses.mse(model(input), output)
var_list_fn = lambda: model.trainable_weights
for input, output in data:
  opt.minimize(loss_fn, var_list_fn)
```
### Creating an optimizer

The constructor `__init__(self, name, **kwargs)` must be called by the
constructors of subclasses. Note that `Optimizer` instances should not bind to
a single graph, and so shouldn't keep Tensors as member variables. Generally
you should be able to use the `_set_hyper()`/`state.get_hyper()` facility
instead. This class is thread-compatible.

Args:
  name: A non-empty string. The name to use for accumulators created for the
    optimizer.
  **kwargs: keyword arguments. Allowed to be {`clipnorm`, `clipvalue`, `lr`,
    `decay`}. `clipnorm` is clip gradients by norm; `clipvalue` is clip
    gradients by value; `decay` is included for backward compatibility to
    allow time inverse decay of learning rate; `lr` is included for backward
    compatibility, recommended to use `learning_rate` instead. All keyword
    arguments must be non-negative.

Raises:
  ValueError: If name is malformed, or if a keyword argument is negative.
  RuntimeError: If `_create_slots` has been overridden instead of
    `_create_vars`.
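For instance, gradient clipping can be requested directly through these
constructor kwargs (the thresholds here are illustrative):

```python
import tensorflow as tf

# Clip each gradient tensor's norm to 1.0 before it is applied.
opt = tf.keras.optimizers.SGD(learning_rate=0.1, clipnorm=1.0)
# Or clip each gradient elementwise to the range [-0.5, 0.5].
opt = tf.keras.optimizers.SGD(learning_rate=0.1, clipvalue=0.5)
```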
### `minimize(loss, var_list, grad_loss=None, name=None)`

Minimize `loss` by updating `var_list`. This method simply computes gradients
using `tf.GradientTape` and calls `apply_gradients()`. If you want to process
the gradient before applying, then call `tf.GradientTape` and
`apply_gradients()` explicitly instead of using this function.

Args:
  loss: A callable taking no arguments which returns the value to minimize.
  var_list: list or tuple of `Variable` objects to update to minimize `loss`,
    or a callable returning the list or tuple of `Variable` objects. Use a
    callable when the variable list would otherwise be incomplete before
    `minimize`, since the variables are created the first time `loss` is
    called.
  grad_loss: Optional. A `Tensor` holding the gradient computed for `loss`.
  name: Optional name for the returned operation.

Returns:
  An `Operation` that updates the variables in `var_list`. If `global_step`
  was not `None`, that operation also increments `global_step`.

Raises:
  ValueError: If some of the variables are not `Variable` objects.
### `_compute_gradients(loss, var_list, grad_loss=None)`

Compute gradients of `loss` for the variables in `var_list`. This is the first
part of `minimize()`. It returns a list of (gradient, variable) pairs where
"gradient" is the gradient for "variable". Variable is always present, but
gradient can be `None`: a gradient may be a `Tensor`, an `IndexedSlices`, or
`None` if there is no gradient for the given variable.

Raises:
  TypeError: If `var_list` contains anything else than `Variable` objects.
  ValueError: If some arguments are invalid, or `var_list` is `None`.

### `get_gradients(loss, params)`

Returns gradients of `loss` with respect to `params`.

Arguments:
  loss: Loss tensor.
  params: List of variables.

Returns:
  List of gradient tensors.

Raises:
  ValueError: In case any gradient cannot be computed (e.g. if the gradient
    function is not implemented). The error message asks you to make sure that
    all of your ops have a gradient defined (i.e. are differentiable); common
    ops without gradient: `K.argmax`, `K.round`, `K.eval`.
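As noted above, a gradient may be an `IndexedSlices` rather than a dense
`Tensor`; gathering rows from a variable is the typical source. A sketch with
illustrative shapes:

```python
import tensorflow as tf

emb = tf.Variable(tf.ones([100, 4]))   # e.g. an embedding table
with tf.GradientTape() as tape:
  rows = tf.gather(emb, [3, 7, 3])     # index 3 repeated on purpose
  loss = tf.reduce_sum(rows)
grad = tape.gradient(loss, emb)
print(type(grad).__name__)             # IndexedSlices
print(grad.indices.numpy())            # [3 7 3] -- duplicates are allowed here
```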
### Slot variables and checkpointing

Slot variables are not owned by any one object: we don't want to save the slot
variable if the optimizer is saved without the non-slot variable, or if the
non-slot variable is saved without the optimizer; it's a dependency hypergraph
with edges of the form (optimizer, non-slot variable, slot variable).

`_restore_slot_variable(slot_name, variable, slot_variable)` restores a newly
created slot variable's value. It iterates over deferred restores, highest
restore UID first, to minimize the number of assignments.

`_create_or_restore_slot_variable(slot_variable_position, slot_name, variable)`
restores a slot variable's value, possibly creating it. It is called when a
variable which has an associated slot variable has just been added to a
dependency graph (causing us to realize that the slot variable needs to be
restored).

Args:
  slot_variable_position: A `trackable._CheckpointPosition` object indicating
    the slot variable `Trackable` object to be restored.
  slot_name: The name of this `Optimizer`'s slot to restore into.
  variable: The variable object this slot is being created for.

Slot variable creation is deferred if there is an active variable creator
scope. Generally we'd like to eagerly create/restore slot variables when
possible, but this may mean that scopes intended to catch `variable` also
catch its eagerly created slot variable unintentionally (specifically,
`make_template` would add a dependency on a slot variable if not for this
case). Deferring is mostly harmless (aside from double initialization), and
makes variable creator scopes behave the same way they do when graph building.
When possible, the slot variable is created with a restoring initializer; no
new variables are created when graph building, where `_restore_slot_variable`
instead adds restore ops to the graph.
### `apply_gradients(grads_and_vars, name=None)`

Apply gradients to variables. This is the second part of `minimize()`. It
returns an `Operation` that applies gradients.

Args:
  grads_and_vars: List of (gradient, variable) pairs.
  name: Optional name for the returned operation. Default to the name passed
    to the `Optimizer` constructor.

Returns:
  An `Operation` that applies the specified gradients. If `global_step` was
  not `None`, that operation also increments `global_step`.

Raises:
  TypeError: If `grads_and_vars` is malformed.
  ValueError: If none of the variables have gradients ("No gradients provided
    for any variable").

Internally, `apply_gradients` defers to `_distributed_apply` via
`distribute_ctx.get_replica_context().merge_call(...)`, which reduces the
gradients across replicas with `ReduceOp.SUM` and then applies the
per-variable updates. Eager updates execute immediately; if the current
context is graph mode, or any of the update ops are symbolic, the step update
is carried out under a graph context.
\"\"\" unique_indices, new_index_positions = array_ops.unique(indices)", "[self.apply_gradients(grads_and_vars)] def _set_hyper(self, name, value): \"\"\"set hyper `name` to value.", "{}) weight = slot_dict.get(slot_name, None) if weight is None: if", "variable. Defer restoring until it gets created # normally. We", "create the slot variable with a restoring initializer. No new", "and you can ask the optimizer for the names of", "graph. This method is nonetheless important when graph building for", "training steps this Optimizer has run.\"\"\" if self._iterations is None:", "grad.values, var, grad.indices) update_op = self._resource_apply_dense(grad, var) if var.constraint is", "you can ask the optimizer for the variable it created", "of `_apply_sparse_duplicate_indices` for details. By default the correct behavior, to", "([v.name for v in vars_with_empty_grads])) return filtered def _var_key(var): \"\"\"Key", "{} has `None` for gradient. \" \"Please make sure that", "kwargs[k] < 0: raise ValueError(\"Expected {} >= 0, received: {}\".format(k,", "In case any gradient cannot be computed (e.g. if gradient", "\"clipvalue\"): config[\"clipvalue\"] = self.clipvalue return config @classmethod def from_config(cls, config,", "= [ clip_ops.clip_by_value(g, -self.clipvalue, self.clipvalue) for g in grads ]", "first called, instead of construction time. Examples include 1) sequential", "bind to a single graph, and so shouldn't keep Tensors", "clip_ops from tensorflow.python.ops import gradients from tensorflow.python.ops import math_ops from", "these and you want to average gradients, you should use", "v.dtype != dtypes.resource ]) return grads_and_vars def get_gradients(self, loss, params):", "as tape: loss = <call_loss_function> vars = <list_of_variables> grads =", "g in grads] if hasattr(self, \"clipvalue\"): grads = [ clip_ops.clip_by_value(g,", "execute immediately) with ops._get_graph_from_inputs(update_ops).as_default(): # pylint: disable=protected-access with ops.control_dependencies(update_ops): return", "any_symbolic = any(isinstance(i, ops.Operation) or tf_utils.is_symbolic_tensor(i) for i in update_ops)", "through user code: Example: ```python # Create an optimizer with", "of `values` slices associated with each unique index. \"\"\" unique_indices,", "tensorflow.python.distribute import reduce_util as ds_reduce_util from tensorflow.python.eager import backprop from", "# distributed under the License is distributed on an \"AS", "import variables as tf_variables from tensorflow.python.platform import tf_logging as logging", "the wrong answer, resulting in gradients that can be many", "indices: A one-dimensional integer `Tensor`, indexing into the first dimension", "then divide by the global batch size. Note that when", "of your ops have a \" \"gradient defined (i.e. are", "# Unless required by applicable law or agreed to in", "In eager mode, simply call minimize to update the list", "called, instead of construction time. Examples include 1) sequential models", "shape is the *replica-local* batch size, which is off by", "loss_fn = lambda: tf.keras.losses.mse(model(input), output) var_list_fn = lambda: model.trainable_weights for", "scope_name = (\"\" if ops.executing_eagerly_outside_functions() else \"_\" + var.op.name) with", "such as a function used for a hyperparameter. 
Returns: An", "\"\"\"`apply_gradients` using a `DistributionStrategy`.\"\"\" reduced_grads = distribution.extended.batch_reduce_to( ds_reduce_util.ReduceOp.SUM, grads_and_vars) var_list", "handle, unique_indices) def _resource_apply_sparse(self, grad, handle, indices): \"\"\"Add ops to", "RuntimeError: If _create_slots has been overridden instead of _create_vars. \"\"\"", "apply sparse gradients to the variable `handle`. Similar to `_apply_sparse`,", "`handle`. Similar to `_apply_sparse`, the `indices` argument to this method", "\"AS IS\" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY", "An Operation that updates the variables in `var_list`. If `global_step`", "Trackable. Stores information about how to restore # slot variables", "and makes variable creator scopes # behave the same way", "contains the sum of `values` slices associated with each unique", "update_ops) if not context.executing_eagerly() or any_symbolic: # If the current", "``` ### Processing gradients before applying them. Calling `minimize()` takes", "(slot_variable is None and context.executing_eagerly() and slot_variable_position.is_simple_variable() # Defer slot", "False elif trainable is None: trainable = True variable =", "can be callable, tensor, numeric.\"\"\" if isinstance(value, trackable.Trackable): self._track_trackable(value, name,", "in grads_and_vars: if grad is None: vars_with_empty_grads.append(var) else: filtered.append((grad, var))", "hyperparameter that can be a float, callable, or Tensor.\"\"\" value", "created slot variable # unintentionally (specifically make_template would add a", "i, v): with ops.control_dependencies( [resource_variable_ops.resource_scatter_add(x.handle, i, v)]): return x.value() def", "algorithm, report stats about the slots, etc. ### Hyper parameters", "to apply dense gradients to the variable `handle`. Args: grad:", "callable(value): value = value() if dtype: return math_ops.cast(value, dtype) else:", "``` ### Use with `tf.distribute.Strategy`. This optimizer class is `tf.distribute.Strategy`", "of `minimize()`. It returns an `Operation` that applies gradients. Args:", "us to realize that the slot variable needs to be", "intended to catch # `variable` also catch its eagerly created", "too big. ### Variable Constraint All Keras optimizers respect variable", "name, shape, dtype=None, initializer=\"zeros\", trainable=None, synchronization=tf_variables.VariableSynchronization.AUTO, aggregation=tf_variables.VariableAggregation.NONE): if dtype is", "not supported. ### Thread Compatibility The entire optimizer is currently", "`var_list` contains anything else than `Variable` objects. ValueError: If some", "is enforced by first pre-processing `grad` and `indices` and passing", "self._slots[var_key] return slot_dict[slot_name] def _prepare(self, var_list): pass def _create_hypers(self): if", "= self.add_weight( name, shape=[], trainable=False, initializer=value, aggregation=tf_variables.VariableAggregation.ONLY_FIRST_REPLICA) self._hypers_created = True", "the Apache License, Version 2.0 (the \"License\"); # you may", "`minimize()`. It returns an `Operation` that applies gradients. 
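As a concrete, runnable version of the three steps above, the following sketch clips the gradients by global norm before handing them back to the optimizer. The toy variables and the clipping threshold of 5.0 are illustrative and not part of the original text.

```python
import tensorflow as tf

opt = tf.keras.optimizers.SGD(learning_rate=0.1)
var1 = tf.Variable(2.0)
var2 = tf.Variable(3.0)

# 1. Compute the gradients.
with tf.GradientTape() as tape:
  loss = 3 * var1 * var1 + 2 * var2 * var2
grads = tape.gradient(loss, [var1, var2])

# 2. Process the gradients: here, clip them by global norm.
clipped, _ = tf.clip_by_global_norm(grads, 5.0)

# 3. Apply the processed gradients.
opt.apply_gradients(zip(clipped, [var1, var2]))
```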
### Use with `tf.distribute.Strategy`.

This optimizer class is `tf.distribute.Strategy` aware, which means it automatically sums gradients across all replicas. To average gradients, you divide your loss by the global batch size, which is done automatically if you use `tf.keras` built-in training or evaluation loops. See the `reduction` argument of your loss, which should be set to `tf.keras.losses.Reduction.SUM_OVER_BATCH_SIZE` for averaging or `tf.keras.losses.Reduction.SUM` for not.

If you are not using these and you want to average gradients, you should use `tf.math.reduce_sum` to add up your per-example losses and then divide by the global batch size. Note that when using `tf.distribute.Strategy`, the first component of a tensor's shape is the *replica-local* batch size, which is off by a factor equal to the number of replicas being used to compute a single step. As a result, using `tf.math.reduce_mean` will give the wrong answer, resulting in gradients that can be many times too big. See the sketch below.
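The loss-scaling rule in the previous paragraph can be written as a small helper. This is a minimal sketch; the global batch size of 64 is an assumption for illustration, and `tf.nn.compute_average_loss` performs the same division if you prefer the built-in helper.

```python
import tensorflow as tf

GLOBAL_BATCH_SIZE = 64  # batch size across all replicas

def distributed_loss(labels, predictions):
  per_example = tf.keras.losses.mse(labels, predictions)
  # Wrong under tf.distribute.Strategy: tf.math.reduce_mean(per_example)
  # divides by the *replica-local* batch size, so the replica-summed
  # gradients come out too large by a factor of the number of replicas.
  # Right: sum the per-example losses and divide by the global batch size.
  return tf.math.reduce_sum(per_example) / GLOBAL_BATCH_SIZE
  # Equivalently:
  # return tf.nn.compute_average_loss(per_example,
  #                                   global_batch_size=GLOBAL_BATCH_SIZE)
```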
### Variable Constraint

All Keras optimizers respect variable constraints. If a constraint function is passed to any variable, the constraint will be applied to the variable after the gradient has been applied to it. Important: if the gradient is a sparse tensor, variable constraints are not supported.

### Thread Compatibility

The entire optimizer is currently thread compatible, not thread-safe. The user needs to perform synchronization if necessary.
### Slots

Many optimizer subclasses, such as `Adam` and `Adagrad`, allocate and manage additional variables associated with the variables to train. These are called <i>Slots</i>. Slots have names, and you can ask the optimizer for the names of the slots that it uses. Once you have a slot name, you can ask the optimizer for the variable it created to hold the slot value.

This can be useful if you want to log or debug a training algorithm, report stats about the slots, and so on. A minimal example follows.
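A sketch of the slot API described above, assuming `Adam`, whose slot names are `'m'` and `'v'`; slots only exist once gradients have been applied at least once.

```python
import tensorflow as tf

opt = tf.keras.optimizers.Adam(learning_rate=0.01)
var = tf.Variable([1.0, 2.0])

with tf.GradientTape() as tape:
  loss = tf.reduce_sum(var * var)
grads = tape.gradient(loss, [var])
opt.apply_gradients(zip(grads, [var]))   # slots are created here

print(opt.get_slot_names())              # ['m', 'v'] for Adam
m = opt.get_slot(var, 'm')               # first-moment slot held for `var`
print(m.numpy())
```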
### Hyper parameters

These are arguments passed to the optimizer subclass constructor (the `__init__` method) and then passed to `self._set_hyper()`. They can be either regular Python values, tensors, or callables. If they are callable, the callable will be called during `apply_gradients()` to get the value for the hyper parameter.

Hyper parameters can be overwritten through user code:

Example:

```python
# Create an optimizer with the desired parameters.
opt = tf.keras.optimizers.SGD(learning_rate=0.1)
# `loss` is a callable that takes no argument and returns the value
# to minimize.
loss = lambda: 3 * var1 + 2 * var2
# In eager mode, simply call minimize to update the list of variables.
opt.minimize(loss, var_list=[var1, var2])
# update learning rate
opt.learning_rate = 0.05
opt.minimize(loss, var_list=[var1, var2])
```
### Write a customized optimizer.

If you intend to create your own optimization algorithm, simply inherit from this class and override the following methods:

  - resource_apply_dense (update variable given gradient tensor is dense)
  - resource_apply_sparse (update variable given gradient tensor is sparse)
  - create_slots (if your optimizer algorithm requires additional variables)
  - get_config (serialization of the optimizer, include all hyper parameters)

A minimal sketch appears after this list.
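In the class itself the overridable hooks are the underscore-prefixed methods `_resource_apply_dense`, `_resource_apply_sparse` and `_create_slots`, plus `get_config`. The following is a minimal sketch of a plain gradient-descent optimizer, assuming the TF 2.x `tf.keras.optimizers.Optimizer` base class and its protected helpers `_set_hyper`, `_get_hyper`, `_resource_scatter_add` and `_serialize_hyperparameter`; a real subclass (momentum, `apply_state` caching, etc.) would do more.

```python
import tensorflow as tf

class MyGradientDescent(tf.keras.optimizers.Optimizer):
  """Plain gradient descent: var <- var - learning_rate * grad."""

  def __init__(self, learning_rate=0.01, name="MyGradientDescent", **kwargs):
    super(MyGradientDescent, self).__init__(name, **kwargs)
    self._set_hyper("learning_rate", learning_rate)

  def _resource_apply_dense(self, grad, var):
    lr = self._get_hyper("learning_rate", var.dtype.base_dtype)
    return var.assign_sub(lr * grad)

  def _resource_apply_sparse(self, grad, var, indices):
    # `indices` has already been de-duplicated by the base class.
    lr = self._get_hyper("learning_rate", var.dtype.base_dtype)
    return self._resource_scatter_add(var, indices, -lr * grad)

  def get_config(self):
    config = super(MyGradientDescent, self).get_config()
    config.update(
        {"learning_rate": self._serialize_hyperparameter("learning_rate")})
    return config
```

Used as `opt = MyGradientDescent(0.1); opt.minimize(loss, var_list=[...])`, this behaves like the built-in SGD without momentum.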
### Constructor arguments

The base constructor must be called by the constructors of subclasses. Note that `Optimizer` instances should not bind to a single graph, and so should not keep `Tensor`s as member variables.

Args:
  name: A non-empty string. The name to use for accumulators created for the
    optimizer.
  **kwargs: keyword arguments. Allowed to be {`clipnorm`, `clipvalue`, `lr`,
    `decay`}. `clipnorm` is clip gradients by norm; `clipvalue` is clip
    gradients by value; `decay` is included for backward compatibility to
    allow time inverse decay of learning rate; `lr` is included for backward
    compatibility, and it is recommended to use `learning_rate` instead.

Raises:
  ValueError: If the name is malformed.
  RuntimeError: If `_create_slots` has been overridden instead of
    `_create_vars`.

### `minimize()` and `apply_gradients()`

`minimize(loss, var_list, grad_loss=None, name=None)` minimizes `loss` by updating `var_list`. It simply computes the gradients with `tf.GradientTape` and calls `apply_gradients()`; to process the gradients before applying them, call `tf.GradientTape` and `apply_gradients()` explicitly instead of using this function. `loss` must be a callable taking no arguments that returns the value to minimize, and `var_list` is a list or tuple of `Variable` objects to update (or a callable returning them; use a callable when the variable list would otherwise be incomplete before `minimize`, because the variables are created the first time `loss` is called). `grad_loss` optionally holds a gradient computed for `loss`. It raises `ValueError` if some of the variables are not `Variable` objects.

`apply_gradients(grads_and_vars, name=None)` is the second part of `minimize()`: it takes a list of (gradient, variable) pairs and returns an `Operation` that applies the specified gradients; `name` defaults to the name passed to the `Optimizer` constructor. It raises `TypeError` if `grads_and_vars` is malformed and `ValueError` if none of the variables have gradients (pairs whose gradient is `None` are filtered out with a warning). Gradients themselves can be obtained with `get_gradients(loss, params)`, which raises `ValueError` if a gradient cannot be computed (e.g. if the gradient function is not implemented). A sketch of the built-in `clipnorm`/`clipvalue` handling follows.
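A sketch of the constructor's `clipnorm` argument mentioned above; the threshold of 1.0 and the toy variable are illustrative. `clipvalue` works the same way but clips element-wise by value.

```python
import tensorflow as tf

# Gradients computed by minimize()/get_gradients() are clipped to norm 1.0,
# so no manual processing step is needed for simple clipping.
opt = tf.keras.optimizers.SGD(learning_rate=0.1, clipnorm=1.0)

var = tf.Variable(10.0)
loss = lambda: var * var        # d(loss)/d(var) = 20.0, clipped to 1.0
opt.minimize(loss, var_list=[var])
print(var.numpy())              # 10.0 - 0.1 * 1.0 = 9.9
```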
### Sparse gradients

Dense gradients are applied through `_resource_apply_dense(grad, handle)`, where `grad` is a `Tensor` representing the gradient and `handle` is a `Tensor` of dtype `resource` that points to the variable to be updated; it returns an `Operation` which updates the value of the variable. Sparse gradients go through `_resource_apply_sparse_duplicate_indices`, whose `indices` may be repeated. Correct handling of duplicate indices, i.e. summing the `values` associated with any non-unique `indices`, is enforced by first pre-processing `grad` and `indices` (`unique` followed by an unsorted segment sum) and passing them on to `_resource_apply_sparse`, whose `indices` argument is therefore de-duplicated. Optimizers that deal correctly with duplicate indices may instead override `_resource_apply_sparse_duplicate_indices` to avoid the overhead of summing.

### Slots, hyper parameters and serialization

Slot variables are created with `add_slot(var, slot_name, initializer="zeros")` and retrieved with `get_slot(var, slot_name)`; `get_slot_names()` lists the names of this optimizer's slots. Hyper parameters are stored with `_set_hyper(name, value)` and read back, optionally cast to a dtype, with `_get_hyper(name, dtype)`. The optimizer's variables are exposed through `weights`, `get_weights()` and `set_weights()`, in the order they were created, and `iterations` tracks the number of training steps the optimizer has run. `get_config()` returns a Python dictionary (serializable) containing the configuration of the optimizer, and `from_config(config, custom_objects=None)` is its reverse, capable of instantiating the same optimizer from that dictionary. When restoring from a checkpoint, slot variables that have not been created yet are recorded and restored once the corresponding training variable exists; when restoring from a SavedModel, a non-functional `_RestoredOptimizer` holds the slot variables and hyper parameters. A round-trip example follows.
Optimizers which deal", "reduce_util as ds_reduce_util from tensorflow.python.eager import backprop from tensorflow.python.eager import", "or Tensor.\"\"\" value = self._hyper[hyperparameter_name] if isinstance(value, learning_rate_schedule.LearningRateSchedule): return learning_rate_schedule.serialize(value)", "# UID in case slot variables have their own dependencies,", "off by a factor equal to the number of replicas", "code: Example: ```python # Create an optimizer with the desired", "= 0.05 opt.minimize(loss, var_list=[var1, var2]) ``` ### Write a customized", "slot variable's value, possibly creating it. Called when a variable", "by updating the listed # variables. opt_op = opt.minimize(loss, var_list=[var1,", "case when a slot variable has already been created but", "= functools.partial( initializer, shape=var.shape, dtype=var.dtype) else: initial_value = initializer strategy", "return self._iterations @iterations.setter def iterations(self, variable): if self._iterations is not", "computed for `loss`. Returns: A list of (gradient, variable) pairs.", "names for this optimizer's slots.\"\"\" return self._slot_names def add_slot(self, var,", "# scope. Generally we'd like to eagerly create/restore slot variables", "`set_weights(weights)` on optimizer \" + self._name + \" with a", "there is an active variable creator # scope. Generally we'd", "use this class directly, but instead instantiate one of its", "batch size, which is done automatically if you use `tf.keras`", "regular Python values (like 1.0), tensors, or callables. If they", "arguments are invalid, or var_list is None. \"\"\" # TODO(josh11b):", "function is passed to any variable, the constraint will be", "called by the constructors of subclasses. Note that Optimizer instances", "if \"clipnorm\" in kwargs: self.clipnorm = kwargs.pop(\"clipnorm\") if \"clipvalue\" in", "to apply sparse gradients to `handle`, with repeated indices. Optimizers", "non-slot # variable, variable)). So we don't _track_ slot variables", "list of length \" + str(len(weights)) + \", but the", "mode, simply call minimize to update the list of variables.", "zip(param_values, params, weights): if pv.shape != w.shape: raise ValueError(\"Optimizer weight", "not None: # If we've either made this slot variable,", "obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0", "weights): if pv.shape != w.shape: raise ValueError(\"Optimizer weight shape \"", "average gradients, you divide your loss by the global batch", "models In Keras models, sometimes variables are created when the", "`get_config`, capable of instantiating the same optimizer from the config", "called `set_weights(weights)` on optimizer \" + self._name + \" with", "`tf.math.reduce_mean` will give the wrong answer, resulting in gradients that", "summed_values = math_ops.unsorted_segment_sum( values, new_index_positions, array_ops.shape(unique_indices)[0]) return (summed_values, unique_indices) @six.add_metaclass(abc.ABCMeta)", "grad.indices) update_op = self._resource_apply_dense(grad, var) if var.constraint is not None:", "step update should be carried out under a graph #", "batch size, which is off by a factor equal to", "was not `None`, that operation also increments `global_step`. Raises: ValueError:", "variable.\"\"\" if isinstance(var, ops.Tensor): raise NotImplementedError(\"Trying to update a Tensor", "make_template would add a dependency on # a slot variable", "False def minimize(self, loss, var_list, grad_loss=None, name=None): \"\"\"Minimize `loss` by", "steps: 1. 
Compute the gradients with `tf.GradientTape`. 2. Process the", "can ask the optimizer for the names of the slots", "Python values (like 1.0), tensors, or callables. If they are", "- resource_apply_dense (update variable given gradient tensor is dense) -", "in allowed_kwargs: raise TypeError(\"Unexpected keyword argument \" \"passed to optimizer:", "If they are callable, the callable will be called during", "from tensorflow.python.platform import tf_logging as logging from tensorflow.python.saved_model import revived_types", "} self._deferred_slot_restorations = {} decay = kwargs.pop(\"decay\", 0.0) if decay", "under the Apache License, Version 2.0 (the \"License\"); # you", "self._iterations = variable self._weights.append(self._iterations) def _decayed_lr(self, var_dtype): \"\"\"Get decayed learning", "context. (eager updates execute immediately) with ops._get_graph_from_inputs(update_ops).as_default(): # pylint: disable=protected-access", "callable(var_list): tape.watch(var_list) loss_value = loss() if callable(var_list): var_list = var_list()", "is dense) - resource_apply_sparse (update variable given gradient tensor is", "= loss() if callable(var_list): var_list = var_list() var_list = nest.flatten(var_list)", "not None and v.dtype != dtypes.resource ]) return [self.apply_gradients(grads_and_vars)] def", "gradient to variable.\"\"\" if isinstance(var, ops.Tensor): raise NotImplementedError(\"Trying to update", "learning_rate_schedule.serialize(value) if callable(value): return value() if tensor_util.is_tensor(value): return backend.get_value(value) return", "with base_layer. def set_weights(self, weights): params = self.weights if len(params)", "tensors are all valid types (see `_valid_dtypes`). Args: tensors: Tensors", "which points to the variable to be updated. indices: a", "tf.keras.optimizers.SGD(learning_rate=0.1) # `loss` is a callable that takes no argument", "any variable, the constraint will be applied to the variable", "- resource_apply_sparse (update variable given gradient tensor is sparse) -", "g in grads ] grads_and_vars = list(zip(grads, var_list)) self._assert_valid_dtypes([ v", "versions=[revived_types.VersionedTypeRegistration( object_factory=lambda proto: _RestoredOptimizer(), version=1, min_producer_version=1, min_consumer_version=1, setter=_RestoredOptimizer._set_hyper # pylint:", "its config. This method is the reverse of `get_config`, capable", "up slots. In graph mode the name is derived from", "new_index_positions = array_ops.unique(indices) summed_values = math_ops.unsorted_segment_sum( values, new_index_positions, array_ops.shape(unique_indices)[0]) return", "read. trainable = False elif trainable is None: trainable =", "# graph. if slot_variable is not None: # If we've", "None: vars_with_empty_grads.append(var) else: filtered.append((grad, var)) filtered = tuple(filtered) if not", "Custom training loop with Keras models In Keras models, sometimes", "\"\"\"Overridden to support hyperparameter access.\"\"\" try: return super(OptimizerV2, self).__getattribute__(name) except", "if len(params) != len(weights): raise ValueError( \"You called `set_weights(weights)` on", "grad_loss) if hasattr(self, \"clipnorm\"): grads = [clip_ops.clip_by_norm(g, self.clipnorm) for g", "before `minimize` and the variables are created at the first", "\"\"\"Creates an optimizer from its config. This method is the", "an associated slot variable is created or restored. 
When executing", "activation='relu')) model.add(tf.keras.layers.Dense(num_classes, activation='sigmoid') loss_fn = lambda: tf.keras.losses.mse(model(input), output) var_list_fn =", "Slots Many optimizer subclasses, such as `Adam` and `Adagrad` allocate", "**kwargs: keyword arguments. Allowed to be {`clipnorm`, `clipvalue`, `lr`, `decay`}.", "variables. with tf.GradientTape() as tape: loss = <call_loss_function> vars =", "[v for _, v in grads_and_vars] grads_and_vars = zip(reduced_grads, var_list)", "*replica-local* batch size, which is off by a factor equal", "to value. value can be callable, tensor, numeric.\"\"\" if isinstance(value,", "True self._name = name self._hyper = {} # dict: {variable", "additional variables) - get_config (serialization of the optimizer, include all", "function if param is callable.\"\"\" return param() if callable(param) else", "# `variable` also catch its eagerly created slot variable #", "value def __getattribute__(self, name): \"\"\"Overridden to support hyperparameter access.\"\"\" try:", "Args: grads_and_vars: List of (gradient, variable) pairs. name: Optional name", "(see `_valid_dtypes`). Args: tensors: Tensors to check. Raises: ValueError: If", "be incomplete before `minimize` since the variables are created at", "slot variables have their own dependencies, in which case #", "is sparse) - create_slots (if your optimizer algorithm requires additional", "for g in grads] grads_and_vars = zip(processed_grads, var_list) # grads_and_vars", "a constraint function on a sparse variable.\") return self._resource_apply_sparse_duplicate_indices( grad.values,", "loss: Loss tensor. params: List of variables. Returns: List of", "initial_value = initializer strategy = distribute_ctx.get_strategy() with strategy.extended.colocate_vars_with(var): weight =", "Raises: ValueError: If some of the variables are not `Variable`", "ValueError(\"Invalid type %r for %s, expected: %s.\" % (dtype, t.name,", "initializer=value, aggregation=tf_variables.VariableAggregation.ONLY_FIRST_REPLICA) self._hypers_created = True @property def iterations(self): \"\"\"Variable. The", "arguments are non-negative. if kwargs[k] < 0: raise ValueError(\"Expected {}", "implemented). \"\"\" params = nest.flatten(params) with backend.get_graph().as_default(): grads = gradients.gradients(loss,", "you can instead use the optimizer in three steps: 1.", "`loss`. name: Optional name for the returned operation. Returns: An", "# Set trainable to be false when variable is to", "the variable to be updated. indices: a `Tensor` of integral", "If some of the variables are not `Variable` objects. \"\"\"", "# pylint: disable=protected-access initializer = trackable.CheckpointInitialValue( checkpoint_position=slot_variable_position) slot_variable = self.add_slot(", "one of its subclasses such as `tf.keras.optimizers.SGD`, `tf.keras.optimizers.Adam`. ### Usage", "`__init__` method), and then passed to `self._set_hyper()`. They can be", "ops to apply sparse gradients to `handle`, with repeated indices.", "a hyperparameter. Returns: An optimizer instance. 
\"\"\" if \"lr\" in", "this Optimizer based on the order created.\"\"\" return self._weights def", "var1 + 2 * var2 * var2 # In graph", "variables are created when the model is first called, instead", "also catch its eagerly created slot variable # unintentionally (specifically", "an \"AS IS\" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF", "backward compatibility to allow time inverse decay of learning rate.", "simply computes gradient using `tf.GradientTape` and calls `apply_gradients()`. If you", "opt = tf.keras.optimizers.SGD(learning_rate=0.1) # `loss` is a callable that takes", "to update a Tensor \", var) if isinstance(grad, ops.IndexedSlices): if", "representing the gradient for the affected indices. handle: a `Tensor`", "\"passed to optimizer: \" + str(k)) # checks that all", "ops are # symbolic then the step update should be", "to average gradients, you should use `tf.math.reduce_sum` to add up", "of the optimimizer. An optimizer config is a Python dictionary", "it. slot_variable_position.restore(slot_variable) else: # We didn't make the slot variable.", "= self._hyper[name] if isinstance(value, learning_rate_schedule.LearningRateSchedule): return value if callable(value): value", "hyper parameter. Hyper parameters can be overwritten through user code:", "from tensorflow.python.distribute import reduce_util as ds_reduce_util from tensorflow.python.eager import backprop", "The entire optimizer is currently thread compatible, not thread-safe. The", "# In eager mode, simply call minimize to update the", "or isinstance(value, learning_rate_schedule.LearningRateSchedule)): self._hyper[name] = value else: backend.set_value(self._hyper[name], value) def", "obj: isinstance(obj, OptimizerV2), versions=[revived_types.VersionedTypeRegistration( object_factory=lambda proto: _RestoredOptimizer(), version=1, min_producer_version=1, min_consumer_version=1,", "before applying them you can instead use the optimizer in", "checkpoint_position in deferred_restorations: checkpoint_position.restore(slot_variable) def _create_or_restore_slot_variable( self, slot_variable_position, slot_name, variable):", "if grad is None: vars_with_empty_grads.append(var) else: filtered.append((grad, var)) filtered =", "isinstance(value, ops.Tensor) or callable(value): continue else: self._hyper[name] = self.add_weight( name,", "debug a training algorithm, report stats about the slots, etc.", "instead. This class in stateful and thread-compatible. Args: name: A", "an IndexedSlices object). Returns: A tuple of (`summed_values`, `unique_indices`) where", "return self.apply_gradients(grads_and_vars, name=name) def _compute_gradients(self, loss, var_list, grad_loss=None): \"\"\"Compute gradients", "var_list = [v for _, v in grads_and_vars] grads_and_vars =", "from tensorflow.python.ops import math_ops from tensorflow.python.ops import resource_variable_ops from tensorflow.python.ops", "TODO(tanzheny): Maybe share this logic with base_layer. def set_weights(self, weights):", "(specifically make_template would add a dependency on # a slot", "variables may be referenced in functions along with ops created", "= self._slots[var_key] return slot_dict[slot_name] def _prepare(self, var_list): pass def _create_hypers(self):", "is not currently \" \"supported. 
Please file a feature request", "Arguments: config: A Python dictionary, typically the output of get_config.", "model.add(tf.keras.layers.Dense(num_hidden, activation='relu')) model.add(tf.keras.layers.Dense(num_classes, activation='sigmoid') loss_fn = lambda: tf.keras.losses.mse(model(input), output) var_list_fn", "Returns: Python dictionary. \"\"\" config = {\"name\": self._name} if hasattr(self,", "(1. + decay_t * local_step) return lr_t @abc.abstractmethod def get_config(self):", "\"You called `set_weights(weights)` on optimizer \" + self._name + \"", "dict: {variable name : {slot name : variable}} self._slots =", "slots, etc. ### Hyper parameters These are arguments passed to", "between restores. self._deferred_slot_restorations.setdefault( slot_name, {}).setdefault(variable_key, []).append( slot_variable_position) def _filter_grads(grads_and_vars): \"\"\"Filter", "name = _var_key(var) return name + \"/\" + slot_name class", "\"variable\". Note that \"gradient\" can be a `Tensor`, an `IndexedSlices`,", "ValueError( \"You called `set_weights(weights)` on optimizer \" + self._name +", "for the names of the slots that it uses. Once", "implementation for checkpoint compatibility. Holds slot variables and hyperparameters when", "variables anywhere, and # instead special-case this dependency and otherwise", "dtype) else: return value def __getattribute__(self, name): \"\"\"Overridden to support", "catches these after normal creation and adds restore ops to", "of the form (optimizer, non-slot # variable, variable)). So we", "should be carried out under a graph # context. (eager", "`tf.keras.losses.Reduction.SUM_OVER_BATCH_SIZE` for averaging or `tf.keras.losses.Reduction.SUM` for not. If you are", "= _var_key(var) slot_dict = self._slots.setdefault(var_key, {}) weight = slot_dict.get(slot_name, None)", "first to minimize the number # of assignments. deferred_restorations.sort(key=lambda position:", "dependencies, in which case # those could differ between restores.", "to the variable after the gradient has been applied to", "See the `reduction` argument of your loss which should be", "grad equal to None.\"\"\" grads_and_vars = tuple(grads_and_vars) if not grads_and_vars:", "variable, or if we've pulled out an # existing slot", "Indices may be repeated. Returns: An `Operation` which updates the", "for loss, variables and gradients. \"\"\" return set( [dtypes.float16, dtypes.bfloat16,", "incomplete before `minimize` since the variables are created at the", "we'd like to eagerly create/restore slot variables # when possible,", "it gets created # normally. We keep a list rather", "synchronization if necessary. ### Slots Many optimizer subclasses, such as", "instead. Raises: ValueError: If name is malformed. RuntimeError: If _create_slots", "Create an optimizer with the desired parameters. opt = tf.keras.optimizers.SGD(learning_rate=0.1)", "= self._slots.setdefault(var_key, {}) weight = slot_dict.get(slot_name, None) if weight is", "loss, variables and gradients. Subclasses should override to allow other", "the gradient for \"variable\". 
Note that \"gradient\" can be a", "on a sparse variable.\") return self._resource_apply_sparse_duplicate_indices( grad.values, var, grad.indices) update_op", "custom_objects: A Python dictionary mapping names to additional Python objects", "of this Optimizer based on the order created.\"\"\" return self._weights", "filtered.append((grad, var)) filtered = tuple(filtered) if not filtered: raise ValueError(\"No", "all hyper parameters) \"\"\" def __init__(self, name, **kwargs): \"\"\"Create a", "kwargs: self.clipvalue = kwargs.pop(\"clipvalue\") self._hypers_created = False def minimize(self, loss,", "have their own dependencies, in which case # those could", "var_dtype) lr_t = math_ops.cast(lr_t(local_step), var_dtype) if self._initial_decay > 0.: local_step", "We keep a list rather than the one with the", "tensor's shape is the *replica-local* batch size, which is off", "not ops.get_default_graph()._variable_creator_stack): # pylint: disable=protected-access initializer = trackable.CheckpointInitialValue( checkpoint_position=slot_variable_position) slot_variable", "apply_grad_to_update_var, args=(grad,), group=False)) any_symbolic = any(isinstance(i, ops.Operation) or tf_utils.is_symbolic_tensor(i) for", "return learning_rate_schedule.serialize(value) if callable(value): return value() if tensor_util.is_tensor(value): return backend.get_value(value)", "able to use the _set_hyper()/state.get_hyper() facility instead. This class in", "trainable to be false when variable is to be synced", "### Write a customized optimizer. If you intend to create", "slot_dict.get(slot_name, None) if (slot_variable is None and context.executing_eagerly() and slot_variable_position.is_simple_variable()", "they do when graph building. and not ops.get_default_graph()._variable_creator_stack): # pylint:", "= array_ops.unique(indices) summed_values = math_ops.unsorted_segment_sum( values, new_index_positions, array_ops.shape(unique_indices)[0]) return (summed_values,", "a dependency hypergraph with edges of the form (optimizer, non-slot", "learning rate. `lr` is included for backward compatibility, recommended to", "copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # #", "# variables. opt_op = opt.minimize(loss, var_list=[var1, var2]) opt_op.run() # In", "number of training steps this Optimizer has run.\"\"\" if self._iterations", "we should restore it. slot_variable_position.restore(slot_variable) else: # We didn't make", "Authors. All Rights Reserved. # # Licensed under the Apache", "def _get_hyper(self, name, dtype=None): if not self._hypers_created: self._create_hypers() value =", "of training steps this Optimizer has run.\"\"\" if self._iterations is", "creator # scope. Generally we'd like to eagerly create/restore slot", "instances should not bind to a single graph, and so", "member variables. Generally you should be able to use the", "the config of the optimimizer. An optimizer config is a", "slot_name=slot_name, variable=var, slot_variable=weight) self._weights.append(weight) return weight def get_slot(self, var, slot_name):", "name for the returned operation. Default to the name passed", "`_valid_dtypes`). Args: tensors: Tensors to check. Raises: ValueError: If any", "are not owned by any one object (because we don't", "you can ask the optimizer for the names of the", "an optimizer. opt = tf.keras.optimizers.SGD(learning_rate=0.1) # Compute the gradients for", "example cap them, etc. 
capped_grads_and_vars = [(MyCapper(gv[0]), gv[1]) for gv", "been created # (trackable._CheckpointPosition objects). # {slot_name : # {_var_key(variable_to_train):", "optimizer functional by tracing its apply # methods. def __init__(self):", "a dependency graph (causing us to realize that the slot", "= True def get_config(self): # TODO(allenl): Save and restore the", "other float types. Returns: Valid types for loss, variables and", "indices): \"\"\"Add ops to apply sparse gradients to `handle`, with", "No new variables are created when graph building. Instead, _restore_slot_variable", "return var.assign(var.constraint(var)) else: return update_op update_ops = [] with backend.name_scope(name", "immediately) with ops._get_graph_from_inputs(update_ops).as_default(): # pylint: disable=protected-access with ops.control_dependencies(update_ops): return self._iterations.assign_add(1).op", "a `Tensor` of dtype `resource` which points to the variable", "add up your per-example losses and then divide by the", "optimizer is saved without the non-slot # variable, or if", "Apache License, Version 2.0 (the \"License\"); # you may not", "= config.pop(\"lr\") if \"learning_rate\" in config: if isinstance(config[\"learning_rate\"], dict): config[\"learning_rate\"]", "with Keras models In Keras models, sometimes variables are created", "Optional name for the returned operation. Default to the name", "tensor, numeric.\"\"\" if isinstance(value, trackable.Trackable): self._track_trackable(value, name, overwrite=True) if name", "should restore it. slot_variable_position.restore(slot_variable) else: # We didn't make the", "either express or implied. # See the License for the", "gradients before applying them. Calling `minimize()` takes care of both", "needs to be restored). Args: slot_variable_position: A `trackable._CheckpointPosition` object indicating", "class directly, but instead instantiate one of its subclasses such", "`tf.distribute.Strategy`. This optimizer class is `tf.distribute.Strategy` aware, which means it", "(serializable) containing the configuration of an optimizer. The same optimizer", "{} # dict: {variable name : {slot name : variable}}", "valid type. \"\"\" valid_dtypes = self._valid_dtypes() for t in tensors:", "`loss` for the variables in `var_list`. This is the first", "= <list_of_variables> grads = tape.gradient(loss, vars) processed_grads = [process_gradient(g) for", "self.weights return backend.batch_get_value(params) # TODO(tanzheny): Maybe share this logic with", "be incomplete before `minimize` and the variables are created at", "name is derived from the var shared name. In eager", "(gradient, variable) pairs. name: Optional name for the returned operation.", "This is the second part of `minimize()`. It returns an", "time. Examples include 1) sequential models without input shape pre-defined,", "name = \"learning_rate\" if hasattr(self, \"_hyper\") and name in self._hyper:", "name, dtype=None): if not self._hypers_created: self._create_hypers() value = self._hyper[name] if", "This class in stateful and thread-compatible. Args: name: A non-empty", "no argument and returns the value # to minimize. loss", "takes care of both computing the gradients and applying them", "_set_hyper()/state.get_hyper() facility instead. This class in stateful and thread-compatible. Args:", "any gradient cannot be computed (e.g. 
if gradient function not", "rate as a Tensor with dtype=var_dtype.\"\"\" lr_t = self._get_hyper(\"learning_rate\", var_dtype)", "slot_name, initializer=\"zeros\"): \"\"\"Add a new slot variable for `var`.\"\"\" if", "may instead override `_resource_apply_sparse_duplicate_indices` to avoid this overhead. Args: grad:", "tape.watch(var_list) loss_value = loss() if callable(var_list): var_list = var_list() var_list", "decay = kwargs.pop(\"decay\", 0.0) if decay < 0.: raise ValueError(\"decay", "slot variable creation if there is an active variable creator", "in `var_list`. If `global_step` was not `None`, that operation also", "first. Args: var: the variable. Returns: the unique name of", "with non-unique indices may instead override `_resource_apply_sparse_duplicate_indices` to avoid this", "a \" \"gradient defined (i.e. are differentiable). \" \"Common ops", "initializer=\"zeros\"): \"\"\"Add a new slot variable for `var`.\"\"\" if slot_name", "def get_weights(self): params = self.weights return backend.batch_get_value(params) # TODO(tanzheny): Maybe", "names of the slots that it uses. Once you have", "from tensorflow.python.keras import initializers from tensorflow.python.keras.engine import base_layer_utils from tensorflow.python.keras.optimizer_v2", "[] self._iterations = None # For implementing Trackable. Stores information", "variables in `var_list`. If `global_step` was not `None`, that operation", "= self.clipnorm if hasattr(self, \"clipvalue\"): config[\"clipvalue\"] = self.clipvalue return config", "methods. def __init__(self): super(_RestoredOptimizer, self).__init__(\"_RestoredOptimizer\") self._hypers_created = True def get_config(self):", "create your own optimization algorithm, simply inherit from this class", "dtype not in valid_dtypes: raise ValueError(\"Invalid type %r for %s,", "\"lr\" in config: config[\"learning_rate\"] = config.pop(\"lr\") if \"learning_rate\" in config:", "uses. Once you have a slot name you can ask", "kwargs.pop(\"clipnorm\") if \"clipvalue\" in kwargs: self.clipvalue = kwargs.pop(\"clipvalue\") self._hypers_created =", "Loss tensor. params: List of variables. Returns: List of gradient", "\"_\" + var.op.name) with backend.name_scope(\"update\" + scope_name): update_ops.extend( distribution.extended.update( var,", "and gradients. Subclasses should override to allow other float types.", "value of the variable. \"\"\" raise NotImplementedError() def _resource_scatter_add(self, x,", "value, possibly creating it. Called when a variable which has", "else: return update_op update_ops = [] with backend.name_scope(name or self._name):", "gradient cannot be computed (e.g. if gradient function not implemented).", "logging.warning( (\"Gradients does not exist for variables %s when minimizing", "correctly with duplicate indices may instead override this method to", "None: raise RuntimeError(\"Cannot set `iterations` to a new Variable after", "class _RestoredOptimizer(OptimizerV2): \"\"\"A non-functional Optimizer implementation for checkpoint compatibility. Holds", "grads_and_vars: scope_name = (\"\" if ops.executing_eagerly_outside_functions() else \"_\" + var.op.name)", "with ops.init_scope(): _ = self.iterations self._create_hypers() self._create_slots(var_list) self._prepare(var_list) return distribute_ctx.get_replica_context().merge_call(", "created # normally. We keep a list rather than the", "possibly creating it. 
Called when a variable which has an", "executing eagerly, we create the slot variable with a restoring", "ops.IndexedSlices): if var.constraint is not None: raise RuntimeError( \"Cannot use", "if \"learning_rate\" in config: if isinstance(config[\"learning_rate\"], dict): config[\"learning_rate\"] = learning_rate_schedule.deserialize(", "weight def get_slot(self, var, slot_name): var_key = _var_key(var) slot_dict =", "a `Tensor`, an `IndexedSlices`, or `None` if there is no", "An optimizer config is a Python dictionary (serializable) containing the", "after the gradient has been applied to the variable. Important:", "dictionary. Arguments: config: A Python dictionary, typically the output of", "var_list as callable in these cases. Example: ```python opt =", "gradients, is enforced by first pre-processing `grad` and `indices` and", "learning_rate_schedule.LearningRateSchedule): return value if callable(value): value = value() if dtype:", "_deduplicate_indexed_slices(values, indices): \"\"\"Sums `values` associated with any non-unique `indices`. Args:", "of `loss` for the variables in `var_list`. This is the", "distributed on an \"AS IS\" BASIS, # WITHOUT WARRANTIES OR", "{} self._slot_names = [] self._weights = [] self._iterations = None", "which have not yet been created # (trackable._CheckpointPosition objects). #", "dtype = dtypes.float32 if isinstance(initializer, six.string_types) or callable(initializer): initializer =", "None.\"\"\" grads_and_vars = tuple(grads_and_vars) if not grads_and_vars: return grads_and_vars filtered", "`self._set_hyper()`. They can be either regular Python values (like 1.0),", "< 0.: raise ValueError(\"decay cannot be less than 0: {}\".format(decay))", "is None and context.executing_eagerly() and slot_variable_position.is_simple_variable() # Defer slot variable", "value = self._hyper[hyperparameter_name] if isinstance(value, learning_rate_schedule.LearningRateSchedule): return learning_rate_schedule.serialize(value) if callable(value):", "handle, indices): \"\"\"Add ops to apply sparse gradients to `handle`,", "It returns a list of (gradient, variable) pairs where \"gradient\"", "var_list)) self._assert_valid_dtypes([ v for g, v in grads_and_vars if g", "return distribute_ctx.get_replica_context().merge_call( self._distributed_apply, args=(grads_and_vars,), kwargs={\"name\": name}) def _distributed_apply(self, distribution, grads_and_vars,", "self._slot_names: self._slot_names.append(slot_name) var_key = _var_key(var) slot_dict = self._slots.setdefault(var_key, {}) weight", "they are callable, the callable will be called during `apply_gradients()`", "slot variable, or if we've pulled out an # existing", "none of the variables have gradients. \"\"\" grads_and_vars = _filter_grads(grads_and_vars)", "def _assert_valid_dtypes(self, tensors): \"\"\"Asserts tensors are all valid types (see", "name is derived from the var unique id. If distribution", "int, float, learning_rate_schedule.LearningRateSchedule)) or isinstance(value, learning_rate_schedule.LearningRateSchedule)): self._hyper[name] = value else:", "to restore into. variable: The variable object this slot is", "variables and hyperparameters when an optimizer is restored from a", "data: opt.minimize(loss_fn, var_list_fn) ``` ### Processing gradients before applying them.", "passed to `self._set_hyper()`. They can be either regular Python values", "step. 
As a result, using `tf.math.reduce_mean` will give the wrong", "_resource_apply_sparse_duplicate_indices(self, grad, handle, indices): \"\"\"Add ops to apply sparse gradients", "# dict: {variable name : {slot name : variable}} self._slots", "with ops.control_dependencies(update_ops): return self._iterations.assign_add(1).op return self._iterations.assign_add(1) def get_updates(self, loss, params):", "isinstance(prev_value, (ops.Tensor, int, float, learning_rate_schedule.LearningRateSchedule)) or isinstance(value, learning_rate_schedule.LearningRateSchedule)): self._hyper[name] =", "are created at the first time `loss` is called. grad_loss:", "are invalid, or var_list is None. \"\"\" # TODO(josh11b): Test", "\"\"\"Serialize a hyperparameter that can be a float, callable, or", "callable.\"\"\" return param() if callable(param) else param def _resource_apply_dense(self, grad,", "used for a hyperparameter. Returns: An optimizer instance. \"\"\" if", "a hyperparameter that can be a float, callable, or Tensor.\"\"\"", "= False elif trainable is None: trainable = True variable", "[resource_variable_ops.resource_scatter_add(x.handle, i, v)]): return x.value() def _resource_scatter_update(self, x, i, v):", "an optimizer from its config. This method is the reverse", "this configuration. Returns: Python dictionary. \"\"\" config = {\"name\": self._name}", "the current context is graph mode or any of the", "not grads_and_vars: return grads_and_vars filtered = [] vars_with_empty_grads = []", "dtype=var_dtype.\"\"\" lr_t = self._get_hyper(\"learning_rate\", var_dtype) if isinstance(lr_t, learning_rate_schedule.LearningRateSchedule): local_step =", "or tuple of `Variable` objects. Use callable when the variable", "`Variable` objects to update to minimize `loss`, or a callable", "self._set_hyper(name, value) else: super(OptimizerV2, self).__setattr__(name, value) def get_slot_names(self): \"\"\"A list", "params) for grad, param in zip(grads, params): if grad is", "etc. capped_grads_and_vars = [(MyCapper(gv[0]), gv[1]) for gv in grads_and_vars] #", "var in grads_and_vars: scope_name = (\"\" if ops.executing_eagerly_outside_functions() else \"_\"", "operation also increments `global_step`. Raises: TypeError: If `grads_and_vars` is malformed.", "variables have their own dependencies, in which case # those", "the variable. \"\"\" raise NotImplementedError() def _resource_apply_sparse_duplicate_indices(self, grad, handle, indices):", "the _set_hyper()/state.get_hyper() facility instead. This class in stateful and thread-compatible.", "if necessary. ### Slots Many optimizer subclasses, such as `Adam`", "tf_utils.is_symbolic_tensor(i) for i in update_ops) if not context.executing_eagerly() or any_symbolic:", "that takes no argument and returns the value # to", "dtype=dtypes.int64, trainable=False, aggregation=tf_variables.VariableAggregation.ONLY_FIRST_REPLICA) self._weights.append(self._iterations) return self._iterations @iterations.setter def iterations(self, variable):", "to be updated. 
indices: a `Tensor` of integral type representing", "for the case when a slot variable has already been", "variables) - get_config (serialization of the optimizer, include all hyper", "(callable(prev_value) or isinstance(prev_value, (ops.Tensor, int, float, learning_rate_schedule.LearningRateSchedule)) or isinstance(value, learning_rate_schedule.LearningRateSchedule)):", "def get_slot(self, var, slot_name): var_key = _var_key(var) slot_dict = self._slots[var_key]", "if synchronization == tf_variables.VariableSynchronization.ON_READ: if trainable: raise ValueError( \"Synchronization value", "SavedModels is not currently \" \"supported. Please file a feature", "`values` slices associated with each unique index. \"\"\" unique_indices, new_index_positions", "normal # graph. if slot_variable is not None: # If", "\"\"\"Sums `values` associated with any non-unique `indices`. Args: values: A", "return backend.get_value(value) return value def variables(self): \"\"\"Returns variables of this", "compatibility. Holds slot variables and hyperparameters when an optimizer is", "Keras models, sometimes variables are created when the model is", "the config dictionary. Arguments: config: A Python dictionary, typically the", "variable. \"\"\" raise NotImplementedError() def _resource_apply_sparse_duplicate_indices(self, grad, handle, indices): \"\"\"Add", "\"\"\" # TODO(allenl): Make the restored optimizer functional by tracing", "# grads_and_vars is a list of tuples (gradient, variable). Do", "float, learning_rate_schedule.LearningRateSchedule)) or isinstance(value, learning_rate_schedule.LearningRateSchedule)): self._hyper[name] = value else: backend.set_value(self._hyper[name],", "divide by the global batch size. Note that when using", "in config: if isinstance(config[\"learning_rate\"], dict): config[\"learning_rate\"] = learning_rate_schedule.deserialize( config[\"learning_rate\"], custom_objects=custom_objects)", "args=(grads_and_vars,), kwargs={\"name\": name}) def _distributed_apply(self, distribution, grads_and_vars, name): \"\"\"`apply_gradients` using", "param def _resource_apply_dense(self, grad, handle): \"\"\"Add ops to apply dense", "Args: slot_variable_position: A `trackable._CheckpointPosition` object indicating the slot variable `Trackable`", "self._track_trackable(value, name, overwrite=True) if name not in self._hyper: self._hyper[name] =", "overhead. Args: grad: a `Tensor` representing the gradient for the", "float, callable, or Tensor.\"\"\" value = self._hyper[hyperparameter_name] if isinstance(value, learning_rate_schedule.LearningRateSchedule):", "from double initialization), and makes variable creator scopes # behave", "`trackable._CheckpointPosition` object indicating the slot variable `Trackable` object to be", "which override this method must deal with repeated indices. See", "Valid types for loss, variables and gradients. \"\"\" return set(", "to `tf.keras.losses.Reduction.SUM_OVER_BATCH_SIZE` for averaging or `tf.keras.losses.Reduction.SUM` for not. If you", "[] self._weights = [] self._iterations = None # For implementing", "self._add_variable_with_custom_getter( name=name, shape=shape, getter=base_layer_utils.make_variable, overwrite=True, initializer=initializer, dtype=dtype, trainable=trainable, use_resource=True, synchronization=synchronization,", "is `tf.distribute.Strategy` aware, which means it automatically sums gradients across", "args=(grad,), group=False)) any_symbolic = any(isinstance(i, ops.Operation) or tf_utils.is_symbolic_tensor(i) for i", "to be restored. 
slot_name: The name of this `Optimizer`'s slot", "the optimizer, include all hyper parameters) \"\"\" def __init__(self, name,", "import base as trackable from tensorflow.python.util import nest from tensorflow.python.util.tf_export", "from tensorflow.python.util.tf_export import keras_export def _deduplicate_indexed_slices(values, indices): \"\"\"Sums `values` associated", "sums gradients across all replicas. To average gradients, you divide", "= initializer strategy = distribute_ctx.get_strategy() with strategy.extended.colocate_vars_with(var): weight = tf_variables.Variable(", "config: if isinstance(config[\"learning_rate\"], dict): config[\"learning_rate\"] = learning_rate_schedule.deserialize( config[\"learning_rate\"], custom_objects=custom_objects) return", "slot_variable=weight) self._weights.append(weight) return weight def get_slot(self, var, slot_name): var_key =", "this `Optimizer`'s slot to restore into. variable: The variable object", "create this optimizer, such as a function used for a", "dependency and otherwise pretend it's a normal # graph. if", "respect variable constraints. If constraint function is passed to any", "context.executing_eagerly() and slot_variable_position.is_simple_variable() # Defer slot variable creation if there", "backend.name_scope(\"update\" + scope_name): update_ops.extend( distribution.extended.update( var, apply_grad_to_update_var, args=(grad,), group=False)) any_symbolic", "None # For implementing Trackable. Stores information about how to", "name, value in sorted(self._hyper.items()): if isinstance(value, ops.Tensor) or callable(value): continue", "tf.keras.optimizers.SGD(learning_rate=0.1) # Compute the gradients for a list of variables.", "using `tf.math.reduce_mean` will give the wrong answer, resulting in gradients", "variables. opt.minimize(loss, var_list=[var1, var2]) # update learning rate opt.learning_rate =", "base as trackable from tensorflow.python.util import nest from tensorflow.python.util.tf_export import", "__setattr__. if name == \"_hyper\": raise e # Backwards compatibility", "of tuples (gradient, variable). Do whatever you # need to", "value else: backend.set_value(self._hyper[name], value) def _get_hyper(self, name, dtype=None): if not", "slot variable # unintentionally (specifically make_template would add a dependency", "gradients of `loss` for the variables in `var_list`. This is", "integral type representing the indices for which the gradient is", "tensorflow.python.framework import tensor_util from tensorflow.python.keras import backend from tensorflow.python.keras import", "taking no arguments which returns the value to minimize. var_list:", "the variable `handle`. Similar to `_apply_sparse`, the `indices` argument to", "unique_indices, new_index_positions = array_ops.unique(indices) summed_values = math_ops.unsorted_segment_sum( values, new_index_positions, array_ops.shape(unique_indices)[0])", "Stores information about how to restore # slot variables which", "use this file except in compliance with the License. #", "sometimes variables are created when the model is first called,", "slot variable has already been created but `variable` has just", "shouldn't keep Tensors as member variables. Generally you should be", "+ str(w.shape)) weight_value_tuples.append((p, w)) backend.batch_set_value(weight_value_tuples) def add_weight(self, name, shape, dtype=None,", "compatible, not thread-safe. The user needs to perform synchronization if", "# to minimize. 
loss = lambda: 3 * var1 *", "value): \"\"\"set hyper `name` to value. value can be callable,", "if isinstance(lr_t, learning_rate_schedule.LearningRateSchedule): local_step = math_ops.cast(self.iterations, var_dtype) lr_t = math_ops.cast(lr_t(local_step),", "graph building. and not ops.get_default_graph()._variable_creator_stack): # pylint: disable=protected-access initializer =", "loss, params): grads = self.get_gradients(loss, params) grads_and_vars = list(zip(grads, params))", "restoring until it gets created # normally. We keep a", "print_function import abc import functools import six from tensorflow.python.distribute import", "the gradients before applying them you can instead use the", "if not for this case). Deferring is mostly harmless #", "values deterministically. for name, value in sorted(self._hyper.items()): if isinstance(value, ops.Tensor)", "respect to `params`. Arguments: loss: Loss tensor. params: List of", "By default the correct behavior, to sum non-unique indices and", "included for backward compatibility to allow time inverse decay of", "The TensorFlow Authors. All Rights Reserved. # # Licensed under", "loss, var_list, grad_loss=None, name=None): \"\"\"Minimize `loss` by updating `var_list`. This", "weights): params = self.weights if len(params) != len(weights): raise ValueError(", "`handle`. Args: grad: a `Tensor` representing the gradient. handle: a", "the variables have gradients. \"\"\" grads_and_vars = _filter_grads(grads_and_vars) var_list =", "of `Variable` objects. Use callable when the variable list would", "A Python dictionary, typically the output of get_config. custom_objects: A", "entire optimizer is currently thread compatible, not thread-safe. The user", "tensors: dtype = t.dtype.base_dtype if dtype not in valid_dtypes: raise", "that can be many times too big. ### Variable Constraint", "tensor. params: List of variables. Returns: List of gradient tensors.", "specified gradients. If `global_step` was not None, that operation also", "loss, params): \"\"\"Returns gradients of `loss` with respect to `params`.", "\"Cannot use a constraint function on a sparse variable.\") return", "(because we don't want to # save the slot variable", "an optimizer. The same optimizer can be reinstantiated later (without", "tuple(filtered) if not filtered: raise ValueError(\"No gradients provided for any", "variable `handle`. Similar to `_apply_sparse`, the `indices` argument to this", "variable, for looking up slots. In graph mode the name", "# Slot variables are not owned by any one object", "slot_variable = slot_dict.get(slot_name, None) if (slot_variable is None and context.executing_eagerly()", "slot_variable): \"\"\"Restore a newly created slot variable's value.\"\"\" variable_key =", "divide your loss by the global batch size, which is", "As a result, using `tf.math.reduce_mean` will give the wrong answer,", "context from tensorflow.python.framework import dtypes from tensorflow.python.framework import ops from", "# symbolic then the step update should be carried out", "g in grads] grads_and_vars = zip(processed_grads, var_list) # grads_and_vars is", "= weight self._restore_slot_variable( slot_name=slot_name, variable=var, slot_variable=weight) self._weights.append(weight) return weight def", "Indices are unique. 
[Overlapping n-gram excerpts of what appears to be TensorFlow's Keras `OptimizerV2` base class (optimizer_v2.py): Apache-2.0 license header, module docstring, the `minimize`, `_compute_gradients`, and `apply_gradients` methods, gradient filtering and de-duplication helpers (`_filter_grads`, `_deduplicate_indexed_slices`), slot-variable and hyperparameter management, the `_resource_apply_dense` / `_resource_apply_sparse` hooks, `get_config` / `from_config` serialization, and the `_RestoredOptimizer` checkpoint-compatibility class.]
Variable is always present, but gradient can be `None`.", "per-example losses and then divide by the global batch size.", "method is the reverse of `get_config`, capable of instantiating the", "Slot variables are not owned by any one object (because", "import initializers from tensorflow.python.keras.engine import base_layer_utils from tensorflow.python.keras.optimizer_v2 import learning_rate_schedule", "self._deferred_slot_restorations.setdefault( slot_name, {}).setdefault(variable_key, []).append( slot_variable_position) def _filter_grads(grads_and_vars): \"\"\"Filter out iterable", "= self.add_weight( \"iter\", shape=[], dtype=dtypes.int64, trainable=False, aggregation=tf_variables.VariableAggregation.ONLY_FIRST_REPLICA) self._weights.append(self._iterations) return self._iterations", "if callable(param) else param def _resource_apply_dense(self, grad, handle): \"\"\"Add ops", "dtypes.float32 if isinstance(initializer, six.string_types) or callable(initializer): initializer = initializers.get(initializer) if", "variable}} self._slots = {} self._slot_names = [] self._weights = []", "the gradient is nonzero. Indices may be repeated. Returns: An", "var2 * var2 # In graph mode, returns op that", "In graph mode, returns op that minimizes the loss by", "grads = [clip_ops.clip_by_norm(g, self.clipnorm) for g in grads] if hasattr(self,", "`Optimizer` constructor. Returns: An `Operation` that applies the specified gradients.", "= [] for grad, var in grads_and_vars: if grad is", "gradient function not implemented). \"\"\" params = nest.flatten(params) with backend.get_graph().as_default():", "valid_dtypes = self._valid_dtypes() for t in tensors: dtype = t.dtype.base_dtype", "the API to add Ops to train a model. You", "var2]) ``` ### Custom training loop with Keras models In", "if name == \"lr\": name = \"learning_rate\" if name in", "# (aside from double initialization), and makes variable creator scopes", "callable(value): return value() if tensor_util.is_tensor(value): return backend.get_value(value) return value def", "applies gradients. Args: grads_and_vars: List of (gradient, variable) pairs. name:", "to get the value for the hyper parameter. Hyper parameters", "if kwargs[k] < 0: raise ValueError(\"Expected {} >= 0, received:", "# behave the same way they do when graph building.", "], ... }, # ... } self._deferred_slot_restorations = {} decay", "to # save the slot variable if the optimizer is", "minimize to update the list of variables. opt.minimize(loss, var_list=[var1, var2])", "the slots that it uses. Once you have a slot", "of its subclasses such as `tf.keras.optimizers.SGD`, `tf.keras.optimizers.Adam`. ### Usage ```python", "raise ValueError(\"Expected {} >= 0, received: {}\".format(k, kwargs[k])) self._use_locking =", "learning_rate_schedule.LearningRateSchedule): local_step = math_ops.cast(self.iterations, var_dtype) lr_t = math_ops.cast(lr_t(local_step), var_dtype) if", "A `trackable._CheckpointPosition` object indicating the slot variable `Trackable` object to", "are unique. Returns: An `Operation` which updates the value of", "grad is None: raise ValueError(\"Variable {} has `None` for gradient.", "created\") self._iterations = variable self._weights.append(self._iterations) def _decayed_lr(self, var_dtype): \"\"\"Get decayed", "for optimizers. This class defines the API to add Ops", "which means it automatically sums gradients across all replicas. To", "has `None` for gradient. 
\" \"Please make sure that all", "var_dtype) if self._initial_decay > 0.: local_step = math_ops.cast(self.iterations, var_dtype) decay_t", "optimizer, but currently we do not support using the optimizer", "{}\".format(decay)) self._initial_decay = decay if \"clipnorm\" in kwargs: self.clipnorm =", "raise NotImplementedError() def _resource_apply_sparse_duplicate_indices(self, grad, handle, indices): \"\"\"Add ops to", "\"License\"); # you may not use this file except in", "gv[1]) for gv in grads_and_vars] # Ask the optimizer to", "you want to average gradients, you should use `tf.math.reduce_sum` to", "_call_if_callable(self, param): \"\"\"Call the function if param is callable.\"\"\" return", "important when graph building for the case when a slot", "and `summed_values` contains the sum of `values` slices associated with", "models without input shape pre-defined, or 2) subclassed models. Pass", "def _var_key(var): \"\"\"Key for representing a primary variable, for looking", "on read. trainable = False elif trainable is None: trainable", "variables as tf_variables from tensorflow.python.platform import tf_logging as logging from", "{} decay = kwargs.pop(\"decay\", 0.0) if decay < 0.: raise", "applies the specified gradients. If `global_step` was not None, that", "of `Variable` objects to update to minimize `loss`, or a", "var_list): pass def _create_hypers(self): if self._hypers_created: return # Iterate hyper", "var_dtype) lr_t = lr_t / (1. + decay_t * local_step)", "rate. `lr` is included for backward compatibility, recommended to use", "1. indices: A one-dimensional integer `Tensor`, indexing into the first", "subclasses such as `tf.keras.optimizers.SGD`, `tf.keras.optimizers.Adam`. ### Usage ```python # Create", "# TODO(tanzheny): Maybe share this logic with base_layer. def set_weights(self,", "is nonetheless important when graph building for the case when", "is mostly harmless # (aside from double initialization), and makes", "and otherwise pretend it's a normal # graph. if slot_variable", "own optimization algorithm, simply inherit from this class and override", "is a de-duplicated version of `indices` and `summed_values` contains the", "callable that takes no argument and returns the value #", "the desired parameters. opt = tf.keras.optimizers.SGD(learning_rate=0.1) # `loss` is a", "# Ask the optimizer to apply the capped gradients. opt.apply_gradients(capped_grads_and_vars)", "avoid this overhead. Args: grad: a `Tensor` representing the gradient", "or evaluation loops. See the `reduction` argument of your loss", "unique. Returns: An `Operation` which updates the value of the", "with ops.control_dependencies( [resource_variable_ops.resource_scatter_update(x.handle, i, v)]): return x.value() # --------------- #", "\" + str(w.shape)) weight_value_tuples.append((p, w)) backend.batch_set_value(weight_value_tuples) def add_weight(self, name, shape,", "var._in_graph_mode: return var._shared_name return var._unique_id def _get_slot_key_from_var(var, slot_name): \"\"\"Get the", "or var_list is None. \"\"\" # TODO(josh11b): Test that we", "`var_list`. This method simply computes gradient using `tf.GradientTape` and calls", "return math_ops.cast(value, dtype) else: return value def __getattribute__(self, name): \"\"\"Overridden", "grad, var in grads_and_vars: scope_name = (\"\" if ops.executing_eagerly_outside_functions() else", "optimizer with the desired parameters. 
opt = tf.keras.optimizers.SGD(learning_rate=0.1) # `loss`", "valid_dtypes: raise ValueError(\"Invalid type %r for %s, expected: %s.\" %", "was not None, that operation also increments `global_step`. Raises: TypeError:", "= [process_gradient(g) for g in grads] grads_and_vars = zip(processed_grads, var_list)", "def _resource_apply_dense(self, grad, handle): \"\"\"Add ops to apply dense gradients", "variable which has an associated slot variable is created or", "var_list=var_list, grad_loss=grad_loss) return self.apply_gradients(grads_and_vars, name=name) def _compute_gradients(self, loss, var_list, grad_loss=None):", "self._hyper[name] = value else: backend.set_value(self._hyper[name], value) def _get_hyper(self, name, dtype=None):", "def add_weight(self, name, shape, dtype=None, initializer=\"zeros\", trainable=None, synchronization=tf_variables.VariableSynchronization.AUTO, aggregation=tf_variables.VariableAggregation.NONE): if", "gradients to the variable `handle`. Args: grad: a `Tensor` representing", "* var1 + 2 * var2 # In eager mode,", "ops.control_dependencies( [resource_variable_ops.resource_scatter_add(x.handle, i, v)]): return x.value() def _resource_scatter_update(self, x, i,", "if not self._hypers_created: self._create_hypers() value = self._hyper[name] if isinstance(value, learning_rate_schedule.LearningRateSchedule):", "We didn't make the slot variable. Defer restoring until it", "want to process the gradient before applying then call `tf.GradientTape`", "the first part of `minimize()`. It returns a list of", "of an optimizer. The same optimizer can be reinstantiated later", "Do whatever you # need to the 'gradient' part, for", "ops without gradient: \" \"K.argmax, K.round, K.eval.\".format(param)) if hasattr(self, \"clipnorm\"):", "In eager mode the name is derived from the var", "incomplete before `minimize` and the variables are created at the", "except AttributeError as e: # Needed to avoid infinite recursion", "as tape: if not callable(var_list): tape.watch(var_list) loss_value = loss() if", "is not None: raise RuntimeError( \"Cannot use a constraint function", "a slot name you can ask the optimizer for the" ]
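The "Write a customized optimizer" checklist above can be exercised with a small subclass. The sketch below is illustrative and not part of the recovered source; it assumes the TF 2.x OptimizerV2 API (exposed as `tf.keras.optimizers.Optimizer` in older releases and as `tf.keras.optimizers.legacy.Optimizer` in recent ones), and the class name `MySGD` is hypothetical.

import tensorflow as tf

class MySGD(tf.keras.optimizers.Optimizer):
    """Plain gradient descent built on the OptimizerV2 extension points (sketch)."""

    def __init__(self, learning_rate=0.01, name="MySGD", **kwargs):
        super(MySGD, self).__init__(name, **kwargs)
        # Hyper parameters registered this way may be plain values, tensors,
        # or callables, matching the docstring above.
        self._set_hyper("learning_rate", kwargs.get("lr", learning_rate))

    def _resource_apply_dense(self, grad, var, apply_state=None):
        # Dense update: var <- var - lr * grad.
        lr = self._get_hyper("learning_rate", var.dtype.base_dtype)
        return var.assign_sub(lr * grad)

    def _resource_apply_sparse(self, grad, var, indices, apply_state=None):
        # Sparse update touches only the rows referenced by `indices`.
        lr = self._get_hyper("learning_rate", var.dtype.base_dtype)
        return self._resource_scatter_add(var, indices, -lr * grad)

    def get_config(self):
        config = super(MySGD, self).get_config()
        config.update({
            "learning_rate": self._serialize_hyperparameter("learning_rate"),
        })
        return config

A momentum-style variant would additionally create per-variable state with `self.add_slot(var, "momentum")` in `_create_slots` and read it back with `self.get_slot(var, "momentum")` inside the apply methods.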
[ "import requests headers = { 'content-type': 'application/json', 'Authorization': 'Token <PASSWORD>'", "= 'http://127.0.0.1:8000/api/v2/avaliacoes' resultado = requests.get(url=url_base_cursos, headers=headers) assert resultado.status_code == 200", "'content-type': 'application/json', 'Authorization': 'Token <PASSWORD>' } url_base_cursos = 'http://127.0.0.1:8000/api/v2/cursos' url_base_avaliacoes", "url_base_cursos = 'http://127.0.0.1:8000/api/v2/cursos' url_base_avaliacoes = 'http://127.0.0.1:8000/api/v2/avaliacoes' resultado = requests.get(url=url_base_cursos, headers=headers)", "= { 'content-type': 'application/json', 'Authorization': 'Token <PASSWORD>' } url_base_cursos =", "'http://127.0.0.1:8000/api/v2/cursos' url_base_avaliacoes = 'http://127.0.0.1:8000/api/v2/avaliacoes' resultado = requests.get(url=url_base_cursos, headers=headers) assert resultado.status_code", "<PASSWORD>' } url_base_cursos = 'http://127.0.0.1:8000/api/v2/cursos' url_base_avaliacoes = 'http://127.0.0.1:8000/api/v2/avaliacoes' resultado =", "} url_base_cursos = 'http://127.0.0.1:8000/api/v2/cursos' url_base_avaliacoes = 'http://127.0.0.1:8000/api/v2/avaliacoes' resultado = requests.get(url=url_base_cursos,", "= 'http://127.0.0.1:8000/api/v2/cursos' url_base_avaliacoes = 'http://127.0.0.1:8000/api/v2/avaliacoes' resultado = requests.get(url=url_base_cursos, headers=headers) assert", "headers = { 'content-type': 'application/json', 'Authorization': 'Token <PASSWORD>' } url_base_cursos", "<filename>escola/teste_get.py import requests headers = { 'content-type': 'application/json', 'Authorization': 'Token", "{ 'content-type': 'application/json', 'Authorization': 'Token <PASSWORD>' } url_base_cursos = 'http://127.0.0.1:8000/api/v2/cursos'", "url_base_avaliacoes = 'http://127.0.0.1:8000/api/v2/avaliacoes' resultado = requests.get(url=url_base_cursos, headers=headers) assert resultado.status_code ==", "requests headers = { 'content-type': 'application/json', 'Authorization': 'Token <PASSWORD>' }", "'Authorization': 'Token <PASSWORD>' } url_base_cursos = 'http://127.0.0.1:8000/api/v2/cursos' url_base_avaliacoes = 'http://127.0.0.1:8000/api/v2/avaliacoes'", "'application/json', 'Authorization': 'Token <PASSWORD>' } url_base_cursos = 'http://127.0.0.1:8000/api/v2/cursos' url_base_avaliacoes =", "'Token <PASSWORD>' } url_base_cursos = 'http://127.0.0.1:8000/api/v2/cursos' url_base_avaliacoes = 'http://127.0.0.1:8000/api/v2/avaliacoes' resultado" ]
[ "get_crs(api): tested_crs = \"CRS-2021-1\" resp = api.get(\"wafcrs?name=\" + tested_crs) if", "in waf_policy_obj[\"pre_crs_groups\"]: if pre_crs[\"name\"] == \"VDI_409_ENFORCE_XML\": pre_crs[\"rules\"] = xml_rule pre_crs[\"enable\"]", "waf_policy_obj[\"whitelist\"][\"rules\"].append(allowlist_rule) def get_id_from_group(group): pattern = re.compile(\"[^\\d]*(?P<group_id>\\d\\d\\d)\") match = pattern.match(group[\"name\"]) assert", "os import sys from avi.sdk.avi_api import ApiSession API_VERSION = \"18.2.13\"", "re.compile(\"[^\\d]*(?P<group_id>\\d\\d\\d)\") match = pattern.match(group[\"name\"]) assert match, \"can not extract group", "requests with /broker/xml uri xml_rule = [ { \"index\": 0,", "False def add_pre_crs_group(waf_policy_obj): #add a rule to parse body as", "[]): group_id = get_id_from_group(crs_group) if group_id >= 950: crs_group[\"enable\"] =", "policy successfully') else: logger.error('Error : %s' % resp.text) if __name__", "\"mode\": \"WAF_MODE_DETECTION_ONLY\", \"waf_profile_ref\": \"/api/wafprofile?name=System-WAF-Profile\" } waf_crs = get_crs(api) if waf_crs", "args, waf_policy_obj): add_allowlist_rule(waf_policy_obj) disable_crs_response_rules(waf_policy_obj) add_pre_crs_group(waf_policy_obj) resp = api.put('wafpolicy/%s' %waf_policy_obj['uuid'], data=waf_policy_obj)", "default='admin') parser.add_argument('-a', '--authtoken', help='Authentication token') parser.add_argument('-c', '--controller_ip', action=\"store\", help='controller ip')", "if __name__ == '__main__': parser = argparse.ArgumentParser() parser.add_argument('-u', '--user', action=\"store\",", "name', default='admin') parser.add_argument('-a', '--authtoken', help='Authentication token') parser.add_argument('-c', '--controller_ip', action=\"store\", help='controller", "if rule[\"index\"]>index: index = rule[\"index\"] allowlist_rule[\"index\"] = index+1 waf_policy_obj[\"whitelist\"][\"rules\"].append(allowlist_rule) def", "uri xml_rule = [ { \"index\": 0, \"name\": \"enforce XML", "waf_crs = json.loads(resp.text)[\"results\"] return waf_crs[0] def create_vdi_waf_policy(api, args): waf_policy_obj =", "logging.getLogger(__name__) def add_allowlist_rule(waf_policy_obj): #add a allowlist rule to allow request", "match = pattern.match(group[\"name\"]) assert match, \"can not extract group id", "logging import os import sys from avi.sdk.avi_api import ApiSession API_VERSION", "side rules and some specific rules for crs_group in waf_policy_obj.get(\"crs_groups\",", ": %s', resp.text) exit(0) waf_crs = json.loads(resp.text)[\"results\"] return waf_crs[0] def", "WAF policy successfully') else: logger.error('Error : %s' % resp.text) if", "import sys from avi.sdk.avi_api import ApiSession API_VERSION = \"18.2.13\" SYSTEM_WAF_POLICY_VDI='System-WAF-Policy-VDI'", "as xml for requests with /broker/xml uri xml_rule = [", "= 0 waf_policy_obj.setdefault(\"whitelist\", {}).setdefault(\"rules\", []) for rule in waf_policy_obj[\"whitelist\"][\"rules\"][:]: if", "= int(match.group(\"group_id\")) assert groupid == 0 or 100 <= groupid", "= parser.parse_args() if args.password: api = ApiSession.get_session(args.controller_ip, args.user, args.password, tenant=args.tenant,", "SYSTEM_WAF_POLICY_VDI, \"mode\": \"WAF_MODE_DETECTION_ONLY\", \"waf_profile_ref\": \"/api/wafprofile?name=System-WAF-Profile\" } waf_crs = get_crs(api) if", "\"WAF will buffer the whole request body first and then", "VDI, client wants to stream data between client and server", "request with uri beginning with /ice/ allowlist_rule={ \"index\": 0, 
\"name\":", "help='controller user', default='admin') parser.add_argument('-p', '--password', action=\"store\", help='controller user password', default='<PASSWORD>')", "pre_crs[\"rules\"] = xml_rule pre_crs[\"enable\"] = True return if pre_crs[\"index\"] >", "parser.add_argument('-p', '--password', action=\"store\", help='controller user password', default='<PASSWORD>') parser.add_argument('-t', '--tenant', action=\"store\",", "rule[\"rule_id\"] == \"932105\": rule[\"enable\"] = False def add_pre_crs_group(waf_policy_obj): #add a", "%s' % resp.text) if __name__ == '__main__': parser = argparse.ArgumentParser()", "some URLs like /ice/..., we should allow these URLs\", \"actions\":", "group '{}'\".format(group[\"name\"]) groupid = int(match.group(\"group_id\")) assert groupid == 0 or", "add_allowlist_rule(waf_policy_obj): #add a allowlist rule to allow request with uri", "a rule to parse body as xml for requests with", "def add_pre_crs_group(waf_policy_obj): #add a rule to parse body as xml", "in waf_policy_obj: waf_policy_obj[\"pre_crs_groups\"] = list() for pre_crs in waf_policy_obj[\"pre_crs_groups\"]: if", "rule[\"name\"] == \"allowlist-start-with-ice\": waf_policy_obj[\"whitelist\"][\"rules\"].remove(rule) if rule[\"index\"]>index: index = rule[\"index\"] allowlist_rule[\"index\"]", "xml for requests with /broker/xml uri xml_rule = [ {", "and enforce the request body to be parsed as XML", "Inc. import argparse import json import re import logging import", "logger.error('Error : %s', resp.text) exit(0) waf_crs = json.loads(resp.text)[\"results\"] return waf_crs[0]", "args.password, tenant=args.tenant, api_version=API_VERSION) elif args.authtoken: api = ApiSession.get_session(args.controller_ip, args.user, tenant=args.tenant,", ">= 950: crs_group[\"enable\"] = False for rule in crs_group.get(\"rules\", []):", "and server for some URLs like /ice/..., we should allow", "with /broker/xml uri xml_rule = [ { \"index\": 0, \"name\":", "\\\"phase:1,id:4099822,t:none,nolog,pass,chain\\\" \\n SecRule REQUEST_URI \\\"@streq /broker/xml\\\" \\\"t:none,ctl:requestBodyProcessor=XML\\\"\" } ] pre_crs_group", "crs_group.get(\"rules\", []): if rule[\"rule_id\"] == \"920330\" or rule[\"rule_id\"] == \"932105\":", "args.user, args.password, tenant=args.tenant, api_version=API_VERSION) elif args.authtoken: api = ApiSession.get_session(args.controller_ip, args.user,", "== \"allowlist-start-with-ice\": waf_policy_obj[\"whitelist\"][\"rules\"].remove(rule) if rule[\"index\"]>index: index = rule[\"index\"] allowlist_rule[\"index\"] =", "from group '{}'\".format(group[\"name\"]) groupid = int(match.group(\"group_id\")) assert groupid == 0", "#disable response side rules and some specific rules for crs_group", "} } index = 0 waf_policy_obj.setdefault(\"whitelist\", {}).setdefault(\"rules\", []) for rule", "resp = api.put('wafpolicy/%s' %waf_policy_obj['uuid'], data=waf_policy_obj) if resp.status_code in range(200, 300):", "\"index\": 0, \"name\": \"enforce XML parsing for /broker/xml\", \"description\": \"Clients", "extract group id from group '{}'\".format(group[\"name\"]) groupid = int(match.group(\"group_id\")) assert", "group_id = get_id_from_group(crs_group) if group_id >= 950: crs_group[\"enable\"] = False", "to be parsed as XML in WAF\", \"rule\": \"SecRule REQUEST_METHOD", "waf_policy_obj = api.get_object_by_name('wafpolicy', SYSTEM_WAF_POLICY_VDI) if not waf_policy_obj: create_vdi_waf_policy(api, args) else:", "None: return waf_policy_obj[\"waf_crs_ref\"]=\"/api/wafcrs?name=\"+waf_crs[\"name\"] 
waf_policy_obj[\"crs_groups\"] = list() for group in waf_crs[\"groups\"]:", "\"waf_profile_ref\": \"/api/wafprofile?name=System-WAF-Profile\" } waf_crs = get_crs(api) if waf_crs is None:", "= list() for group in waf_crs[\"groups\"]: waf_policy_obj[\"crs_groups\"].append(group) add_allowlist_rule(waf_policy_obj) disable_crs_response_rules(waf_policy_obj) add_pre_crs_group(waf_policy_obj)", "xml_rule pre_crs[\"enable\"] = True return if pre_crs[\"index\"] > index: index", "0 if \"pre_crs_groups\" not in waf_policy_obj: waf_policy_obj[\"pre_crs_groups\"] = list() for", "pattern.match(group[\"name\"]) assert match, \"can not extract group id from group", "/ice/ allowlist_rule={ \"index\": 0, \"name\": \"allowlist-start-with-ice\", \"description\": \"WAF will buffer", "pre_crs[\"name\"] == \"VDI_409_ENFORCE_XML\": pre_crs[\"rules\"] = xml_rule pre_crs[\"enable\"] = True return", "as XML in WAF\", \"rule\": \"SecRule REQUEST_METHOD \\\"@streq POST\\\" \\\"phase:1,id:4099822,t:none,nolog,pass,chain\\\"", "int(match.group(\"group_id\")) assert groupid == 0 or 100 <= groupid <=", "sys from avi.sdk.avi_api import ApiSession API_VERSION = \"18.2.13\" SYSTEM_WAF_POLICY_VDI='System-WAF-Policy-VDI' logger", "assert match, \"can not extract group id from group '{}'\".format(group[\"name\"])", "index = pre_crs[\"index\"] pre_crs_group[\"index\"] = index + 1 waf_policy_obj[\"pre_crs_groups\"].append(pre_crs_group) def", "== 0 or 100 <= groupid <= 999, \"group id", "/broker/xml uri xml_rule = [ { \"index\": 0, \"name\": \"enforce", "disable_crs_response_rules(waf_policy_obj) add_pre_crs_group(waf_policy_obj) resp = api.post('wafpolicy', data=json.dumps(waf_policy_obj)) if resp.status_code in range(200,", "logger.error(\"Controller does not have CRS %s, please install first.\" %", "for pre_crs in waf_policy_obj[\"pre_crs_groups\"]: if pre_crs[\"name\"] == \"VDI_409_ENFORCE_XML\": pre_crs[\"rules\"] =", "= \"18.2.13\" SYSTEM_WAF_POLICY_VDI='System-WAF-Policy-VDI' logger = logging.getLogger(__name__) def add_allowlist_rule(waf_policy_obj): #add a", "groupid def disable_crs_response_rules(waf_policy_obj): #disable response side rules and some specific", "\"allowlist-start-with-ice\": waf_policy_obj[\"whitelist\"][\"rules\"].remove(rule) if rule[\"index\"]>index: index = rule[\"index\"] allowlist_rule[\"index\"] = index+1", "args): waf_policy_obj = { \"name\": SYSTEM_WAF_POLICY_VDI, \"mode\": \"WAF_MODE_DETECTION_ONLY\", \"waf_profile_ref\": \"/api/wafprofile?name=System-WAF-Profile\"", "for group '{}' not in expected range\".format(group[\"name\"]) return groupid def", "SYSTEM_WAF_POLICY_VDI='System-WAF-Policy-VDI' logger = logging.getLogger(__name__) def add_allowlist_rule(waf_policy_obj): #add a allowlist rule", "client wants to stream data between client and server for", "xml_rule } index = 0 if \"pre_crs_groups\" not in waf_policy_obj:", "\"description\": \"Clients often send the wrong Content-Type header. We ignore", "%waf_policy_obj['uuid'], data=waf_policy_obj) if resp.status_code in range(200, 300): logger.debug('Create WAF policy", "= api.get_object_by_name('wafpolicy', SYSTEM_WAF_POLICY_VDI) if not waf_policy_obj: create_vdi_waf_policy(api, args) else: update_waf_policy(api,", "With VDI, client wants to stream data between client and", "request body first and then release to backend. 
With VDI,", "authtokentoken must be provided.\") sys.exit(1) waf_policy_obj = api.get_object_by_name('wafpolicy', SYSTEM_WAF_POLICY_VDI) if", "= index+1 waf_policy_obj[\"whitelist\"][\"rules\"].append(allowlist_rule) def get_id_from_group(group): pattern = re.compile(\"[^\\d]*(?P<group_id>\\d\\d\\d)\") match =", "return if pre_crs[\"index\"] > index: index = pre_crs[\"index\"] pre_crs_group[\"index\"] =", "\"SecRule REQUEST_METHOD \\\"@streq POST\\\" \\\"phase:1,id:4099822,t:none,nolog,pass,chain\\\" \\n SecRule REQUEST_URI \\\"@streq /broker/xml\\\"", "= list() for pre_crs in waf_policy_obj[\"pre_crs_groups\"]: if pre_crs[\"name\"] == \"VDI_409_ENFORCE_XML\":", "json import re import logging import os import sys from", "api_version=API_VERSION) elif args.authtoken: api = ApiSession.get_session(args.controller_ip, args.user, tenant=args.tenant, token=args.authtoken, api_version=API_VERSION)", "\"index\": 0, \"name\": \"allowlist-start-with-ice\", \"description\": \"WAF will buffer the whole", "group_id >= 950: crs_group[\"enable\"] = False for rule in crs_group.get(\"rules\",", "waf_crs[\"groups\"]: waf_policy_obj[\"crs_groups\"].append(group) add_allowlist_rule(waf_policy_obj) disable_crs_response_rules(waf_policy_obj) add_pre_crs_group(waf_policy_obj) resp = api.post('wafpolicy', data=json.dumps(waf_policy_obj)) if", "REQUEST_URI \\\"@streq /broker/xml\\\" \\\"t:none,ctl:requestBodyProcessor=XML\\\"\" } ] pre_crs_group = { \"index\":", "/broker/xml\\\" \\\"t:none,ctl:requestBodyProcessor=XML\\\"\" } ] pre_crs_group = { \"index\": 0, \"name\":", "\"SENSITIVE\", \"match_str\": [ \"/ice/\" ], \"match_criteria\": \"BEGINS_WITH\" } } }", "\"name\": SYSTEM_WAF_POLICY_VDI, \"mode\": \"WAF_MODE_DETECTION_ONLY\", \"waf_profile_ref\": \"/api/wafprofile?name=System-WAF-Profile\" } waf_crs = get_crs(api)", "waf_policy_obj[\"whitelist\"][\"rules\"][:]: if rule[\"name\"] == \"allowlist-start-with-ice\": waf_policy_obj[\"whitelist\"][\"rules\"].remove(rule) if rule[\"index\"]>index: index =", "%s', resp.text) exit(0) waf_crs = json.loads(resp.text)[\"results\"] return waf_crs[0] def create_vdi_waf_policy(api,", "some specific rules for crs_group in waf_policy_obj.get(\"crs_groups\", []): group_id =", "policy successfully') else: logger.error('Error : %s' % resp.text) def update_waf_policy(api,", "None logger.error('Error : %s', resp.text) exit(0) waf_crs = json.loads(resp.text)[\"results\"] return", "groupid <= 999, \"group id for group '{}' not in", "and some specific rules for crs_group in waf_policy_obj.get(\"crs_groups\", []): group_id", "rule in waf_policy_obj[\"whitelist\"][\"rules\"][:]: if rule[\"name\"] == \"allowlist-start-with-ice\": waf_policy_obj[\"whitelist\"][\"rules\"].remove(rule) if rule[\"index\"]>index:", "header and enforce the request body to be parsed as", "or authtokentoken must be provided.\") sys.exit(1) waf_policy_obj = api.get_object_by_name('wafpolicy', SYSTEM_WAF_POLICY_VDI)", "rules and some specific rules for crs_group in waf_policy_obj.get(\"crs_groups\", []):", "rule[\"rule_id\"] == \"920330\" or rule[\"rule_id\"] == \"932105\": rule[\"enable\"] = False", "first and then release to backend. 
With VDI, client wants", "index = 0 waf_policy_obj.setdefault(\"whitelist\", {}).setdefault(\"rules\", []) for rule in waf_policy_obj[\"whitelist\"][\"rules\"][:]:", "tenant=args.tenant, token=args.authtoken, api_version=API_VERSION) else: logging.error(\"Either password or authtokentoken must be", "\"index\": 0, \"name\": \"VDI_409_ENFORCE_XML\", \"rules\": xml_rule } index = 0", "= pattern.match(group[\"name\"]) assert match, \"can not extract group id from", "def update_waf_policy(api, args, waf_policy_obj): add_allowlist_rule(waf_policy_obj) disable_crs_response_rules(waf_policy_obj) add_pre_crs_group(waf_policy_obj) resp = api.put('wafpolicy/%s'", "does not have CRS %s, please install first.\" % tested_crs)", "stream data between client and server for some URLs like", "the header and enforce the request body to be parsed", "resp = api.post('wafpolicy', data=json.dumps(waf_policy_obj)) if resp.status_code in range(200, 300): logger.debug('Create", "= 0 if \"pre_crs_groups\" not in waf_policy_obj: waf_policy_obj[\"pre_crs_groups\"] = list()", "[]) for rule in waf_policy_obj[\"whitelist\"][\"rules\"][:]: if rule[\"name\"] == \"allowlist-start-with-ice\": waf_policy_obj[\"whitelist\"][\"rules\"].remove(rule)", "to stream data between client and server for some URLs", "\"group id for group '{}' not in expected range\".format(group[\"name\"]) return", "allow request with uri beginning with /ice/ allowlist_rule={ \"index\": 0,", "pre_crs[\"index\"] pre_crs_group[\"index\"] = index + 1 waf_policy_obj[\"pre_crs_groups\"].append(pre_crs_group) def get_crs(api): tested_crs", "help='controller ip') args = parser.parse_args() if args.password: api = ApiSession.get_session(args.controller_ip,", "in waf_policy_obj[\"whitelist\"][\"rules\"][:]: if rule[\"name\"] == \"allowlist-start-with-ice\": waf_policy_obj[\"whitelist\"][\"rules\"].remove(rule) if rule[\"index\"]>index: index", "% resp.text) def update_waf_policy(api, args, waf_policy_obj): add_allowlist_rule(waf_policy_obj) disable_crs_response_rules(waf_policy_obj) add_pre_crs_group(waf_policy_obj) resp", "from avi.sdk.avi_api import ApiSession API_VERSION = \"18.2.13\" SYSTEM_WAF_POLICY_VDI='System-WAF-Policy-VDI' logger =", "950: crs_group[\"enable\"] = False for rule in crs_group.get(\"rules\", []): if", "\"932105\": rule[\"enable\"] = False def add_pre_crs_group(waf_policy_obj): #add a rule to", "argparse import json import re import logging import os import", "pre_crs[\"index\"] > index: index = pre_crs[\"index\"] pre_crs_group[\"index\"] = index +", "= [ { \"index\": 0, \"name\": \"enforce XML parsing for", "\"Clients often send the wrong Content-Type header. 
We ignore the", "rule in crs_group.get(\"rules\", []): if rule[\"rule_id\"] == \"920330\" or rule[\"rule_id\"]", "if pre_crs[\"name\"] == \"VDI_409_ENFORCE_XML\": pre_crs[\"rules\"] = xml_rule pre_crs[\"enable\"] = True", "parsed as XML in WAF\", \"rule\": \"SecRule REQUEST_METHOD \\\"@streq POST\\\"", "successfully') else: logger.error('Error : %s' % resp.text) if __name__ ==", ": %s' % resp.text) if __name__ == '__main__': parser =", "= ApiSession.get_session(args.controller_ip, args.user, tenant=args.tenant, token=args.authtoken, api_version=API_VERSION) else: logging.error(\"Either password or", "args.user, tenant=args.tenant, token=args.authtoken, api_version=API_VERSION) else: logging.error(\"Either password or authtokentoken must", "False for rule in crs_group.get(\"rules\", []): if rule[\"rule_id\"] == \"920330\"", "avi.sdk.avi_api import ApiSession API_VERSION = \"18.2.13\" SYSTEM_WAF_POLICY_VDI='System-WAF-Policy-VDI' logger = logging.getLogger(__name__)", "resp.status_code in range(200, 300): logger.debug('Create WAF policy successfully') else: logger.error('Error", "= api.put('wafpolicy/%s' %waf_policy_obj['uuid'], data=waf_policy_obj) if resp.status_code in range(200, 300): logger.debug('Create", "tenant=args.tenant, api_version=API_VERSION) elif args.authtoken: api = ApiSession.get_session(args.controller_ip, args.user, tenant=args.tenant, token=args.authtoken,", "groupid == 0 or 100 <= groupid <= 999, \"group", "\"name\": \"allowlist-start-with-ice\", \"description\": \"WAF will buffer the whole request body", "send the wrong Content-Type header. We ignore the header and", "#add a allowlist rule to allow request with uri beginning", "resp = api.get(\"wafcrs?name=\" + tested_crs) if resp.status_code not in range(200,", "if resp.status_code not in range(200, 300): if resp.status_code == 404:", "api = ApiSession.get_session(args.controller_ip, args.user, args.password, tenant=args.tenant, api_version=API_VERSION) elif args.authtoken: api", "group '{}' not in expected range\".format(group[\"name\"]) return groupid def disable_crs_response_rules(waf_policy_obj):", "often send the wrong Content-Type header. We ignore the header", "URLs\", \"actions\": [ \"WAF_POLICY_WHITELIST_ACTION_ALLOW\" ], \"match\": { \"path\": { \"match_case\":", "parse body as xml for requests with /broker/xml uri xml_rule", "specific rules for crs_group in waf_policy_obj.get(\"crs_groups\", []): group_id = get_id_from_group(crs_group)", "0 waf_policy_obj.setdefault(\"whitelist\", {}).setdefault(\"rules\", []) for rule in waf_policy_obj[\"whitelist\"][\"rules\"][:]: if rule[\"name\"]", "if resp.status_code in range(200, 300): logger.debug('Create WAF policy successfully') else:", "range(200, 300): logger.debug('Create WAF policy successfully') else: logger.error('Error : %s'", "must be provided.\") sys.exit(1) waf_policy_obj = api.get_object_by_name('wafpolicy', SYSTEM_WAF_POLICY_VDI) if not", "the whole request body first and then release to backend.", "match, \"can not extract group id from group '{}'\".format(group[\"name\"]) groupid", "/broker/xml\", \"description\": \"Clients often send the wrong Content-Type header. 
We", "api.get_object_by_name('wafpolicy', SYSTEM_WAF_POLICY_VDI) if not waf_policy_obj: create_vdi_waf_policy(api, args) else: update_waf_policy(api, args,", "%s' % resp.text) def update_waf_policy(api, args, waf_policy_obj): add_allowlist_rule(waf_policy_obj) disable_crs_response_rules(waf_policy_obj) add_pre_crs_group(waf_policy_obj)", "in waf_crs[\"groups\"]: waf_policy_obj[\"crs_groups\"].append(group) add_allowlist_rule(waf_policy_obj) disable_crs_response_rules(waf_policy_obj) add_pre_crs_group(waf_policy_obj) resp = api.post('wafpolicy', data=json.dumps(waf_policy_obj))", "then release to backend. With VDI, client wants to stream", "> index: index = pre_crs[\"index\"] pre_crs_group[\"index\"] = index + 1", "add_pre_crs_group(waf_policy_obj) resp = api.post('wafpolicy', data=json.dumps(waf_policy_obj)) if resp.status_code in range(200, 300):", "rule to allow request with uri beginning with /ice/ allowlist_rule={", "\"18.2.13\" SYSTEM_WAF_POLICY_VDI='System-WAF-Policy-VDI' logger = logging.getLogger(__name__) def add_allowlist_rule(waf_policy_obj): #add a allowlist", "XML in WAF\", \"rule\": \"SecRule REQUEST_METHOD \\\"@streq POST\\\" \\\"phase:1,id:4099822,t:none,nolog,pass,chain\\\" \\n", "api.post('wafpolicy', data=json.dumps(waf_policy_obj)) if resp.status_code in range(200, 300): logger.debug('Create WAF policy", "parsing for /broker/xml\", \"description\": \"Clients often send the wrong Content-Type", "for rule in crs_group.get(\"rules\", []): if rule[\"rule_id\"] == \"920330\" or", "parser.add_argument('-u', '--user', action=\"store\", help='controller user', default='admin') parser.add_argument('-p', '--password', action=\"store\", help='controller", "if resp.status_code == 404: logger.error(\"Controller does not have CRS %s,", "{ \"name\": SYSTEM_WAF_POLICY_VDI, \"mode\": \"WAF_MODE_DETECTION_ONLY\", \"waf_profile_ref\": \"/api/wafprofile?name=System-WAF-Profile\" } waf_crs =", "buffer the whole request body first and then release to", "} index = 0 waf_policy_obj.setdefault(\"whitelist\", {}).setdefault(\"rules\", []) for rule in", "waf_policy_obj): add_allowlist_rule(waf_policy_obj) disable_crs_response_rules(waf_policy_obj) add_pre_crs_group(waf_policy_obj) resp = api.put('wafpolicy/%s' %waf_policy_obj['uuid'], data=waf_policy_obj) if", "0 or 100 <= groupid <= 999, \"group id for", "= False for rule in crs_group.get(\"rules\", []): if rule[\"rule_id\"] ==", "data=json.dumps(waf_policy_obj)) if resp.status_code in range(200, 300): logger.debug('Create WAF policy successfully')", "\"WAF_MODE_DETECTION_ONLY\", \"waf_profile_ref\": \"/api/wafprofile?name=System-WAF-Profile\" } waf_crs = get_crs(api) if waf_crs is", "logger.debug('Create WAF policy successfully') else: logger.error('Error : %s' % resp.text)", "action=\"store\", help='controller ip') args = parser.parse_args() if args.password: api =", "Content-Type header. 
We ignore the header and enforce the request", "crs_group[\"enable\"] = False for rule in crs_group.get(\"rules\", []): if rule[\"rule_id\"]", "disable_crs_response_rules(waf_policy_obj) add_pre_crs_group(waf_policy_obj) resp = api.put('wafpolicy/%s' %waf_policy_obj['uuid'], data=waf_policy_obj) if resp.status_code in", "== 404: logger.error(\"Controller does not have CRS %s, please install", "id from group '{}'\".format(group[\"name\"]) groupid = int(match.group(\"group_id\")) assert groupid ==", "if \"pre_crs_groups\" not in waf_policy_obj: waf_policy_obj[\"pre_crs_groups\"] = list() for pre_crs", "client and server for some URLs like /ice/..., we should", "update_waf_policy(api, args, waf_policy_obj): add_allowlist_rule(waf_policy_obj) disable_crs_response_rules(waf_policy_obj) add_pre_crs_group(waf_policy_obj) resp = api.put('wafpolicy/%s' %waf_policy_obj['uuid'],", "is None: return waf_policy_obj[\"waf_crs_ref\"]=\"/api/wafcrs?name=\"+waf_crs[\"name\"] waf_policy_obj[\"crs_groups\"] = list() for group in", "not in range(200, 300): if resp.status_code == 404: logger.error(\"Controller does", "group in waf_crs[\"groups\"]: waf_policy_obj[\"crs_groups\"].append(group) add_allowlist_rule(waf_policy_obj) disable_crs_response_rules(waf_policy_obj) add_pre_crs_group(waf_policy_obj) resp = api.post('wafpolicy',", "waf_policy_obj[\"crs_groups\"] = list() for group in waf_crs[\"groups\"]: waf_policy_obj[\"crs_groups\"].append(group) add_allowlist_rule(waf_policy_obj) disable_crs_response_rules(waf_policy_obj)", "\\\"@streq /broker/xml\\\" \\\"t:none,ctl:requestBodyProcessor=XML\\\"\" } ] pre_crs_group = { \"index\": 0,", "help='Authentication token') parser.add_argument('-c', '--controller_ip', action=\"store\", help='controller ip') args = parser.parse_args()", "index: index = pre_crs[\"index\"] pre_crs_group[\"index\"] = index + 1 waf_policy_obj[\"pre_crs_groups\"].append(pre_crs_group)", "Copyright 2021 VMware, Inc. import argparse import json import re", "pattern = re.compile(\"[^\\d]*(?P<group_id>\\d\\d\\d)\") match = pattern.match(group[\"name\"]) assert match, \"can not", "'--user', action=\"store\", help='controller user', default='admin') parser.add_argument('-p', '--password', action=\"store\", help='controller user", "= { \"index\": 0, \"name\": \"VDI_409_ENFORCE_XML\", \"rules\": xml_rule } index", "return waf_crs[0] def create_vdi_waf_policy(api, args): waf_policy_obj = { \"name\": SYSTEM_WAF_POLICY_VDI,", "\"rules\": xml_rule } index = 0 if \"pre_crs_groups\" not in", "resp.text) exit(0) waf_crs = json.loads(resp.text)[\"results\"] return waf_crs[0] def create_vdi_waf_policy(api, args):", "100 <= groupid <= 999, \"group id for group '{}'", "the wrong Content-Type header. 
We ignore the header and enforce", "\"pre_crs_groups\" not in waf_policy_obj: waf_policy_obj[\"pre_crs_groups\"] = list() for pre_crs in", "range\".format(group[\"name\"]) return groupid def disable_crs_response_rules(waf_policy_obj): #disable response side rules and", "return groupid def disable_crs_response_rules(waf_policy_obj): #disable response side rules and some", "\"actions\": [ \"WAF_POLICY_WHITELIST_ACTION_ALLOW\" ], \"match\": { \"path\": { \"match_case\": \"SENSITIVE\",", "if waf_crs is None: return waf_policy_obj[\"waf_crs_ref\"]=\"/api/wafcrs?name=\"+waf_crs[\"name\"] waf_policy_obj[\"crs_groups\"] = list() for", "for group in waf_crs[\"groups\"]: waf_policy_obj[\"crs_groups\"].append(group) add_allowlist_rule(waf_policy_obj) disable_crs_response_rules(waf_policy_obj) add_pre_crs_group(waf_policy_obj) resp =", "[]): if rule[\"rule_id\"] == \"920330\" or rule[\"rule_id\"] == \"932105\": rule[\"enable\"]", "= api.post('wafpolicy', data=json.dumps(waf_policy_obj)) if resp.status_code in range(200, 300): logger.debug('Create WAF", "password', default='<PASSWORD>') parser.add_argument('-t', '--tenant', action=\"store\", help='tenant name', default='admin') parser.add_argument('-a', '--authtoken',", "__name__ == '__main__': parser = argparse.ArgumentParser() parser.add_argument('-u', '--user', action=\"store\", help='controller", "parser.add_argument('-t', '--tenant', action=\"store\", help='tenant name', default='admin') parser.add_argument('-a', '--authtoken', help='Authentication token')", "999, \"group id for group '{}' not in expected range\".format(group[\"name\"])", "waf_policy_obj[\"whitelist\"][\"rules\"].remove(rule) if rule[\"index\"]>index: index = rule[\"index\"] allowlist_rule[\"index\"] = index+1 waf_policy_obj[\"whitelist\"][\"rules\"].append(allowlist_rule)", "300): if resp.status_code == 404: logger.error(\"Controller does not have CRS", "with uri beginning with /ice/ allowlist_rule={ \"index\": 0, \"name\": \"allowlist-start-with-ice\",", "not in expected range\".format(group[\"name\"]) return groupid def disable_crs_response_rules(waf_policy_obj): #disable response", "pre_crs_group[\"index\"] = index + 1 waf_policy_obj[\"pre_crs_groups\"].append(pre_crs_group) def get_crs(api): tested_crs =", "waf_policy_obj[\"pre_crs_groups\"]: if pre_crs[\"name\"] == \"VDI_409_ENFORCE_XML\": pre_crs[\"rules\"] = xml_rule pre_crs[\"enable\"] =", "between client and server for some URLs like /ice/..., we", "ip') args = parser.parse_args() if args.password: api = ApiSession.get_session(args.controller_ip, args.user,", "index = 0 if \"pre_crs_groups\" not in waf_policy_obj: waf_policy_obj[\"pre_crs_groups\"] =", "api_version=API_VERSION) else: logging.error(\"Either password or authtokentoken must be provided.\") sys.exit(1)", "} index = 0 if \"pre_crs_groups\" not in waf_policy_obj: waf_policy_obj[\"pre_crs_groups\"]", "\"description\": \"WAF will buffer the whole request body first and", "= \"CRS-2021-1\" resp = api.get(\"wafcrs?name=\" + tested_crs) if resp.status_code not", "be parsed as XML in WAF\", \"rule\": \"SecRule REQUEST_METHOD \\\"@streq", "data between client and server for some URLs like /ice/...,", "expected range\".format(group[\"name\"]) return groupid def disable_crs_response_rules(waf_policy_obj): #disable response side rules", "sys.exit(1) waf_policy_obj = api.get_object_by_name('wafpolicy', SYSTEM_WAF_POLICY_VDI) if not waf_policy_obj: create_vdi_waf_policy(api, args)", "], \"match\": { \"path\": { \"match_case\": \"SENSITIVE\", \"match_str\": [ \"/ice/\"", 
"== '__main__': parser = argparse.ArgumentParser() parser.add_argument('-u', '--user', action=\"store\", help='controller user',", "create_vdi_waf_policy(api, args): waf_policy_obj = { \"name\": SYSTEM_WAF_POLICY_VDI, \"mode\": \"WAF_MODE_DETECTION_ONLY\", \"waf_profile_ref\":", "get_id_from_group(crs_group) if group_id >= 950: crs_group[\"enable\"] = False for rule", "waf_policy_obj.setdefault(\"whitelist\", {}).setdefault(\"rules\", []) for rule in waf_policy_obj[\"whitelist\"][\"rules\"][:]: if rule[\"name\"] ==", "successfully') else: logger.error('Error : %s' % resp.text) def update_waf_policy(api, args,", "300): logger.debug('Create WAF policy successfully') else: logger.error('Error : %s' %", "not extract group id from group '{}'\".format(group[\"name\"]) groupid = int(match.group(\"group_id\"))", "for /broker/xml\", \"description\": \"Clients often send the wrong Content-Type header.", "We ignore the header and enforce the request body to", "add_allowlist_rule(waf_policy_obj) disable_crs_response_rules(waf_policy_obj) add_pre_crs_group(waf_policy_obj) resp = api.put('wafpolicy/%s' %waf_policy_obj['uuid'], data=waf_policy_obj) if resp.status_code", "get_crs(api) if waf_crs is None: return waf_policy_obj[\"waf_crs_ref\"]=\"/api/wafcrs?name=\"+waf_crs[\"name\"] waf_policy_obj[\"crs_groups\"] = list()", "'{}' not in expected range\".format(group[\"name\"]) return groupid def disable_crs_response_rules(waf_policy_obj): #disable", "'--password', action=\"store\", help='controller user password', default='<PASSWORD>') parser.add_argument('-t', '--tenant', action=\"store\", help='tenant", "help='tenant name', default='admin') parser.add_argument('-a', '--authtoken', help='Authentication token') parser.add_argument('-c', '--controller_ip', action=\"store\",", "= re.compile(\"[^\\d]*(?P<group_id>\\d\\d\\d)\") match = pattern.match(group[\"name\"]) assert match, \"can not extract", "and then release to backend. 
With VDI, client wants to", "should allow these URLs\", \"actions\": [ \"WAF_POLICY_WHITELIST_ACTION_ALLOW\" ], \"match\": {", "def get_crs(api): tested_crs = \"CRS-2021-1\" resp = api.get(\"wafcrs?name=\" + tested_crs)", "import logging import os import sys from avi.sdk.avi_api import ApiSession", "'__main__': parser = argparse.ArgumentParser() parser.add_argument('-u', '--user', action=\"store\", help='controller user', default='admin')", "= { \"name\": SYSTEM_WAF_POLICY_VDI, \"mode\": \"WAF_MODE_DETECTION_ONLY\", \"waf_profile_ref\": \"/api/wafprofile?name=System-WAF-Profile\" } waf_crs", "action=\"store\", help='controller user', default='admin') parser.add_argument('-p', '--password', action=\"store\", help='controller user password',", "in range(200, 300): if resp.status_code == 404: logger.error(\"Controller does not", "} waf_crs = get_crs(api) if waf_crs is None: return waf_policy_obj[\"waf_crs_ref\"]=\"/api/wafcrs?name=\"+waf_crs[\"name\"]", "in WAF\", \"rule\": \"SecRule REQUEST_METHOD \\\"@streq POST\\\" \\\"phase:1,id:4099822,t:none,nolog,pass,chain\\\" \\n SecRule", "enforce the request body to be parsed as XML in", "to allow request with uri beginning with /ice/ allowlist_rule={ \"index\":", "\\\"@streq POST\\\" \\\"phase:1,id:4099822,t:none,nolog,pass,chain\\\" \\n SecRule REQUEST_URI \\\"@streq /broker/xml\\\" \\\"t:none,ctl:requestBodyProcessor=XML\\\"\" }", "'--controller_ip', action=\"store\", help='controller ip') args = parser.parse_args() if args.password: api", "ApiSession.get_session(args.controller_ip, args.user, args.password, tenant=args.tenant, api_version=API_VERSION) elif args.authtoken: api = ApiSession.get_session(args.controller_ip,", "backend. With VDI, client wants to stream data between client", "def disable_crs_response_rules(waf_policy_obj): #disable response side rules and some specific rules", "logger.error('Error : %s' % resp.text) def update_waf_policy(api, args, waf_policy_obj): add_allowlist_rule(waf_policy_obj)", "\"can not extract group id from group '{}'\".format(group[\"name\"]) groupid =", "WAF policy successfully') else: logger.error('Error : %s' % resp.text) def", "% resp.text) if __name__ == '__main__': parser = argparse.ArgumentParser() parser.add_argument('-u',", "{ \"index\": 0, \"name\": \"VDI_409_ENFORCE_XML\", \"rules\": xml_rule } index =", "import re import logging import os import sys from avi.sdk.avi_api", "please install first.\" % tested_crs) return None logger.error('Error : %s',", "waf_policy_obj[\"pre_crs_groups\"] = list() for pre_crs in waf_policy_obj[\"pre_crs_groups\"]: if pre_crs[\"name\"] ==", "'--tenant', action=\"store\", help='tenant name', default='admin') parser.add_argument('-a', '--authtoken', help='Authentication token') parser.add_argument('-c',", "to parse body as xml for requests with /broker/xml uri", "import json import re import logging import os import sys", "json.loads(resp.text)[\"results\"] return waf_crs[0] def create_vdi_waf_policy(api, args): waf_policy_obj = { \"name\":", "resp.text) def update_waf_policy(api, args, waf_policy_obj): add_allowlist_rule(waf_policy_obj) disable_crs_response_rules(waf_policy_obj) add_pre_crs_group(waf_policy_obj) resp =", "parser.add_argument('-c', '--controller_ip', action=\"store\", help='controller ip') args = parser.parse_args() if args.password:", "if group_id >= 950: crs_group[\"enable\"] = False for rule in", "be provided.\") sys.exit(1) waf_policy_obj = api.get_object_by_name('wafpolicy', SYSTEM_WAF_POLICY_VDI) if not waf_policy_obj:", 
"\"WAF_POLICY_WHITELIST_ACTION_ALLOW\" ], \"match\": { \"path\": { \"match_case\": \"SENSITIVE\", \"match_str\": [", "= get_id_from_group(crs_group) if group_id >= 950: crs_group[\"enable\"] = False for", "or 100 <= groupid <= 999, \"group id for group", "ignore the header and enforce the request body to be", "\"allowlist-start-with-ice\", \"description\": \"WAF will buffer the whole request body first", "+ 1 waf_policy_obj[\"pre_crs_groups\"].append(pre_crs_group) def get_crs(api): tested_crs = \"CRS-2021-1\" resp =", "waf_crs = get_crs(api) if waf_crs is None: return waf_policy_obj[\"waf_crs_ref\"]=\"/api/wafcrs?name=\"+waf_crs[\"name\"] waf_policy_obj[\"crs_groups\"]", "{ \"index\": 0, \"name\": \"enforce XML parsing for /broker/xml\", \"description\":", "<= groupid <= 999, \"group id for group '{}' not", "request body to be parsed as XML in WAF\", \"rule\":", "\"920330\" or rule[\"rule_id\"] == \"932105\": rule[\"enable\"] = False def add_pre_crs_group(waf_policy_obj):", "assert groupid == 0 or 100 <= groupid <= 999,", "rule[\"enable\"] = False def add_pre_crs_group(waf_policy_obj): #add a rule to parse", "rules for crs_group in waf_policy_obj.get(\"crs_groups\", []): group_id = get_id_from_group(crs_group) if", "these URLs\", \"actions\": [ \"WAF_POLICY_WHITELIST_ACTION_ALLOW\" ], \"match\": { \"path\": {", "pre_crs_group = { \"index\": 0, \"name\": \"VDI_409_ENFORCE_XML\", \"rules\": xml_rule }", "= get_crs(api) if waf_crs is None: return waf_policy_obj[\"waf_crs_ref\"]=\"/api/wafcrs?name=\"+waf_crs[\"name\"] waf_policy_obj[\"crs_groups\"] =", "range(200, 300): if resp.status_code == 404: logger.error(\"Controller does not have", "XML parsing for /broker/xml\", \"description\": \"Clients often send the wrong", "not have CRS %s, please install first.\" % tested_crs) return", "resp.status_code not in range(200, 300): if resp.status_code == 404: logger.error(\"Controller", "= ApiSession.get_session(args.controller_ip, args.user, args.password, tenant=args.tenant, api_version=API_VERSION) elif args.authtoken: api =", "parser.add_argument('-a', '--authtoken', help='Authentication token') parser.add_argument('-c', '--controller_ip', action=\"store\", help='controller ip') args", "re import logging import os import sys from avi.sdk.avi_api import", "wants to stream data between client and server for some", "logger.error('Error : %s' % resp.text) if __name__ == '__main__': parser", "elif args.authtoken: api = ApiSession.get_session(args.controller_ip, args.user, tenant=args.tenant, token=args.authtoken, api_version=API_VERSION) else:", "if pre_crs[\"index\"] > index: index = pre_crs[\"index\"] pre_crs_group[\"index\"] = index", "token=args.authtoken, api_version=API_VERSION) else: logging.error(\"Either password or authtokentoken must be provided.\")", "index+1 waf_policy_obj[\"whitelist\"][\"rules\"].append(allowlist_rule) def get_id_from_group(group): pattern = re.compile(\"[^\\d]*(?P<group_id>\\d\\d\\d)\") match = pattern.match(group[\"name\"])", "REQUEST_METHOD \\\"@streq POST\\\" \\\"phase:1,id:4099822,t:none,nolog,pass,chain\\\" \\n SecRule REQUEST_URI \\\"@streq /broker/xml\\\" \\\"t:none,ctl:requestBodyProcessor=XML\\\"\"", "import ApiSession API_VERSION = \"18.2.13\" SYSTEM_WAF_POLICY_VDI='System-WAF-Policy-VDI' logger = logging.getLogger(__name__) def", "api.get(\"wafcrs?name=\" + tested_crs) if resp.status_code not in range(200, 300): if", "not in waf_policy_obj: waf_policy_obj[\"pre_crs_groups\"] = list() for pre_crs in waf_policy_obj[\"pre_crs_groups\"]:", "\"rule\": \"SecRule 
REQUEST_METHOD \\\"@streq POST\\\" \\\"phase:1,id:4099822,t:none,nolog,pass,chain\\\" \\n SecRule REQUEST_URI \\\"@streq", "{}).setdefault(\"rules\", []) for rule in waf_policy_obj[\"whitelist\"][\"rules\"][:]: if rule[\"name\"] == \"allowlist-start-with-ice\":", "groupid = int(match.group(\"group_id\")) assert groupid == 0 or 100 <=", "waf_policy_obj = { \"name\": SYSTEM_WAF_POLICY_VDI, \"mode\": \"WAF_MODE_DETECTION_ONLY\", \"waf_profile_ref\": \"/api/wafprofile?name=System-WAF-Profile\" }", "ApiSession.get_session(args.controller_ip, args.user, tenant=args.tenant, token=args.authtoken, api_version=API_VERSION) else: logging.error(\"Either password or authtokentoken", "default='admin') parser.add_argument('-p', '--password', action=\"store\", help='controller user password', default='<PASSWORD>') parser.add_argument('-t', '--tenant',", "a allowlist rule to allow request with uri beginning with", "rule[\"index\"]>index: index = rule[\"index\"] allowlist_rule[\"index\"] = index+1 waf_policy_obj[\"whitelist\"][\"rules\"].append(allowlist_rule) def get_id_from_group(group):", "import os import sys from avi.sdk.avi_api import ApiSession API_VERSION =", "parser = argparse.ArgumentParser() parser.add_argument('-u', '--user', action=\"store\", help='controller user', default='admin') parser.add_argument('-p',", "\"VDI_409_ENFORCE_XML\": pre_crs[\"rules\"] = xml_rule pre_crs[\"enable\"] = True return if pre_crs[\"index\"]", "CRS %s, please install first.\" % tested_crs) return None logger.error('Error", "with /ice/ allowlist_rule={ \"index\": 0, \"name\": \"allowlist-start-with-ice\", \"description\": \"WAF will", "waf_policy_obj[\"waf_crs_ref\"]=\"/api/wafcrs?name=\"+waf_crs[\"name\"] waf_policy_obj[\"crs_groups\"] = list() for group in waf_crs[\"groups\"]: waf_policy_obj[\"crs_groups\"].append(group) add_allowlist_rule(waf_policy_obj)", "waf_policy_obj.get(\"crs_groups\", []): group_id = get_id_from_group(crs_group) if group_id >= 950: crs_group[\"enable\"]", "WAF\", \"rule\": \"SecRule REQUEST_METHOD \\\"@streq POST\\\" \\\"phase:1,id:4099822,t:none,nolog,pass,chain\\\" \\n SecRule REQUEST_URI", "like /ice/..., we should allow these URLs\", \"actions\": [ \"WAF_POLICY_WHITELIST_ACTION_ALLOW\"", "\"path\": { \"match_case\": \"SENSITIVE\", \"match_str\": [ \"/ice/\" ], \"match_criteria\": \"BEGINS_WITH\"", "add_pre_crs_group(waf_policy_obj) resp = api.put('wafpolicy/%s' %waf_policy_obj['uuid'], data=waf_policy_obj) if resp.status_code in range(200,", "action=\"store\", help='controller user password', default='<PASSWORD>') parser.add_argument('-t', '--tenant', action=\"store\", help='tenant name',", "will buffer the whole request body first and then release", "action=\"store\", help='tenant name', default='admin') parser.add_argument('-a', '--authtoken', help='Authentication token') parser.add_argument('-c', '--controller_ip',", "xml_rule = [ { \"index\": 0, \"name\": \"enforce XML parsing", "1 waf_policy_obj[\"pre_crs_groups\"].append(pre_crs_group) def get_crs(api): tested_crs = \"CRS-2021-1\" resp = api.get(\"wafcrs?name=\"", "\"match_str\": [ \"/ice/\" ], \"match_criteria\": \"BEGINS_WITH\" } } } index", "list() for pre_crs in waf_policy_obj[\"pre_crs_groups\"]: if pre_crs[\"name\"] == \"VDI_409_ENFORCE_XML\": pre_crs[\"rules\"]", "import argparse import json import re import logging import os", "\"name\": \"enforce XML parsing for /broker/xml\", \"description\": \"Clients often send", "release to backend. 
With VDI, client wants to stream data", "waf_policy_obj[\"crs_groups\"].append(group) add_allowlist_rule(waf_policy_obj) disable_crs_response_rules(waf_policy_obj) add_pre_crs_group(waf_policy_obj) resp = api.post('wafpolicy', data=json.dumps(waf_policy_obj)) if resp.status_code", "return None logger.error('Error : %s', resp.text) exit(0) waf_crs = json.loads(resp.text)[\"results\"]", "\"VDI_409_ENFORCE_XML\", \"rules\": xml_rule } index = 0 if \"pre_crs_groups\" not", "tested_crs = \"CRS-2021-1\" resp = api.get(\"wafcrs?name=\" + tested_crs) if resp.status_code", "in range(200, 300): logger.debug('Create WAF policy successfully') else: logger.error('Error :", "add_pre_crs_group(waf_policy_obj): #add a rule to parse body as xml for", "server for some URLs like /ice/..., we should allow these", "group id from group '{}'\".format(group[\"name\"]) groupid = int(match.group(\"group_id\")) assert groupid", "pre_crs[\"enable\"] = True return if pre_crs[\"index\"] > index: index =", "user', default='admin') parser.add_argument('-p', '--password', action=\"store\", help='controller user password', default='<PASSWORD>') parser.add_argument('-t',", "allow these URLs\", \"actions\": [ \"WAF_POLICY_WHITELIST_ACTION_ALLOW\" ], \"match\": { \"path\":", "= index + 1 waf_policy_obj[\"pre_crs_groups\"].append(pre_crs_group) def get_crs(api): tested_crs = \"CRS-2021-1\"", "index + 1 waf_policy_obj[\"pre_crs_groups\"].append(pre_crs_group) def get_crs(api): tested_crs = \"CRS-2021-1\" resp", "rule to parse body as xml for requests with /broker/xml", "== \"VDI_409_ENFORCE_XML\": pre_crs[\"rules\"] = xml_rule pre_crs[\"enable\"] = True return if", "def create_vdi_waf_policy(api, args): waf_policy_obj = { \"name\": SYSTEM_WAF_POLICY_VDI, \"mode\": \"WAF_MODE_DETECTION_ONLY\",", "'{}'\".format(group[\"name\"]) groupid = int(match.group(\"group_id\")) assert groupid == 0 or 100", "} } } index = 0 waf_policy_obj.setdefault(\"whitelist\", {}).setdefault(\"rules\", []) for", "exit(0) waf_crs = json.loads(resp.text)[\"results\"] return waf_crs[0] def create_vdi_waf_policy(api, args): waf_policy_obj", "waf_crs[0] def create_vdi_waf_policy(api, args): waf_policy_obj = { \"name\": SYSTEM_WAF_POLICY_VDI, \"mode\":", "response side rules and some specific rules for crs_group in", "= json.loads(resp.text)[\"results\"] return waf_crs[0] def create_vdi_waf_policy(api, args): waf_policy_obj = {", "whole request body first and then release to backend. 
With", "\"name\": \"VDI_409_ENFORCE_XML\", \"rules\": xml_rule } index = 0 if \"pre_crs_groups\"", "\"match\": { \"path\": { \"match_case\": \"SENSITIVE\", \"match_str\": [ \"/ice/\" ],", "for crs_group in waf_policy_obj.get(\"crs_groups\", []): group_id = get_id_from_group(crs_group) if group_id", "for requests with /broker/xml uri xml_rule = [ { \"index\":", "URLs like /ice/..., we should allow these URLs\", \"actions\": [", "body as xml for requests with /broker/xml uri xml_rule =", "#add a rule to parse body as xml for requests", "tested_crs) return None logger.error('Error : %s', resp.text) exit(0) waf_crs =", "API_VERSION = \"18.2.13\" SYSTEM_WAF_POLICY_VDI='System-WAF-Policy-VDI' logger = logging.getLogger(__name__) def add_allowlist_rule(waf_policy_obj): #add", "data=waf_policy_obj) if resp.status_code in range(200, 300): logger.debug('Create WAF policy successfully')", "== \"920330\" or rule[\"rule_id\"] == \"932105\": rule[\"enable\"] = False def", "POST\\\" \\\"phase:1,id:4099822,t:none,nolog,pass,chain\\\" \\n SecRule REQUEST_URI \\\"@streq /broker/xml\\\" \\\"t:none,ctl:requestBodyProcessor=XML\\\"\" } ]", "id for group '{}' not in expected range\".format(group[\"name\"]) return groupid", "0, \"name\": \"enforce XML parsing for /broker/xml\", \"description\": \"Clients often", "= argparse.ArgumentParser() parser.add_argument('-u', '--user', action=\"store\", help='controller user', default='admin') parser.add_argument('-p', '--password',", "allowlist_rule={ \"index\": 0, \"name\": \"allowlist-start-with-ice\", \"description\": \"WAF will buffer the", "logger = logging.getLogger(__name__) def add_allowlist_rule(waf_policy_obj): #add a allowlist rule to", "VMware, Inc. import argparse import json import re import logging", "waf_policy_obj: waf_policy_obj[\"pre_crs_groups\"] = list() for pre_crs in waf_policy_obj[\"pre_crs_groups\"]: if pre_crs[\"name\"]", "to backend. With VDI, client wants to stream data between", "for some URLs like /ice/..., we should allow these URLs\",", "parser.parse_args() if args.password: api = ApiSession.get_session(args.controller_ip, args.user, args.password, tenant=args.tenant, api_version=API_VERSION)", "# Copyright 2021 VMware, Inc. import argparse import json import", "'--authtoken', help='Authentication token') parser.add_argument('-c', '--controller_ip', action=\"store\", help='controller ip') args =", "api = ApiSession.get_session(args.controller_ip, args.user, tenant=args.tenant, token=args.authtoken, api_version=API_VERSION) else: logging.error(\"Either password", "in expected range\".format(group[\"name\"]) return groupid def disable_crs_response_rules(waf_policy_obj): #disable response side", "2021 VMware, Inc. 
import argparse import json import re import", ": %s' % resp.text) def update_waf_policy(api, args, waf_policy_obj): add_allowlist_rule(waf_policy_obj) disable_crs_response_rules(waf_policy_obj)", "if rule[\"rule_id\"] == \"920330\" or rule[\"rule_id\"] == \"932105\": rule[\"enable\"] =", "\"/api/wafprofile?name=System-WAF-Profile\" } waf_crs = get_crs(api) if waf_crs is None: return", "provided.\") sys.exit(1) waf_policy_obj = api.get_object_by_name('wafpolicy', SYSTEM_WAF_POLICY_VDI) if not waf_policy_obj: create_vdi_waf_policy(api,", "for rule in waf_policy_obj[\"whitelist\"][\"rules\"][:]: if rule[\"name\"] == \"allowlist-start-with-ice\": waf_policy_obj[\"whitelist\"][\"rules\"].remove(rule) if", "= True return if pre_crs[\"index\"] > index: index = pre_crs[\"index\"]", "\"enforce XML parsing for /broker/xml\", \"description\": \"Clients often send the", "rule[\"index\"] allowlist_rule[\"index\"] = index+1 waf_policy_obj[\"whitelist\"][\"rules\"].append(allowlist_rule) def get_id_from_group(group): pattern = re.compile(\"[^\\d]*(?P<group_id>\\d\\d\\d)\")", "= False def add_pre_crs_group(waf_policy_obj): #add a rule to parse body", "default='<PASSWORD>') parser.add_argument('-t', '--tenant', action=\"store\", help='tenant name', default='admin') parser.add_argument('-a', '--authtoken', help='Authentication", "wrong Content-Type header. We ignore the header and enforce the", "return waf_policy_obj[\"waf_crs_ref\"]=\"/api/wafcrs?name=\"+waf_crs[\"name\"] waf_policy_obj[\"crs_groups\"] = list() for group in waf_crs[\"groups\"]: waf_policy_obj[\"crs_groups\"].append(group)", "args.authtoken: api = ApiSession.get_session(args.controller_ip, args.user, tenant=args.tenant, token=args.authtoken, api_version=API_VERSION) else: logging.error(\"Either", "beginning with /ice/ allowlist_rule={ \"index\": 0, \"name\": \"allowlist-start-with-ice\", \"description\": \"WAF", "<= 999, \"group id for group '{}' not in expected", "token') parser.add_argument('-c', '--controller_ip', action=\"store\", help='controller ip') args = parser.parse_args() if", "\"CRS-2021-1\" resp = api.get(\"wafcrs?name=\" + tested_crs) if resp.status_code not in", "args.password: api = ApiSession.get_session(args.controller_ip, args.user, args.password, tenant=args.tenant, api_version=API_VERSION) elif args.authtoken:", "\\\"t:none,ctl:requestBodyProcessor=XML\\\"\" } ] pre_crs_group = { \"index\": 0, \"name\": \"VDI_409_ENFORCE_XML\",", "0, \"name\": \"allowlist-start-with-ice\", \"description\": \"WAF will buffer the whole request", "list() for group in waf_crs[\"groups\"]: waf_policy_obj[\"crs_groups\"].append(group) add_allowlist_rule(waf_policy_obj) disable_crs_response_rules(waf_policy_obj) add_pre_crs_group(waf_policy_obj) resp", "get_id_from_group(group): pattern = re.compile(\"[^\\d]*(?P<group_id>\\d\\d\\d)\") match = pattern.match(group[\"name\"]) assert match, \"can", "else: logger.error('Error : %s' % resp.text) def update_waf_policy(api, args, waf_policy_obj):", "= api.get(\"wafcrs?name=\" + tested_crs) if resp.status_code not in range(200, 300):", "install first.\" % tested_crs) return None logger.error('Error : %s', resp.text)", "we should allow these URLs\", \"actions\": [ \"WAF_POLICY_WHITELIST_ACTION_ALLOW\" ], \"match\":", "\"/ice/\" ], \"match_criteria\": \"BEGINS_WITH\" } } } index = 0", "def add_allowlist_rule(waf_policy_obj): #add a allowlist rule to allow request with", "+ tested_crs) if resp.status_code not in range(200, 300): if resp.status_code", "\"match_criteria\": \"BEGINS_WITH\" } } } index = 0 
waf_policy_obj.setdefault(\"whitelist\", {}).setdefault(\"rules\",", "SYSTEM_WAF_POLICY_VDI) if not waf_policy_obj: create_vdi_waf_policy(api, args) else: update_waf_policy(api, args, waf_policy_obj)", "\\n SecRule REQUEST_URI \\\"@streq /broker/xml\\\" \\\"t:none,ctl:requestBodyProcessor=XML\\\"\" } ] pre_crs_group =", "if rule[\"name\"] == \"allowlist-start-with-ice\": waf_policy_obj[\"whitelist\"][\"rules\"].remove(rule) if rule[\"index\"]>index: index = rule[\"index\"]", "[ \"/ice/\" ], \"match_criteria\": \"BEGINS_WITH\" } } } index =", "= pre_crs[\"index\"] pre_crs_group[\"index\"] = index + 1 waf_policy_obj[\"pre_crs_groups\"].append(pre_crs_group) def get_crs(api):", "\"BEGINS_WITH\" } } } index = 0 waf_policy_obj.setdefault(\"whitelist\", {}).setdefault(\"rules\", [])", "else: logging.error(\"Either password or authtokentoken must be provided.\") sys.exit(1) waf_policy_obj", "body first and then release to backend. With VDI, client", "{ \"path\": { \"match_case\": \"SENSITIVE\", \"match_str\": [ \"/ice/\" ], \"match_criteria\":", "first.\" % tested_crs) return None logger.error('Error : %s', resp.text) exit(0)", "pre_crs in waf_policy_obj[\"pre_crs_groups\"]: if pre_crs[\"name\"] == \"VDI_409_ENFORCE_XML\": pre_crs[\"rules\"] = xml_rule", "waf_crs is None: return waf_policy_obj[\"waf_crs_ref\"]=\"/api/wafcrs?name=\"+waf_crs[\"name\"] waf_policy_obj[\"crs_groups\"] = list() for group", "logging.error(\"Either password or authtokentoken must be provided.\") sys.exit(1) waf_policy_obj =", "body to be parsed as XML in WAF\", \"rule\": \"SecRule", "if args.password: api = ApiSession.get_session(args.controller_ip, args.user, args.password, tenant=args.tenant, api_version=API_VERSION) elif", "tested_crs) if resp.status_code not in range(200, 300): if resp.status_code ==", "[ \"WAF_POLICY_WHITELIST_ACTION_ALLOW\" ], \"match\": { \"path\": { \"match_case\": \"SENSITIVE\", \"match_str\":", "404: logger.error(\"Controller does not have CRS %s, please install first.\"", "resp.status_code == 404: logger.error(\"Controller does not have CRS %s, please", "have CRS %s, please install first.\" % tested_crs) return None", "def get_id_from_group(group): pattern = re.compile(\"[^\\d]*(?P<group_id>\\d\\d\\d)\") match = pattern.match(group[\"name\"]) assert match,", "= xml_rule pre_crs[\"enable\"] = True return if pre_crs[\"index\"] > index:", "} ] pre_crs_group = { \"index\": 0, \"name\": \"VDI_409_ENFORCE_XML\", \"rules\":", "help='controller user password', default='<PASSWORD>') parser.add_argument('-t', '--tenant', action=\"store\", help='tenant name', default='admin')", "allowlist rule to allow request with uri beginning with /ice/", "or rule[\"rule_id\"] == \"932105\": rule[\"enable\"] = False def add_pre_crs_group(waf_policy_obj): #add", "[ { \"index\": 0, \"name\": \"enforce XML parsing for /broker/xml\",", "= logging.getLogger(__name__) def add_allowlist_rule(waf_policy_obj): #add a allowlist rule to allow", "%s, please install first.\" % tested_crs) return None logger.error('Error :", "disable_crs_response_rules(waf_policy_obj): #disable response side rules and some specific rules for", "0, \"name\": \"VDI_409_ENFORCE_XML\", \"rules\": xml_rule } index = 0 if", "SecRule REQUEST_URI \\\"@streq /broker/xml\\\" \\\"t:none,ctl:requestBodyProcessor=XML\\\"\" } ] pre_crs_group = {", "waf_policy_obj[\"pre_crs_groups\"].append(pre_crs_group) def get_crs(api): tested_crs = \"CRS-2021-1\" resp = api.get(\"wafcrs?name=\" +", "\"match_case\": \"SENSITIVE\", \"match_str\": [ \"/ice/\" ], 
\"match_criteria\": \"BEGINS_WITH\" } }", "args = parser.parse_args() if args.password: api = ApiSession.get_session(args.controller_ip, args.user, args.password,", "/ice/..., we should allow these URLs\", \"actions\": [ \"WAF_POLICY_WHITELIST_ACTION_ALLOW\" ],", "], \"match_criteria\": \"BEGINS_WITH\" } } } index = 0 waf_policy_obj.setdefault(\"whitelist\",", "% tested_crs) return None logger.error('Error : %s', resp.text) exit(0) waf_crs", "argparse.ArgumentParser() parser.add_argument('-u', '--user', action=\"store\", help='controller user', default='admin') parser.add_argument('-p', '--password', action=\"store\",", "ApiSession API_VERSION = \"18.2.13\" SYSTEM_WAF_POLICY_VDI='System-WAF-Policy-VDI' logger = logging.getLogger(__name__) def add_allowlist_rule(waf_policy_obj):", "the request body to be parsed as XML in WAF\",", "<gh_stars>0 # Copyright 2021 VMware, Inc. import argparse import json", "{ \"match_case\": \"SENSITIVE\", \"match_str\": [ \"/ice/\" ], \"match_criteria\": \"BEGINS_WITH\" }", "== \"932105\": rule[\"enable\"] = False def add_pre_crs_group(waf_policy_obj): #add a rule", "password or authtokentoken must be provided.\") sys.exit(1) waf_policy_obj = api.get_object_by_name('wafpolicy',", "user password', default='<PASSWORD>') parser.add_argument('-t', '--tenant', action=\"store\", help='tenant name', default='admin') parser.add_argument('-a',", "header. We ignore the header and enforce the request body", "else: logger.error('Error : %s' % resp.text) if __name__ == '__main__':", "in waf_policy_obj.get(\"crs_groups\", []): group_id = get_id_from_group(crs_group) if group_id >= 950:", "= rule[\"index\"] allowlist_rule[\"index\"] = index+1 waf_policy_obj[\"whitelist\"][\"rules\"].append(allowlist_rule) def get_id_from_group(group): pattern =", "allowlist_rule[\"index\"] = index+1 waf_policy_obj[\"whitelist\"][\"rules\"].append(allowlist_rule) def get_id_from_group(group): pattern = re.compile(\"[^\\d]*(?P<group_id>\\d\\d\\d)\") match", "in crs_group.get(\"rules\", []): if rule[\"rule_id\"] == \"920330\" or rule[\"rule_id\"] ==", "True return if pre_crs[\"index\"] > index: index = pre_crs[\"index\"] pre_crs_group[\"index\"]", "api.put('wafpolicy/%s' %waf_policy_obj['uuid'], data=waf_policy_obj) if resp.status_code in range(200, 300): logger.debug('Create WAF", "index = rule[\"index\"] allowlist_rule[\"index\"] = index+1 waf_policy_obj[\"whitelist\"][\"rules\"].append(allowlist_rule) def get_id_from_group(group): pattern", "resp.text) if __name__ == '__main__': parser = argparse.ArgumentParser() parser.add_argument('-u', '--user',", "add_allowlist_rule(waf_policy_obj) disable_crs_response_rules(waf_policy_obj) add_pre_crs_group(waf_policy_obj) resp = api.post('wafpolicy', data=json.dumps(waf_policy_obj)) if resp.status_code in", "crs_group in waf_policy_obj.get(\"crs_groups\", []): group_id = get_id_from_group(crs_group) if group_id >=", "] pre_crs_group = { \"index\": 0, \"name\": \"VDI_409_ENFORCE_XML\", \"rules\": xml_rule", "uri beginning with /ice/ allowlist_rule={ \"index\": 0, \"name\": \"allowlist-start-with-ice\", \"description\":" ]
[ "= ResourceType.get_attributes_mapping(pc) assert attributes_mapping == {} @pytest.mark.parametrize( \"dep_chain,expected\", [ (", "(False, \"/category,5/\", \"category_id\", 5), (True, \"/department,3440/department,3443/\", \"parent_id\", 3443), (False, \"/department,3440/department,3443/\",", "pytest.mark.django_db class TestResourceTypeEnum: @pytest.mark.parametrize( \"is_leaf, path, f, v\", [ (True,", "(True, \"/category,5/department,3440/department,3443/\", \"parent_id\", 3443), (False, \"/category,5/department,3440/department,3443/\", \"id\", 3443), (True, \"/category,5/\",", "= make_simple_department(str(d), force_create_params={\"id\": d}, parent_id=parent_id) attributes_mapping = ResourceType.get_attributes_mapping(target_parent) assert attributes_mapping", "parent_id=parent_id) # 只添加 parent,mptt 树需要重建 Department.tree_objects.rebuild() nodes = ResourceType.get_instance_resource_nodes(target_parent) assert", "\"/department,3440/department,3443/\", \"id\", 3443), ], ) def test_get_key_mapping(self, is_leaf, path, f,", "IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either", "assert f == f assert v == v @pytest.mark.parametrize( \"dep_chain,", "a copy of the License at http://opensource.org/licenses/MIT Unless required by", "the License. \"\"\" import pytest from bkuser_core.bkiam.constants import ResourceType from", "3443), (False, \"/department,3440/department,3443/\", \"id\", 3443), ], ) def test_get_key_mapping(self, is_leaf,", "ProfileCategory.objects.get_default() nodes = ResourceType.get_instance_resource_nodes(pc) assert [(x[\"type\"], x[\"name\"]) for x in", "assert v == v @pytest.mark.parametrize( \"dep_chain, expected\", [ ( [1000,", "\"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,", "def test_get_attributes_mapping_other(self): pc = ProfileCategory.objects.get_default() attributes_mapping = ResourceType.get_attributes_mapping(pc) assert attributes_mapping", "distributed on an \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS", "pleased to support the open source community by making 蓝鲸智云-用户管理(Bk-User)", "\"/category,1/department,1000/\"}, ), ], ) def test_get_attributes_mapping(self, dep_chain, expected): target_parent =", "A29 Limited, a Tencent company. All rights reserved. Licensed under", "ANY KIND, either express or implied. See the License for", "ResourceType from bkuser_core.categories.models import Department, ProfileCategory from bkuser_core.tests.utils import make_simple_department", "(False, \"/department,3440/department,3443/\", \"id\", 3443), ], ) def test_get_key_mapping(self, is_leaf, path,", "by making 蓝鲸智云-用户管理(Bk-User) available. Copyright (C) 2017-2021 THL A29 Limited,", "1002], {\"_bk_iam_path_\": \"/category,1/department,1000/department,1001/department,1002/\"}, ), ( [1000], {\"_bk_iam_path_\": \"/category,1/department,1000/\"}, ), ],", "v = path_method(data) assert f == f assert v ==", "path_method = key_mapping[\"department._bk_iam_path_\"] data = {\"value\": path} if not is_leaf:", "\"/category,1/department,1000/department,1001/department,1002/\"}, ), ( [1000], {\"_bk_iam_path_\": \"/category,1/department,1000/\"}, ), ], ) def", "{\"_bk_iam_path_\": \"/category,1/department,1000/department,1001/department,1002/\"}, ), ( [1000], {\"_bk_iam_path_\": \"/category,1/department,1000/\"}, ), ], )", "available. 
Copyright (C) 2017-2021 THL A29 Limited, a Tencent company.", "under the License is distributed on an \"AS IS\" BASIS,", "not is_leaf: data[\"node_type\"] = \"non-leaf\" f, v = path_method(data) assert", "the open source community by making 蓝鲸智云-用户管理(Bk-User) available. Copyright (C)", "copy of the License at http://opensource.org/licenses/MIT Unless required by applicable", "Licensed under the MIT License (the \"License\"); you may not", "this file except in compliance with the License. You may", "f, v): key_mapping = ResourceType.get_key_mapping(ResourceType.DEPARTMENT) path_method = key_mapping[\"department._bk_iam_path_\"] data =", "key_mapping = ResourceType.get_key_mapping(ResourceType.DEPARTMENT) path_method = key_mapping[\"department._bk_iam_path_\"] data = {\"value\": path}", "expected): target_parent = None for d in dep_chain: parent_id =", "v == v @pytest.mark.parametrize( \"dep_chain, expected\", [ ( [1000, 1001,", "test_get_resource_nodes_other(self): pc = ProfileCategory.objects.get_default() nodes = ResourceType.get_instance_resource_nodes(pc) assert [(x[\"type\"], x[\"name\"])", "and limitations under the License. \"\"\" import pytest from bkuser_core.bkiam.constants", "-*- \"\"\" TencentBlueKing is pleased to support the open source", "permissions and limitations under the License. \"\"\" import pytest from", "key_mapping[\"department._bk_iam_path_\"] data = {\"value\": path} if not is_leaf: data[\"node_type\"] =", "attributes_mapping = ResourceType.get_attributes_mapping(pc) assert attributes_mapping == {} @pytest.mark.parametrize( \"dep_chain,expected\", [", "a Tencent company. All rights reserved. Licensed under the MIT", "file except in compliance with the License. You may obtain", "make_simple_department(str(d), force_create_params={\"id\": d}, parent_id=parent_id) attributes_mapping = ResourceType.get_attributes_mapping(target_parent) assert attributes_mapping ==", "= None for d in dep_chain: parent_id = target_parent if", "OR CONDITIONS OF ANY KIND, either express or implied. See", "test_get_attributes_mapping_other(self): pc = ProfileCategory.objects.get_default() attributes_mapping = ResourceType.get_attributes_mapping(pc) assert attributes_mapping ==", "if not is_leaf: data[\"node_type\"] = \"non-leaf\" f, v = path_method(data)", "parent_id = target_parent if not target_parent else target_parent.pk target_parent =", "force_create_params={\"id\": d}, parent_id=parent_id) attributes_mapping = ResourceType.get_attributes_mapping(target_parent) assert attributes_mapping == expected", "bkuser_core.tests.utils import make_simple_department pytestmark = pytest.mark.django_db class TestResourceTypeEnum: @pytest.mark.parametrize( \"is_leaf,", "= path_method(data) assert f == f assert v == v", "WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.", "target_parent = None for d in dep_chain: parent_id = target_parent", "in dep_chain: parent_id = target_parent if not target_parent else target_parent.pk", "See the License for the specific language governing permissions and", "the License at http://opensource.org/licenses/MIT Unless required by applicable law or", "\"a\"), (\"department\", \"b\"), (\"department\", \"c\"), ], ), ( [\"a\", \"b\"],", "], ) def test_get_key_mapping(self, is_leaf, path, f, v): key_mapping =", "ResourceType.get_instance_resource_nodes(pc) assert [(x[\"type\"], x[\"name\"]) for x in nodes] == [(\"category\",", "f, v = path_method(data) assert f == f assert v", "under the License. 
\"\"\" import pytest from bkuser_core.bkiam.constants import ResourceType", "], ) def test_get_resource_nodes_dep(self, dep_chain, expected): target_parent = None for", "[(\"category\", \"默认目录\"), (\"department\", \"a\"), (\"department\", \"b\")], ), ], ) def", "in writing, software distributed under the License is distributed on", "required by applicable law or agreed to in writing, software", "path, f, v\", [ (True, \"/category,5/department,3440/department,3443/\", \"parent_id\", 3443), (False, \"/category,5/department,3440/department,3443/\",", "\"c\"), ], ), ( [\"a\", \"b\"], [(\"category\", \"默认目录\"), (\"department\", \"a\"),", "[1000, 1001, 1002], {\"_bk_iam_path_\": \"/category,1/department,1000/department,1001/department,1002/\"}, ), ( [1000], {\"_bk_iam_path_\": \"/category,1/department,1000/\"},", "source community by making 蓝鲸智云-用户管理(Bk-User) available. Copyright (C) 2017-2021 THL", "pc = ProfileCategory.objects.get_default() attributes_mapping = ResourceType.get_attributes_mapping(pc) assert attributes_mapping == {}", "x in nodes] == expected def test_get_resource_nodes_other(self): pc = ProfileCategory.objects.get_default()", "\"/category,5/\", \"category_id\", 5), (False, \"/category,5/\", \"category_id\", 5), (True, \"/department,3440/department,3443/\", \"parent_id\",", "if not target_parent else target_parent.pk target_parent = make_simple_department(str(d), force_create_params={\"id\": d},", "v @pytest.mark.parametrize( \"dep_chain, expected\", [ ( [1000, 1001, 1002], {\"_bk_iam_path_\":", "for x in nodes] == expected def test_get_resource_nodes_other(self): pc =", "attributes_mapping == {} @pytest.mark.parametrize( \"dep_chain,expected\", [ ( [\"a\", \"b\", \"c\"],", "http://opensource.org/licenses/MIT Unless required by applicable law or agreed to in", "Department.tree_objects.rebuild() nodes = ResourceType.get_instance_resource_nodes(target_parent) assert [(x[\"type\"], x[\"name\"]) for x in", "\"b\"], [(\"category\", \"默认目录\"), (\"department\", \"a\"), (\"department\", \"b\")], ), ], )", "software distributed under the License is distributed on an \"AS", "distributed under the License is distributed on an \"AS IS\"", "CONDITIONS OF ANY KIND, either express or implied. See the", "3443), (True, \"/category,5/\", \"category_id\", 5), (False, \"/category,5/\", \"category_id\", 5), (True,", "), ], ) def test_get_resource_nodes_dep(self, dep_chain, expected): target_parent = None", "not use this file except in compliance with the License.", "path_method(data) assert f == f assert v == v @pytest.mark.parametrize(", "you may not use this file except in compliance with", "making 蓝鲸智云-用户管理(Bk-User) available. Copyright (C) 2017-2021 THL A29 Limited, a", "\"a\"), (\"department\", \"b\")], ), ], ) def test_get_resource_nodes_dep(self, dep_chain, expected):", "is distributed on an \"AS IS\" BASIS, WITHOUT WARRANTIES OR", "import pytest from bkuser_core.bkiam.constants import ResourceType from bkuser_core.categories.models import Department,", "the License. 
You may obtain a copy of the License", "ResourceType.get_key_mapping(ResourceType.DEPARTMENT) path_method = key_mapping[\"department._bk_iam_path_\"] data = {\"value\": path} if not", "make_simple_department(d, parent_id=parent_id) # 只添加 parent,mptt 树需要重建 Department.tree_objects.rebuild() nodes = ResourceType.get_instance_resource_nodes(target_parent)", "expected def test_get_resource_nodes_other(self): pc = ProfileCategory.objects.get_default() nodes = ResourceType.get_instance_resource_nodes(pc) assert", "\"/category,5/department,3440/department,3443/\", \"id\", 3443), (True, \"/category,5/\", \"category_id\", 5), (False, \"/category,5/\", \"category_id\",", "use this file except in compliance with the License. You", "assert [(x[\"type\"], x[\"name\"]) for x in nodes] == expected def", "# 只添加 parent,mptt 树需要重建 Department.tree_objects.rebuild() nodes = ResourceType.get_instance_resource_nodes(target_parent) assert [(x[\"type\"],", "path} if not is_leaf: data[\"node_type\"] = \"non-leaf\" f, v =", "class TestResourceTypeEnum: @pytest.mark.parametrize( \"is_leaf, path, f, v\", [ (True, \"/category,5/department,3440/department,3443/\",", "not target_parent else target_parent.pk target_parent = make_simple_department(d, parent_id=parent_id) # 只添加", "def test_get_key_mapping(self, is_leaf, path, f, v): key_mapping = ResourceType.get_key_mapping(ResourceType.DEPARTMENT) path_method", "], ) def test_get_attributes_mapping(self, dep_chain, expected): target_parent = None for", "= ResourceType.get_instance_resource_nodes(pc) assert [(x[\"type\"], x[\"name\"]) for x in nodes] ==", "import Department, ProfileCategory from bkuser_core.tests.utils import make_simple_department pytestmark = pytest.mark.django_db", "\"is_leaf, path, f, v\", [ (True, \"/category,5/department,3440/department,3443/\", \"parent_id\", 3443), (False,", "the MIT License (the \"License\"); you may not use this", "bkuser_core.categories.models import Department, ProfileCategory from bkuser_core.tests.utils import make_simple_department pytestmark =", "limitations under the License. \"\"\" import pytest from bkuser_core.bkiam.constants import", "(the \"License\"); you may not use this file except in", "(C) 2017-2021 THL A29 Limited, a Tencent company. All rights", "\"category_id\", 5), (True, \"/department,3440/department,3443/\", \"parent_id\", 3443), (False, \"/department,3440/department,3443/\", \"id\", 3443),", "= key_mapping[\"department._bk_iam_path_\"] data = {\"value\": path} if not is_leaf: data[\"node_type\"]", "[\"a\", \"b\", \"c\"], [ (\"category\", \"默认目录\"), (\"department\", \"a\"), (\"department\", \"b\"),", "pytestmark = pytest.mark.django_db class TestResourceTypeEnum: @pytest.mark.parametrize( \"is_leaf, path, f, v\",", "obtain a copy of the License at http://opensource.org/licenses/MIT Unless required", "not target_parent else target_parent.pk target_parent = make_simple_department(str(d), force_create_params={\"id\": d}, parent_id=parent_id)", "target_parent if not target_parent else target_parent.pk target_parent = make_simple_department(str(d), force_create_params={\"id\":", "= ProfileCategory.objects.get_default() nodes = ResourceType.get_instance_resource_nodes(pc) assert [(x[\"type\"], x[\"name\"]) for x", "= ResourceType.get_attributes_mapping(target_parent) assert attributes_mapping == expected def test_get_attributes_mapping_other(self): pc =", "or implied. 
See the License for the specific language governing", "make_simple_department pytestmark = pytest.mark.django_db class TestResourceTypeEnum: @pytest.mark.parametrize( \"is_leaf, path, f,", "KIND, either express or implied. See the License for the", "5), (True, \"/department,3440/department,3443/\", \"parent_id\", 3443), (False, \"/department,3440/department,3443/\", \"id\", 3443), ],", "assert attributes_mapping == expected def test_get_attributes_mapping_other(self): pc = ProfileCategory.objects.get_default() attributes_mapping", "to in writing, software distributed under the License is distributed", "parent_id=parent_id) attributes_mapping = ResourceType.get_attributes_mapping(target_parent) assert attributes_mapping == expected def test_get_attributes_mapping_other(self):", "Limited, a Tencent company. All rights reserved. Licensed under the", "target_parent else target_parent.pk target_parent = make_simple_department(d, parent_id=parent_id) # 只添加 parent,mptt", "nodes] == expected def test_get_resource_nodes_other(self): pc = ProfileCategory.objects.get_default() nodes =", "law or agreed to in writing, software distributed under the", "== v @pytest.mark.parametrize( \"dep_chain, expected\", [ ( [1000, 1001, 1002],", "None for d in dep_chain: parent_id = target_parent if not", "on an \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF", "License. \"\"\" import pytest from bkuser_core.bkiam.constants import ResourceType from bkuser_core.categories.models", "{} @pytest.mark.parametrize( \"dep_chain,expected\", [ ( [\"a\", \"b\", \"c\"], [ (\"category\",", "[ ( [\"a\", \"b\", \"c\"], [ (\"category\", \"默认目录\"), (\"department\", \"a\"),", "at http://opensource.org/licenses/MIT Unless required by applicable law or agreed to", "ResourceType.get_instance_resource_nodes(target_parent) assert [(x[\"type\"], x[\"name\"]) for x in nodes] == expected", "dep_chain, expected): target_parent = None for d in dep_chain: parent_id", "== expected def test_get_attributes_mapping_other(self): pc = ProfileCategory.objects.get_default() attributes_mapping = ResourceType.get_attributes_mapping(pc)", "\"默认目录\"), (\"department\", \"a\"), (\"department\", \"b\"), (\"department\", \"c\"), ], ), (", "\"dep_chain, expected\", [ ( [1000, 1001, 1002], {\"_bk_iam_path_\": \"/category,1/department,1000/department,1001/department,1002/\"}, ),", "for the specific language governing permissions and limitations under the", "3443), (False, \"/category,5/department,3440/department,3443/\", \"id\", 3443), (True, \"/category,5/\", \"category_id\", 5), (False,", "expected def test_get_attributes_mapping_other(self): pc = ProfileCategory.objects.get_default() attributes_mapping = ResourceType.get_attributes_mapping(pc) assert", "-*- coding: utf-8 -*- \"\"\" TencentBlueKing is pleased to support", "= {\"value\": path} if not is_leaf: data[\"node_type\"] = \"non-leaf\" f,", "attributes_mapping = ResourceType.get_attributes_mapping(target_parent) assert attributes_mapping == expected def test_get_attributes_mapping_other(self): pc", "nodes = ResourceType.get_instance_resource_nodes(pc) assert [(x[\"type\"], x[\"name\"]) for x in nodes]", "@pytest.mark.parametrize( \"is_leaf, path, f, v\", [ (True, \"/category,5/department,3440/department,3443/\", \"parent_id\", 3443),", "= \"non-leaf\" f, v = path_method(data) assert f == f", "= target_parent if not target_parent else target_parent.pk target_parent = make_simple_department(d,", "the License for the specific language governing permissions and limitations", "may not use this file 
except in compliance with the", "implied. See the License for the specific language governing permissions", "WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or", "You may obtain a copy of the License at http://opensource.org/licenses/MIT", "\"/category,5/\", \"category_id\", 5), (True, \"/department,3440/department,3443/\", \"parent_id\", 3443), (False, \"/department,3440/department,3443/\", \"id\",", "ResourceType.get_attributes_mapping(pc) assert attributes_mapping == {} @pytest.mark.parametrize( \"dep_chain,expected\", [ ( [\"a\",", "data[\"node_type\"] = \"non-leaf\" f, v = path_method(data) assert f ==", "== {} @pytest.mark.parametrize( \"dep_chain,expected\", [ ( [\"a\", \"b\", \"c\"], [", "Copyright (C) 2017-2021 THL A29 Limited, a Tencent company. All", "from bkuser_core.tests.utils import make_simple_department pytestmark = pytest.mark.django_db class TestResourceTypeEnum: @pytest.mark.parametrize(", "of the License at http://opensource.org/licenses/MIT Unless required by applicable law", "test_get_key_mapping(self, is_leaf, path, f, v): key_mapping = ResourceType.get_key_mapping(ResourceType.DEPARTMENT) path_method =", "data = {\"value\": path} if not is_leaf: data[\"node_type\"] = \"non-leaf\"", "( [\"a\", \"b\", \"c\"], [ (\"category\", \"默认目录\"), (\"department\", \"a\"), (\"department\",", "writing, software distributed under the License is distributed on an", "[1000], {\"_bk_iam_path_\": \"/category,1/department,1000/\"}, ), ], ) def test_get_attributes_mapping(self, dep_chain, expected):", "\"dep_chain,expected\", [ ( [\"a\", \"b\", \"c\"], [ (\"category\", \"默认目录\"), (\"department\",", "2017-2021 THL A29 Limited, a Tencent company. All rights reserved.", "@pytest.mark.parametrize( \"dep_chain, expected\", [ ( [1000, 1001, 1002], {\"_bk_iam_path_\": \"/category,1/department,1000/department,1001/department,1002/\"},", "in compliance with the License. You may obtain a copy", "agreed to in writing, software distributed under the License is", ") def test_get_key_mapping(self, is_leaf, path, f, v): key_mapping = ResourceType.get_key_mapping(ResourceType.DEPARTMENT)", "f, v\", [ (True, \"/category,5/department,3440/department,3443/\", \"parent_id\", 3443), (False, \"/category,5/department,3440/department,3443/\", \"id\",", "( [\"a\", \"b\"], [(\"category\", \"默认目录\"), (\"department\", \"a\"), (\"department\", \"b\")], ),", "(\"department\", \"b\"), (\"department\", \"c\"), ], ), ( [\"a\", \"b\"], [(\"category\",", "], ), ( [\"a\", \"b\"], [(\"category\", \"默认目录\"), (\"department\", \"a\"), (\"department\",", "1001, 1002], {\"_bk_iam_path_\": \"/category,1/department,1000/department,1001/department,1002/\"}, ), ( [1000], {\"_bk_iam_path_\": \"/category,1/department,1000/\"}, ),", "\"c\"], [ (\"category\", \"默认目录\"), (\"department\", \"a\"), (\"department\", \"b\"), (\"department\", \"c\"),", "@pytest.mark.parametrize( \"dep_chain,expected\", [ ( [\"a\", \"b\", \"c\"], [ (\"category\", \"默认目录\"),", "TencentBlueKing is pleased to support the open source community by", "if not target_parent else target_parent.pk target_parent = make_simple_department(d, parent_id=parent_id) #", "either express or implied. 
See the License for the specific", "utf-8 -*- \"\"\" TencentBlueKing is pleased to support the open", "BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express", "只添加 parent,mptt 树需要重建 Department.tree_objects.rebuild() nodes = ResourceType.get_instance_resource_nodes(target_parent) assert [(x[\"type\"], x[\"name\"])", "\"License\"); you may not use this file except in compliance", "License is distributed on an \"AS IS\" BASIS, WITHOUT WARRANTIES", "support the open source community by making 蓝鲸智云-用户管理(Bk-User) available. Copyright", "= ResourceType.get_instance_resource_nodes(target_parent) assert [(x[\"type\"], x[\"name\"]) for x in nodes] ==", "v\", [ (True, \"/category,5/department,3440/department,3443/\", \"parent_id\", 3443), (False, \"/category,5/department,3440/department,3443/\", \"id\", 3443),", "License for the specific language governing permissions and limitations under", "License at http://opensource.org/licenses/MIT Unless required by applicable law or agreed", "target_parent if not target_parent else target_parent.pk target_parent = make_simple_department(d, parent_id=parent_id)", "def test_get_attributes_mapping(self, dep_chain, expected): target_parent = None for d in", "for d in dep_chain: parent_id = target_parent if not target_parent", "assert attributes_mapping == {} @pytest.mark.parametrize( \"dep_chain,expected\", [ ( [\"a\", \"b\",", "test_get_attributes_mapping(self, dep_chain, expected): target_parent = None for d in dep_chain:", "\"id\", 3443), (True, \"/category,5/\", \"category_id\", 5), (False, \"/category,5/\", \"category_id\", 5),", "bkuser_core.bkiam.constants import ResourceType from bkuser_core.categories.models import Department, ProfileCategory from bkuser_core.tests.utils", "(\"department\", \"a\"), (\"department\", \"b\")], ), ], ) def test_get_resource_nodes_dep(self, dep_chain,", "f == f assert v == v @pytest.mark.parametrize( \"dep_chain, expected\",", "(\"department\", \"c\"), ], ), ( [\"a\", \"b\"], [(\"category\", \"默认目录\"), (\"department\",", "except in compliance with the License. You may obtain a", ") def test_get_attributes_mapping(self, dep_chain, expected): target_parent = None for d", "rights reserved. Licensed under the MIT License (the \"License\"); you", "dep_chain: parent_id = target_parent if not target_parent else target_parent.pk target_parent", "compliance with the License. You may obtain a copy of", "[(x[\"type\"], x[\"name\"]) for x in nodes] == expected def test_get_resource_nodes_other(self):", "\"\"\" import pytest from bkuser_core.bkiam.constants import ResourceType from bkuser_core.categories.models import", "target_parent.pk target_parent = make_simple_department(d, parent_id=parent_id) # 只添加 parent,mptt 树需要重建 Department.tree_objects.rebuild()", "language governing permissions and limitations under the License. 
\"\"\" import", "target_parent.pk target_parent = make_simple_department(str(d), force_create_params={\"id\": d}, parent_id=parent_id) attributes_mapping = ResourceType.get_attributes_mapping(target_parent)", "path, f, v): key_mapping = ResourceType.get_key_mapping(ResourceType.DEPARTMENT) path_method = key_mapping[\"department._bk_iam_path_\"] data", "License (the \"License\"); you may not use this file except", "def test_get_resource_nodes_dep(self, dep_chain, expected): target_parent = None for d in", "from bkuser_core.bkiam.constants import ResourceType from bkuser_core.categories.models import Department, ProfileCategory from", "target_parent else target_parent.pk target_parent = make_simple_department(str(d), force_create_params={\"id\": d}, parent_id=parent_id) attributes_mapping", "All rights reserved. Licensed under the MIT License (the \"License\");", "target_parent = make_simple_department(str(d), force_create_params={\"id\": d}, parent_id=parent_id) attributes_mapping = ResourceType.get_attributes_mapping(target_parent) assert", "\"/department,3440/department,3443/\", \"parent_id\", 3443), (False, \"/department,3440/department,3443/\", \"id\", 3443), ], ) def", "( [1000, 1001, 1002], {\"_bk_iam_path_\": \"/category,1/department,1000/department,1001/department,1002/\"}, ), ( [1000], {\"_bk_iam_path_\":", "# -*- coding: utf-8 -*- \"\"\" TencentBlueKing is pleased to", "parent,mptt 树需要重建 Department.tree_objects.rebuild() nodes = ResourceType.get_instance_resource_nodes(target_parent) assert [(x[\"type\"], x[\"name\"]) for", "\"b\"), (\"department\", \"c\"), ], ), ( [\"a\", \"b\"], [(\"category\", \"默认目录\"),", "THL A29 Limited, a Tencent company. All rights reserved. Licensed", "MIT License (the \"License\"); you may not use this file", "d}, parent_id=parent_id) attributes_mapping = ResourceType.get_attributes_mapping(target_parent) assert attributes_mapping == expected def", "is_leaf, path, f, v): key_mapping = ResourceType.get_key_mapping(ResourceType.DEPARTMENT) path_method = key_mapping[\"department._bk_iam_path_\"]", "def test_get_resource_nodes_other(self): pc = ProfileCategory.objects.get_default() nodes = ResourceType.get_instance_resource_nodes(pc) assert [(x[\"type\"],", "x[\"name\"]) for x in nodes] == expected def test_get_resource_nodes_other(self): pc", "= pytest.mark.django_db class TestResourceTypeEnum: @pytest.mark.parametrize( \"is_leaf, path, f, v\", [", "= target_parent if not target_parent else target_parent.pk target_parent = make_simple_department(str(d),", "ProfileCategory.objects.get_default() attributes_mapping = ResourceType.get_attributes_mapping(pc) assert attributes_mapping == {} @pytest.mark.parametrize( \"dep_chain,expected\",", "\"/category,5/department,3440/department,3443/\", \"parent_id\", 3443), (False, \"/category,5/department,3440/department,3443/\", \"id\", 3443), (True, \"/category,5/\", \"category_id\",", "company. All rights reserved. 
Licensed under the MIT License (the", "{\"value\": path} if not is_leaf: data[\"node_type\"] = \"non-leaf\" f, v", ") def test_get_resource_nodes_dep(self, dep_chain, expected): target_parent = None for d", "(\"department\", \"b\")], ), ], ) def test_get_resource_nodes_dep(self, dep_chain, expected): target_parent", "\"id\", 3443), ], ) def test_get_key_mapping(self, is_leaf, path, f, v):", "\"b\")], ), ], ) def test_get_resource_nodes_dep(self, dep_chain, expected): target_parent =", "树需要重建 Department.tree_objects.rebuild() nodes = ResourceType.get_instance_resource_nodes(target_parent) assert [(x[\"type\"], x[\"name\"]) for x", "ProfileCategory from bkuser_core.tests.utils import make_simple_department pytestmark = pytest.mark.django_db class TestResourceTypeEnum:", "Unless required by applicable law or agreed to in writing,", "by applicable law or agreed to in writing, software distributed", "== f assert v == v @pytest.mark.parametrize( \"dep_chain, expected\", [", "( [1000], {\"_bk_iam_path_\": \"/category,1/department,1000/\"}, ), ], ) def test_get_attributes_mapping(self, dep_chain,", "else target_parent.pk target_parent = make_simple_department(d, parent_id=parent_id) # 只添加 parent,mptt 树需要重建", "from bkuser_core.categories.models import Department, ProfileCategory from bkuser_core.tests.utils import make_simple_department pytestmark", "express or implied. See the License for the specific language", "= ResourceType.get_key_mapping(ResourceType.DEPARTMENT) path_method = key_mapping[\"department._bk_iam_path_\"] data = {\"value\": path} if", "\"parent_id\", 3443), (False, \"/department,3440/department,3443/\", \"id\", 3443), ], ) def test_get_key_mapping(self,", "\"\"\" TencentBlueKing is pleased to support the open source community", "Tencent company. All rights reserved. Licensed under the MIT License", "(\"category\", \"默认目录\"), (\"department\", \"a\"), (\"department\", \"b\"), (\"department\", \"c\"), ], ),", "community by making 蓝鲸智云-用户管理(Bk-User) available. Copyright (C) 2017-2021 THL A29", "\"parent_id\", 3443), (False, \"/category,5/department,3440/department,3443/\", \"id\", 3443), (True, \"/category,5/\", \"category_id\", 5),", "(True, \"/category,5/\", \"category_id\", 5), (False, \"/category,5/\", \"category_id\", 5), (True, \"/department,3440/department,3443/\",", "(\"department\", \"a\"), (\"department\", \"b\"), (\"department\", \"c\"), ], ), ( [\"a\",", "with the License. You may obtain a copy of the", "), ( [\"a\", \"b\"], [(\"category\", \"默认目录\"), (\"department\", \"a\"), (\"department\", \"b\")],", "3443), ], ) def test_get_key_mapping(self, is_leaf, path, f, v): key_mapping", "in nodes] == expected def test_get_resource_nodes_other(self): pc = ProfileCategory.objects.get_default() nodes", "open source community by making 蓝鲸智云-用户管理(Bk-User) available. Copyright (C) 2017-2021", "specific language governing permissions and limitations under the License. 
\"\"\"", "assert [(x[\"type\"], x[\"name\"]) for x in nodes] == [(\"category\", \"默认目录\")]", "test_get_resource_nodes_dep(self, dep_chain, expected): target_parent = None for d in dep_chain:", "), ( [1000], {\"_bk_iam_path_\": \"/category,1/department,1000/\"}, ), ], ) def test_get_attributes_mapping(self,", "), ], ) def test_get_attributes_mapping(self, dep_chain, expected): target_parent = None", "applicable law or agreed to in writing, software distributed under", "[ (True, \"/category,5/department,3440/department,3443/\", \"parent_id\", 3443), (False, \"/category,5/department,3440/department,3443/\", \"id\", 3443), (True,", "v): key_mapping = ResourceType.get_key_mapping(ResourceType.DEPARTMENT) path_method = key_mapping[\"department._bk_iam_path_\"] data = {\"value\":", "\"默认目录\"), (\"department\", \"a\"), (\"department\", \"b\")], ), ], ) def test_get_resource_nodes_dep(self,", "the License is distributed on an \"AS IS\" BASIS, WITHOUT", "is pleased to support the open source community by making", "\"non-leaf\" f, v = path_method(data) assert f == f assert", "the specific language governing permissions and limitations under the License.", "== expected def test_get_resource_nodes_other(self): pc = ProfileCategory.objects.get_default() nodes = ResourceType.get_instance_resource_nodes(pc)", "<reponame>Chace-wang/bk-user # -*- coding: utf-8 -*- \"\"\" TencentBlueKing is pleased", "[ ( [1000, 1001, 1002], {\"_bk_iam_path_\": \"/category,1/department,1000/department,1001/department,1002/\"}, ), ( [1000],", "governing permissions and limitations under the License. \"\"\" import pytest", "\"category_id\", 5), (False, \"/category,5/\", \"category_id\", 5), (True, \"/department,3440/department,3443/\", \"parent_id\", 3443),", "import ResourceType from bkuser_core.categories.models import Department, ProfileCategory from bkuser_core.tests.utils import", "蓝鲸智云-用户管理(Bk-User) available. Copyright (C) 2017-2021 THL A29 Limited, a Tencent", "or agreed to in writing, software distributed under the License", "else target_parent.pk target_parent = make_simple_department(str(d), force_create_params={\"id\": d}, parent_id=parent_id) attributes_mapping =", "may obtain a copy of the License at http://opensource.org/licenses/MIT Unless", "\"b\", \"c\"], [ (\"category\", \"默认目录\"), (\"department\", \"a\"), (\"department\", \"b\"), (\"department\",", "(True, \"/department,3440/department,3443/\", \"parent_id\", 3443), (False, \"/department,3440/department,3443/\", \"id\", 3443), ], )", "= make_simple_department(d, parent_id=parent_id) # 只添加 parent,mptt 树需要重建 Department.tree_objects.rebuild() nodes =", "reserved. Licensed under the MIT License (the \"License\"); you may", "OF ANY KIND, either express or implied. 
See the License", "expected\", [ ( [1000, 1001, 1002], {\"_bk_iam_path_\": \"/category,1/department,1000/department,1001/department,1002/\"}, ), (", "under the MIT License (the \"License\"); you may not use", "[ (\"category\", \"默认目录\"), (\"department\", \"a\"), (\"department\", \"b\"), (\"department\", \"c\"), ],", "to support the open source community by making 蓝鲸智云-用户管理(Bk-User) available.", "target_parent = make_simple_department(d, parent_id=parent_id) # 只添加 parent,mptt 树需要重建 Department.tree_objects.rebuild() nodes", "= ProfileCategory.objects.get_default() attributes_mapping = ResourceType.get_attributes_mapping(pc) assert attributes_mapping == {} @pytest.mark.parametrize(", "coding: utf-8 -*- \"\"\" TencentBlueKing is pleased to support the", "f assert v == v @pytest.mark.parametrize( \"dep_chain, expected\", [ (", "[\"a\", \"b\"], [(\"category\", \"默认目录\"), (\"department\", \"a\"), (\"department\", \"b\")], ), ],", "TestResourceTypeEnum: @pytest.mark.parametrize( \"is_leaf, path, f, v\", [ (True, \"/category,5/department,3440/department,3443/\", \"parent_id\",", "pytest from bkuser_core.bkiam.constants import ResourceType from bkuser_core.categories.models import Department, ProfileCategory", "is_leaf: data[\"node_type\"] = \"non-leaf\" f, v = path_method(data) assert f", "pc = ProfileCategory.objects.get_default() nodes = ResourceType.get_instance_resource_nodes(pc) assert [(x[\"type\"], x[\"name\"]) for", "Department, ProfileCategory from bkuser_core.tests.utils import make_simple_department pytestmark = pytest.mark.django_db class", "License. You may obtain a copy of the License at", "5), (False, \"/category,5/\", \"category_id\", 5), (True, \"/department,3440/department,3443/\", \"parent_id\", 3443), (False,", "an \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY", "(False, \"/category,5/department,3440/department,3443/\", \"id\", 3443), (True, \"/category,5/\", \"category_id\", 5), (False, \"/category,5/\",", "attributes_mapping == expected def test_get_attributes_mapping_other(self): pc = ProfileCategory.objects.get_default() attributes_mapping =", "{\"_bk_iam_path_\": \"/category,1/department,1000/\"}, ), ], ) def test_get_attributes_mapping(self, dep_chain, expected): target_parent", "d in dep_chain: parent_id = target_parent if not target_parent else", "ResourceType.get_attributes_mapping(target_parent) assert attributes_mapping == expected def test_get_attributes_mapping_other(self): pc = ProfileCategory.objects.get_default()", "import make_simple_department pytestmark = pytest.mark.django_db class TestResourceTypeEnum: @pytest.mark.parametrize( \"is_leaf, path,", "nodes = ResourceType.get_instance_resource_nodes(target_parent) assert [(x[\"type\"], x[\"name\"]) for x in nodes]" ]
[ "<filename>votesim/benchmarks/__init__.py # from votesim.benchmarks.benchrunner import ( # run_benchmark, # get_benchmarks,", "import ( # run_benchmark, # get_benchmarks, # post_benchmark, # plot_benchmark,", "post_benchmark, # plot_benchmark, # ) from votesim.benchmarks import runtools, simple", "# from votesim.benchmarks.benchrunner import ( # run_benchmark, # get_benchmarks, #", "# run_benchmark, # get_benchmarks, # post_benchmark, # plot_benchmark, # )", "# get_benchmarks, # post_benchmark, # plot_benchmark, # ) from votesim.benchmarks", "# post_benchmark, # plot_benchmark, # ) from votesim.benchmarks import runtools,", "votesim.benchmarks.benchrunner import ( # run_benchmark, # get_benchmarks, # post_benchmark, #", "( # run_benchmark, # get_benchmarks, # post_benchmark, # plot_benchmark, #", "run_benchmark, # get_benchmarks, # post_benchmark, # plot_benchmark, # ) from", "get_benchmarks, # post_benchmark, # plot_benchmark, # ) from votesim.benchmarks import", "from votesim.benchmarks.benchrunner import ( # run_benchmark, # get_benchmarks, # post_benchmark," ]
[ "in csv_data: # datestr = row[0] #.replace('/', '-') # #", "os.environ[\"host\"] port = os.environ[\"port\"] user = os.environ[\"user\"] password = os.environ[\"pass\"]", "all_lines = [line for line in csv_data] # print(data) #", "print(datestr) # date_obj = datetime.strptime(datestr, '%d/%m/%Y %H:%M') # # print(date_obj)", "os from dotenv import load_dotenv load_dotenv() dbname = os.environ[\"db\"] host", "lambda in cloudwatch https://eu-west-1.console.aws.amazon.com/cloudwatch/home?region=eu-west-1#logsV2:log-groups\"} # Form all the lines of", "location = str(row[1]) # order = str(row[3]) # total =", "informaition key = event['Records'][0]['s3']['object']['key'] bucket = event['Records'][0]['s3']['bucket']['name'] # use boto3", "data = s3_object['Body'].read().decode('utf-8') all_lines = [] # read CSV #", "= os.environ[\"port\"] user = os.environ[\"user\"] password = os.environ[\"pass\"] connection =", "and bucket informaition key = event['Records'][0]['s3']['object']['key'] bucket = event['Records'][0]['s3']['bucket']['name'] #", "= ps.connect(dbname=dbname, host=host, port=port, user=user, password=password) def handle(event, context): cursor", "dbname = os.environ[\"db\"] host = os.environ[\"host\"] port = os.environ[\"port\"] user", "# use boto3 library to get object from S3 s3", "a list of lists # all_lines = [line for line", "= datetime.strptime(datestr, '%d/%m/%Y %H:%M') # # print(date_obj) # # time", "# Get key and bucket informaition key = event['Records'][0]['s3']['object']['key'] bucket", "all the lines of data into a list of lists", "row in csv_data: # datestr = row[0] #.replace('/', '-') #", "s3.get_object(Bucket = bucket, Key = key) data = s3_object['Body'].read().decode('utf-8') all_lines", "app import csv import psycopg2 as ps import os from", "= os.environ[\"db\"] host = os.environ[\"host\"] port = os.environ[\"port\"] user =", "= str(row[4]) # all_lines.append({'date':date_obj, 'location':location, 'order':order, 'total':total}) # return cached_list", "cursor = connection.cursor() cursor.execute(\"SELECT 1\", ()) print(cursor.fetchall()) # Get key", "read CSV # csv_data = csv.reader(data.splitlines()) # for row in", "import psycopg2 as ps import os from dotenv import load_dotenv", "csv_data: # datestr = row[0] #.replace('/', '-') # # print(datestr)", "for line in all_lines] print_all_lines return {\"message\": \"success!!! 
import boto3
import src.app as app
import csv
import psycopg2 as ps
import os
from dotenv import load_dotenv

load_dotenv()
dbname = os.environ["db"]
host = os.environ["host"]
port = os.environ["port"]
user = os.environ["user"]
password = os.environ["pass"]
connection = ps.connect(dbname=dbname, host=host, port=port, user=user, password=password)


def handle(event, context):
    cursor = connection.cursor()
    cursor.execute("SELECT 1", ())
    print(cursor.fetchall())

    # Get key and bucket information from the triggering S3 event
    key = event['Records'][0]['s3']['object']['key']
    bucket = event['Records'][0]['s3']['bucket']['name']

    # use boto3 library to get object from S3
    s3 = boto3.client('s3')
    s3_object = s3.get_object(Bucket=bucket, Key=key)
    data = s3_object['Body'].read().decode('utf-8')
    all_lines = []

    # read CSV (currently disabled; needs `from datetime import datetime` if re-enabled)
    # csv_data = csv.reader(data.splitlines())
    # for row in csv_data:
    #     datestr = row[0]  # .replace('/', '-')
    #     # print(datestr)
    #     date_obj = datetime.strptime(datestr, '%d/%m/%Y %H:%M')
    #     # print(date_obj)
    #     # time = str(row[0][-5:])
    #     location = str(row[1])
    #     order = str(row[3])
    #     total = str(row[4])
    #     all_lines.append({'date': date_obj, 'location': location, 'order': order, 'total': total})
    # return cached_list
    # print(all_lines)

    app.start_app(all_lines, data)
    for line in all_lines:
        print(line)
    return {"message": "success!!! Check the cloud watch logs for this lambda in cloudwatch https://eu-west-1.console.aws.amazon.com/cloudwatch/home?region=eu-west-1#logsV2:log-groups"}
    # Form all the lines of data into a list of lists
    # all_lines = [line for line in csv_data]
    # print(data)
    # print(all_lines)
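Because the handler reads everything it needs from the S3 event payload, it can be exercised outside AWS with a minimal, made-up event. The bucket and key below are invented for illustration; the db/host/port/user/pass environment variables still have to point at a reachable Postgres instance and the object must actually exist in S3.

# Hypothetical local invocation; the bucket and key names are illustrative only.
sample_event = {
    "Records": [
        {
            "s3": {
                "bucket": {"name": "example-transactions-bucket"},
                "object": {"key": "uploads/example_orders.csv"},
            }
        }
    ]
}

if __name__ == "__main__":
    print(handle(sample_event, None))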
# -*- coding: utf-8 -*-
# -----------------------------------------------------------------------------
# (C) British Crown Copyright 2017-2021 Met Office.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
#   list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright notice,
#   this list of conditions and the following disclaimer in the documentation
#   and/or other materials provided with the distribution.
#
# * Neither the name of the copyright holder nor the names of its
#   contributors may be used to endorse or promote products derived from
#   this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
"""Tests of precipitation_type utilities"""

import numpy as np
import pytest
from iris.exceptions import CoordinateNotFoundError

from improver.metadata.constants import FLOAT_DTYPE
from improver.precipitation_type.utilities import make_shower_condition_cube
from improver.synthetic_data.set_up_test_cubes import set_up_probability_cube


def set_up_test_cube(n_thresholds=1):
    """Set up a cube testing shower condition conversion"""
    thresholds = np.arange(n_thresholds)
    shape = [2, 2]
    shape = [n_thresholds, *shape] if n_thresholds > 0 else shape
    data = np.ones(shape, dtype=FLOAT_DTYPE)
    cube = set_up_probability_cube(
        data,
        thresholds,
        variable_name="texture_of_cloud_area_fraction",
        threshold_units=1,
        spatial_grid="equalarea",
    )
    return cube


def test_basic():
    """Test that with a valid input the cube is transformed into a shower
    condition cube."""
    cube = set_up_test_cube()
    result = make_shower_condition_cube(cube)
    threshold_coord = result.coord(var_name="threshold")
    assert result.name() == "probability_of_shower_condition_above_threshold"
    assert result.dtype == FLOAT_DTYPE
    assert (result.data == cube.data).all()
    assert threshold_coord.name() == "shower_condition"
    assert threshold_coord.units == 1


def test_no_threshold_coord():
    """Test an exception is raised if the proxy diagnostic cube does not
    have a threshold coordinate."""
    cube = set_up_test_cube()
    cube.remove_coord("texture_of_cloud_area_fraction")
    expected = "Input has no threshold coordinate and cannot be used"
    with pytest.raises(CoordinateNotFoundError, match=expected):
        make_shower_condition_cube(cube)


def test_multi_valued_threshold_coord():
    """Test an exception is raised if the proxy diagnostic cube has a multi
    valued threshold coordinate."""
    cube = set_up_test_cube(n_thresholds=2)
    expected = "Expected a single valued threshold coordinate.*"
    with pytest.raises(ValueError, match=expected):
        make_shower_condition_cube(cube)
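Assuming this module is saved somewhere like improver_tests/precipitation_type/ (the path is a guess, not taken from the source), the three tests can be run on their own through pytest's programmatic entry point:

# The file path below is an assumption; point it at wherever this module lives.
import pytest

if __name__ == "__main__":
    raise SystemExit(pytest.main(["-v", "improver_tests/precipitation_type/test_utilities.py"]))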
from .db_conn import ModelSetup


class ItemsModel(ModelSetup):
    '''Handles the data logic of the items section'''

    def __init__(
            self, name=None, price=None, quantity=None, category_id=None,
            reorder_point=None, auth=None):
        '''Initializes the variables for the items class'''
        self.name = name
        self.price = price
        self.quantity = quantity
        self.category_id = category_id
        self.reorder_point = reorder_point
        self.auth = auth

    def add_item(self, name, price, quantity, image, category_id, reorder_point, auth):
        '''Adds an item given the above arguments, then returns the created item'''
        model = ModelSetup()
        self.conn = model.conn
        self.cur = model.cur
        query = """INSERT INTO items(name, price, quantity, image, category, reorder_point, created_by)
                VALUES(%s,%s,%s,%s,%s,%s,%s);"""
        self.cur.execute(query, (name, price, quantity, image, category_id, reorder_point, auth))
        self.conn.commit()
        query_confirm = """SELECT * FROM items WHERE name = %s AND price = %s;"""
        self.cur.execute(query_confirm, (name, price))
        self.item = self.cur.fetchone()
        return self.item

    def get_all(self):
        '''gets all records of items in the database and returns them'''
        model = ModelSetup()
        self.conn = model.conn
        self.cur = model.cur
        query = """SELECT * FROM items;"""
        self.cur.execute(query)
        self.items = self.cur.fetchall()
        return self.items

    def get_by_id(self, item_id):
        '''retrieves one item by finding it using its unique item_id'''
        model = ModelSetup()
        self.conn = model.conn
        self.cur = model.cur
        query = """SELECT * FROM items WHERE item_id = %s;"""
        self.cur.execute(query, (item_id, ))
        self.item = self.cur.fetchone()
        return self.item

    def get_by_category(self, category):
        '''retrieves items by their category; all items in the same category are retrieved'''
        model = ModelSetup()
        self.conn = model.conn
        self.cur = model.cur
        query = """SELECT * FROM items WHERE category LIKE %s;"""
        self.cur.execute(query, (category, ))
        self.item = self.cur.fetchall()
        return self.item

    def get_by_name_and_price(self, name, price):
        '''retrieves one item by finding it using its unique name and price combination'''
        model = ModelSetup()
        self.conn = model.conn
        self.cur = model.cur
        query = """SELECT * FROM items WHERE name LIKE %s AND price = %s;"""
        self.cur.execute(query, (name, price))
        self.item = self.cur.fetchone()
        return self.item

    def update_item(self, item_id, price, quantity, image, category_id, reorder_point, auth):
        '''updates an item's details; the values in the db are changed to what is provided'''
        model = ModelSetup()
        self.conn = model.conn
        self.cur = model.cur
        query = """UPDATE items SET price = %s, quantity = %s, image = %s, category = %s,
                reorder_point = %s, created_by = %s WHERE item_id= %s """
        self.cur.execute(query, (price, quantity, image, category_id, reorder_point, auth, item_id))
        self.conn.commit()
        query_confirm = """SELECT * FROM items WHERE item_id = %s;"""
        self.cur.execute(query_confirm, (item_id, ))
        self.item = self.cur.fetchone()
        return self.item

    def update_item_quantity(self, item_id, quantity):
        '''updates an item's quantity; the stored quantity is set to the value provided'''
        model = ModelSetup()
        self.conn = model.conn
        self.cur = model.cur
        query = """UPDATE items SET quantity = %s WHERE item_id= %s """
        self.cur.execute(query, (quantity, item_id))
        self.conn.commit()
        query_confirm = """SELECT * FROM items WHERE item_id = %s;"""
        self.cur.execute(query_confirm, (item_id, ))
        self.item = self.cur.fetchone()
        return self.item

    def delete_item(self, item_id):
        '''deletes an item by finding it using the item_id'''
        model = ModelSetup()
        self.conn = model.conn
        self.cur = model.cur
        query = """DELETE FROM items WHERE item_id = %s"""
        self.cur.execute(query, (item_id, ))
        self.conn.commit()
        query_confirm = """SELECT * FROM items;"""
        self.cur.execute(query_confirm)
        self.items = self.cur.fetchall()
        return self.items
the", "query_confirm = \"\"\"SELECT * FROM items WHERE item_id = %s;\"\"\"", "model.conn self.cur = model.cur query = \"\"\"UPDATE items SET quantity", "'''Initializes the variables for the items class''' self.name = name", "= %s, quantity = %s, image = %s, category =", "\"\"\"DELETE FROM items WHERE item_id = %s\"\"\" self.cur.execute(query, (item_id, ))", "self.conn.commit() query_confirm = \"\"\"SELECT * FROM items WHERE item_id =", "databas and returns them''' model = ModelSetup() self.conn = model.conn", "the db are changed to what is provided''' model =", "%s, category = %s, reorder_point = %s, created_by = %s", "FROM items WHERE item_id = %s;\"\"\" self.cur.execute(query, (item_id, )) self.item", "\"\"\"SELECT * FROM items WHERE item_id = %s;\"\"\" self.cur.execute(query_confirm, (item_id,", "%s\"\"\" self.cur.execute(query, (item_id, )) self.conn.commit() query_confirm = \"\"\"SELECT * FROM", "category_id, reorder_point, auth)) self.conn.commit() query_confirm = \"\"\"SELECT * FROM items", "item_id = %s\"\"\" self.cur.execute(query, (item_id, )) self.conn.commit() query_confirm = \"\"\"SELECT", "class ItemsModel(ModelSetup): '''Handles the data logic of the items section'''", "quantity = %s WHERE item_id= %s \"\"\" self.cur.execute(query, (quantity, item_id))", "LIKE %s AND price = %s;\"\"\" self.cur.execute(query, (name, price)) self.item", "%s \"\"\" self.cur.execute( query, (price, quantity, image, category_id, reorder_point, auth,", "(category)) self.item = self.cur.fetchall() return self.item def get_by_name_and_price(self, name, price):", "%s;\"\"\" self.cur.execute(query_confirm, (name, price)) self.item = self.cur.fetchone() return self.item def", "in the same category are retrieved''' model = ModelSetup() self.conn", "= self.cur.fetchone() return self.item def delete_item(self, item_id): '''deletes an item", "the created item''' model = ModelSetup() self.conn = model.conn self.cur", "self.cur.execute(query_confirm, (item_id, )) self.item = self.cur.fetchone() return self.item def delete_item(self,", "self.cur = model.cur query = \"\"\"UPDATE items SET price =", "self.cur.execute(query, (item_id, )) self.item = self.cur.fetchone() return self.item def get_by_category(self,", "\"\"\"SELECT * FROM items WHERE item_id = %s;\"\"\" self.cur.execute(query, (item_id,", "= \"\"\"SELECT * FROM items WHERE item_id = %s;\"\"\" self.cur.execute(query,", "them using their category. 
all items in the same category", "retrieved''' model = ModelSetup() self.conn = model.conn self.cur = model.cur", "the item_id''' model = ModelSetup() self.conn = model.conn self.cur =", "items class''' self.name = name self.price = price self.quantity =", "= model.conn self.cur = model.cur query = \"\"\"DELETE FROM items", "get_all(self): '''gets all records of items in the databas and", "\"\"\"SELECT * FROM items;\"\"\" self.cur.execute(query_confirm) self.items = self.cur.fetchall() return self.items", "get_by_id(self, item_id): '''retrieves one item by finding them using their", "self.cur.execute(query, (name, price)) self.item = self.cur.fetchone() return self.item def update_item(", "self.item = self.cur.fetchone() return self.item def get_by_category(self, category): '''retrieves items", "item_id = %s;\"\"\" self.cur.execute(query, (item_id, )) self.item = self.cur.fetchone() return", "= model.cur query = \"\"\"DELETE FROM items WHERE item_id =", "\"\"\"SELECT * FROM items WHERE category LIKE %s;\"\"\" self.cur.execute(query, (category))", "ItemsModel(ModelSetup): '''Handles the data logic of the items section''' def", "delete_item(self, item_id): '''deletes an item by finding them using the", "and returns them''' model = ModelSetup() self.conn = model.conn self.cur", "self.cur.execute(query) self.items = self.cur.fetchall() return self.items def get_by_id(self, item_id): '''retrieves", "self.item def get_by_category(self, category): '''retrieves items by finding them using", "one item by finding them using their unique item_id''' model", "= %s, category = %s, reorder_point = %s, created_by =", "quantity = %s, image = %s, category = %s, reorder_point", "self.cur.execute(query, (quantity, item_id)) self.conn.commit() query_confirm = \"\"\"SELECT * FROM items", "def get_by_name_and_price(self, name, price): '''retrieves one item by finding them", "using their unique item_id''' model = ModelSetup() self.conn = model.conn", "(item_id, )) self.conn.commit() query_confirm = \"\"\"SELECT * FROM items;\"\"\" self.cur.execute(query_confirm)", "= %s, created_by = %s WHERE item_id= %s \"\"\" self.cur.execute(", "\"\"\"INSERT INTO items(name, price, quantity, image, category, reorder_point, created_by)\\ VALUES(%s,%s,%s,%s,%s,%s,%s);\"\"\"", "category. all items in the same category are retrieved''' model", "image, category_id, reorder_point, auth): '''updates item's details. 
the values in", "ModelSetup class ItemsModel(ModelSetup): '''Handles the data logic of the items", "= %s;\"\"\" self.cur.execute(query, (name, price)) self.item = self.cur.fetchone() return self.item", "%s, quantity = %s, image = %s, category = %s,", "WHERE item_id= %s \"\"\" self.cur.execute(query, (quantity, item_id)) self.conn.commit() query_confirm =", "return self.item def update_item( self, item_id, price, quantity, image, category_id,", "items WHERE category LIKE %s;\"\"\" self.cur.execute(query, (category)) self.item = self.cur.fetchall()", "import ModelSetup class ItemsModel(ModelSetup): '''Handles the data logic of the", "self.conn = model.conn self.cur = model.cur query = \"\"\"DELETE FROM", "def delete_item(self, item_id): '''deletes an item by finding them using", "update_item_quantity(self, item_id, quantity): '''updates item's quantity.adds the quantity added to", "%s WHERE item_id= %s \"\"\" self.cur.execute(query, (quantity, item_id)) self.conn.commit() query_confirm", "same category are retrieved''' model = ModelSetup() self.conn = model.conn", "self.item = self.cur.fetchone() return self.item def get_all(self): '''gets all records", "self.cur = model.cur query = \"\"\"INSERT INTO items(name, price, quantity,", "query_confirm = \"\"\"SELECT * FROM items WHERE name = %s", "ModelSetup() self.conn = model.conn self.cur = model.cur query = \"\"\"UPDATE", "reorder_point = %s, created_by = %s WHERE item_id= %s \"\"\"", "(quantity, item_id)) self.conn.commit() query_confirm = \"\"\"SELECT * FROM items WHERE", "\"\"\"SELECT * FROM items WHERE name = %s AND price", "of items in the databas and returns them''' model =", "self.cur = model.cur query = \"\"\"SELECT * FROM items;\"\"\" self.cur.execute(query)", "self, name, price, quantity, image, category_id, reorder_point, auth): '''Adds item", "price self.quantity = quantity self.category_id = category_id self.reorder_point = reorder_point", "price, quantity, image, category_id, reorder_point, auth)) self.conn.commit() query_confirm = \"\"\"SELECT", "ModelSetup() self.conn = model.conn self.cur = model.cur query = \"\"\"INSERT", "= model.cur query = \"\"\"UPDATE items SET price = %s,", "them using their unique unique combination''' model = ModelSetup() self.conn", "%s AND price = %s;\"\"\" self.cur.execute(query_confirm, (name, price)) self.item =", "query = \"\"\"INSERT INTO items(name, price, quantity, image, category, reorder_point,", "def get_all(self): '''gets all records of items in the databas", "model.conn self.cur = model.cur query = \"\"\"DELETE FROM items WHERE", "the items class''' self.name = name self.price = price self.quantity", "quantity, image, category_id, reorder_point, auth, item_id)) self.conn.commit() query_confirm = \"\"\"SELECT", "their category. all items in the same category are retrieved'''", "\"\"\" self.cur.execute( query, (price, quantity, image, category_id, reorder_point, auth, item_id))", "price)) self.item = self.cur.fetchone() return self.item def get_all(self): '''gets all", "details. 
the values in the db are changed to what", "= self.cur.fetchall() return self.item def get_by_name_and_price(self, name, price): '''retrieves one", "category = %s, reorder_point = %s, created_by = %s WHERE", "INTO items(name, price, quantity, image, category, reorder_point, created_by)\\ VALUES(%s,%s,%s,%s,%s,%s,%s);\"\"\" self.cur.execute(", "* FROM items;\"\"\" self.cur.execute(query) self.items = self.cur.fetchall() return self.items def", "quantity, image, category_id, reorder_point, auth): '''updates item's details. the values", "update_item( self, item_id, price, quantity, image, category_id, reorder_point, auth): '''updates", "image, category, reorder_point, created_by)\\ VALUES(%s,%s,%s,%s,%s,%s,%s);\"\"\" self.cur.execute( query, (name, price, quantity,", "%s \"\"\" self.cur.execute(query, (quantity, item_id)) self.conn.commit() query_confirm = \"\"\"SELECT *", "AND price = %s;\"\"\" self.cur.execute(query, (name, price)) self.item = self.cur.fetchone()", ")) self.conn.commit() query_confirm = \"\"\"SELECT * FROM items;\"\"\" self.cur.execute(query_confirm) self.items", "= \"\"\"SELECT * FROM items;\"\"\" self.cur.execute(query_confirm) self.items = self.cur.fetchall() return", "= %s WHERE item_id= %s \"\"\" self.cur.execute(query, (quantity, item_id)) self.conn.commit()", "category, reorder_point, created_by)\\ VALUES(%s,%s,%s,%s,%s,%s,%s);\"\"\" self.cur.execute( query, (name, price, quantity, image,", "\"\"\"SELECT * FROM items WHERE name LIKE %s AND price", "FROM items;\"\"\" self.cur.execute(query) self.items = self.cur.fetchall() return self.items def get_by_id(self,", "one item by finding them using their unique unique combination'''", "self.cur.fetchone() return self.item def get_by_category(self, category): '''retrieves items by finding", "their unique item_id''' model = ModelSetup() self.conn = model.conn self.cur", "= %s, image = %s, category = %s, reorder_point =", "return self.item def get_by_name_and_price(self, name, price): '''retrieves one item by", "image, category_id, reorder_point, auth): '''Adds item given the above arguements.", "return self.item def get_by_category(self, category): '''retrieves items by finding them", "items in the databas and returns them''' model = ModelSetup()", "by finding them using their unique item_id''' model = ModelSetup()", "item given the above arguements. Then returns the created item'''", "= %s AND price = %s;\"\"\" self.cur.execute(query_confirm, (name, price)) self.item", "class''' self.name = name self.price = price self.quantity = quantity", "them using the item_id''' model = ModelSetup() self.conn = model.conn", "= model.cur query = \"\"\"UPDATE items SET quantity = %s", "name LIKE %s AND price = %s;\"\"\" self.cur.execute(query, (name, price))", "'''updates item's details. 
the values in the db are changed", "(item_id, )) self.item = self.cur.fetchone() return self.item def update_item_quantity(self, item_id,", "WHERE item_id = %s\"\"\" self.cur.execute(query, (item_id, )) self.conn.commit() query_confirm =", "self.conn = model.conn self.cur = model.cur query = \"\"\"UPDATE items", "= model.conn self.cur = model.cur query = \"\"\"INSERT INTO items(name,", "from .db_conn import ModelSetup class ItemsModel(ModelSetup): '''Handles the data logic", "image, category_id, reorder_point, auth, item_id)) self.conn.commit() query_confirm = \"\"\"SELECT *", "using the item_id''' model = ModelSetup() self.conn = model.conn self.cur", "category are retrieved''' model = ModelSetup() self.conn = model.conn self.cur", "the values in the db are changed to what is", "self.price = price self.quantity = quantity self.category_id = category_id self.reorder_point", "using their unique unique combination''' model = ModelSetup() self.conn =", "self, name=None, price=None, quantity=None, category_id=None, reorder_point=None, auth=None): '''Initializes the variables", "%s;\"\"\" self.cur.execute(query, (item_id, )) self.item = self.cur.fetchone() return self.item def", "auth)) self.conn.commit() query_confirm = \"\"\"SELECT * FROM items WHERE name", "the same category are retrieved''' model = ModelSetup() self.conn =", "* FROM items WHERE item_id = %s;\"\"\" self.cur.execute(query_confirm, (item_id, ))", "reorder_point, created_by)\\ VALUES(%s,%s,%s,%s,%s,%s,%s);\"\"\" self.cur.execute( query, (name, price, quantity, image, category_id,", "= %s\"\"\" self.cur.execute(query, (item_id, )) self.conn.commit() query_confirm = \"\"\"SELECT *", "by finding them using their category. all items in the", "quantity, image, category_id, reorder_point, auth): '''Adds item given the above", "item by finding them using their unique item_id''' model =", "* FROM items WHERE category LIKE %s;\"\"\" self.cur.execute(query, (category)) self.item", "self.cur.execute(query, (category)) self.item = self.cur.fetchall() return self.item def get_by_name_and_price(self, name,", "self.items = self.cur.fetchall() return self.items def get_by_id(self, item_id): '''retrieves one", "model.conn self.cur = model.cur query = \"\"\"SELECT * FROM items;\"\"\"", "= \"\"\"DELETE FROM items WHERE item_id = %s\"\"\" self.cur.execute(query, (item_id,", "items WHERE item_id = %s\"\"\" self.cur.execute(query, (item_id, )) self.conn.commit() query_confirm", "self.category_id = category_id self.reorder_point = reorder_point self.auth = auth def", "self.conn = model.conn self.cur = model.cur query = \"\"\"SELECT *", "items SET quantity = %s WHERE item_id= %s \"\"\" self.cur.execute(query,", "all items in the same category are retrieved''' model =", "\"\"\"UPDATE items SET quantity = %s WHERE item_id= %s \"\"\"", "model.cur query = \"\"\"DELETE FROM items WHERE item_id = %s\"\"\"", "self.cur = model.cur query = \"\"\"DELETE FROM items WHERE item_id", "name=None, price=None, quantity=None, category_id=None, reorder_point=None, auth=None): '''Initializes the variables for", "query = \"\"\"SELECT * FROM items;\"\"\" self.cur.execute(query) self.items = self.cur.fetchall()", "the quantity added to the quantity available''' model = ModelSetup()", "self.conn = model.conn self.cur = model.cur query = \"\"\"INSERT INTO", "are changed to what is provided''' model = ModelSetup() self.conn", "price = %s, quantity = %s, image = %s, category", "WHERE name = %s AND price = %s;\"\"\" self.cur.execute(query_confirm, (name,", "self.cur.fetchone() 
return self.item def delete_item(self, item_id): '''deletes an item by", "query, (name, price, quantity, image, category_id, reorder_point, auth)) self.conn.commit() query_confirm", "records of items in the databas and returns them''' model", "by finding them using their unique unique combination''' model =", "FROM items WHERE item_id = %s;\"\"\" self.cur.execute(query_confirm, (item_id, )) self.item", "(item_id, )) self.item = self.cur.fetchone() return self.item def delete_item(self, item_id):", "* FROM items WHERE item_id = %s;\"\"\" self.cur.execute(query, (item_id, ))", "%s;\"\"\" self.cur.execute(query, (category)) self.item = self.cur.fetchall() return self.item def get_by_name_and_price(self,", "item_id''' model = ModelSetup() self.conn = model.conn self.cur = model.cur", "= \"\"\"SELECT * FROM items;\"\"\" self.cur.execute(query) self.items = self.cur.fetchall() return", "finding them using their unique item_id''' model = ModelSetup() self.conn", "return self.item def get_all(self): '''gets all records of items in", "get_by_name_and_price(self, name, price): '''retrieves one item by finding them using", "items WHERE name = %s AND price = %s;\"\"\" self.cur.execute(query_confirm,", "changed to what is provided''' model = ModelSetup() self.conn =", "items in the same category are retrieved''' model = ModelSetup()" ]
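The model above opens a fresh ModelSetup connection per call and confirms every write with a follow-up SELECT. A minimal usage sketch, assuming ModelSetup is configured against a live PostgreSQL database that already contains the items table referenced by the queries; the sample values, and the assumption that item_id is the first column of a returned row, are illustrative only:

# Usage sketch only; values and column positions are assumptions.
model = ItemsModel()

created = model.add_item('Laptop', 950.0, 10, 'laptop.png', 1, 2, 'admin')
item_id = created[0]                      # assumes item_id is the first column

model.update_item_quantity(item_id, 5)    # runs the UPDATE ... SET quantity = %s above
print(model.get_by_id(item_id))
model.delete_item(item_id)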
# linxGnu/armeria
from docutils.parsers.rst.roles import register_canonical_role, set_classes
from docutils.parsers.rst import directives
from docutils import nodes
from sphinx.writers.html import HTMLTranslator
from sphinx.errors import ExtensionError

import os
import re


def api_role(role, rawtext, text, lineno, inliner, options={}, content=[]):
    return api_role_internal(False, role, rawtext, text, lineno, inliner, options, content)


def apiplural_role(role, rawtext, text, lineno, inliner, options={}, content=[]):
    return api_role_internal(True, role, rawtext, text, lineno, inliner, options, content)


def api_role_internal(plural, role, rawtext, text, lineno, inliner, options, content):
    set_classes(options)
    classes = ['code', 'api-reference']
    if 'classes' in options:
        classes.extend(options['classes'])

    node = nodes.literal(rawtext, text, classes=classes, api_reference=True, is_plural=plural)
    return [node], []


def api_visit_literal(self, node, next_visitor):
    if 'api_reference' not in node.attributes:
        return next_visitor(self, node)

    env = self.builder.env
    javadoc_dir = os.path.abspath(env.config['javadoc_dir'])

    # Build the mappings from a simple class name to its Javadoc file.
    if not hasattr(env, '__javadoc_cache__'):
        env.__javadoc_mappings__ = javadoc_mappings = {}
        for dirname, subdirs, files in os.walk(javadoc_dir):
            for basename in files:
                if re.match(r'^[^A-Z]', basename) or not basename.endswith('.html'):
                    # Ignore the non-class files. We rely on the simple assumption that
                    # a class name always starts with an upper-case English alphabet.
                    continue
                simple_class_name = basename[:-5].replace('.', '$')
                javadoc_mappings[simple_class_name] = os.path.relpath(dirname, javadoc_dir) \
                    .replace(os.sep, '/') + '/' + basename
    else:
        javadoc_mappings = env.__javadoc_mappings__

    text = node.astext()
    if text.startswith('@'):
        text = text[1:]
        is_annotation = True
    else:
        is_annotation = False

    if text.find('.') != -1:
        # FQCN or package name.
        if re.fullmatch(r'^[^A-Z$]+$', text):
            # Package
            uri = text.replace('.', '/') + '/package-summary.html'
        else:
            # Class
            uri = text.replace('.', '/').replace('$', '.') + '.html'
            text = re.sub(r'^.*\.', '', text).replace('$', '.')
    else:
        # Simple class name; find from the pre-calculated mappings.
        if text not in javadoc_mappings:
            raise ExtensionError('Cannot find a class from Javadoc: ' + text)
        uri = javadoc_mappings[text]
        text = text.replace('$', '.')

    # Prepend the frame index.html path.
    uri = os.path.relpath(javadoc_dir, env.app.outdir).replace(os.sep, '/') + '/index.html?' + uri

    # Prepend the '@' back again if necessary.
    if is_annotation:
        text = '@' + text

    # Emit the tags.
    self.body.append(self.starttag(node, 'code', suffix='', CLASS='docutils literal javadoc'))
    self.body.append(self.starttag(node, 'a', suffix='', CLASS='reference external javadoc', HREF=uri))
    self.body.append(text + '</a>')

    # Append a plural suffix.
    if node.attributes['is_plural']:
        self.body.append(self.starttag(node, 'span', suffix='', CLASS='plural-suffix'))
        if re.fullmatch(r'^.*(ch|s|sh|x|z)$', text):
            self.body.append('es')
        else:
            self.body.append('s')
        self.body.append('</span>')

    self.body.append('</code>')

    raise nodes.SkipNode


def setup(app):
    app.add_config_value('javadoc_dir', os.path.join(app.outdir, 'apidocs'), 'html')

    # Register the 'javadoc' role.
    api_role.options = {'class': directives.class_option}
    register_canonical_role('api', api_role)
    register_canonical_role('apiplural', apiplural_role)

    # Intercept the rendering of HTML literals.
    old_visitor = HTMLTranslator.visit_literal
    HTMLTranslator.visit_literal = lambda self, node: api_visit_literal(self, node, old_visitor)
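For the roles above to be available, the module has to be importable by Sphinx and listed in extensions; setup() then registers :api: and :apiplural: along with the javadoc_dir config value. A conf.py sketch, assuming the file is saved as _extensions/api.py (the directory name, module name, and Javadoc output path are assumptions, not taken from the source):

# conf.py sketch; the _extensions/ layout and module name "api" are assumptions.
import os
import sys

sys.path.insert(0, os.path.abspath('_extensions'))   # make api.py importable

extensions = ['api']                                  # the module whose setup() is shown above

# Point javadoc_dir at the generated Javadoc HTML; the default is <outdir>/apidocs.
javadoc_dir = os.path.abspath('../target/site/apidocs')

In an .rst page, :api:`HttpClient` (an illustrative class name) then renders as a code literal linking into the Javadoc frame index, and :apiplural:`HttpClient` additionally appends the 's'/'es' suffix computed in api_visit_literal().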
[ "Base(object): def __init__(self, manager, interval, *locks): self.manager = manager self.storage", "LICENSE file. import time from feaas import storage class Base(object):", "the LICENSE file. import time from feaas import storage class", "__init__(self, manager, interval, *locks): self.manager = manager self.storage = manager.storage", "source code is governed by a BSD-style # license that", "import storage class Base(object): def __init__(self, manager, interval, *locks): self.manager", "def __init__(self, manager, interval, *locks): self.manager = manager self.storage =", "*lock_names): self.locker = storage.MultiLocker(self.storage) for lock_name in lock_names: self.locker.init(lock_name) def", "in lock_names: self.locker.init(lock_name) def loop(self): self.running = True while self.running:", "def loop(self): self.running = True while self.running: self.run() time.sleep(self.interval) def", "in the LICENSE file. import time from feaas import storage", "= True while self.running: self.run() time.sleep(self.interval) def stop(self): self.running =", "import time from feaas import storage class Base(object): def __init__(self,", "be found in the LICENSE file. import time from feaas", "a BSD-style # license that can be found in the", "*locks): self.manager = manager self.storage = manager.storage self.interval = interval", "file. import time from feaas import storage class Base(object): def", "= storage.MultiLocker(self.storage) for lock_name in lock_names: self.locker.init(lock_name) def loop(self): self.running", "of this source code is governed by a BSD-style #", "from feaas import storage class Base(object): def __init__(self, manager, interval,", "True while self.running: self.run() time.sleep(self.interval) def stop(self): self.running = False", "storage class Base(object): def __init__(self, manager, interval, *locks): self.manager =", "varnishapi authors. All rights reserved. # Use of this source", "self.interval = interval def init_locker(self, *lock_names): self.locker = storage.MultiLocker(self.storage) for", "that can be found in the LICENSE file. import time", "= manager self.storage = manager.storage self.interval = interval def init_locker(self,", "class Base(object): def __init__(self, manager, interval, *locks): self.manager = manager", "found in the LICENSE file. import time from feaas import", "# Use of this source code is governed by a", "Copyright 2014 varnishapi authors. All rights reserved. # Use of", "# license that can be found in the LICENSE file.", "def init_locker(self, *lock_names): self.locker = storage.MultiLocker(self.storage) for lock_name in lock_names:", "lock_name in lock_names: self.locker.init(lock_name) def loop(self): self.running = True while", "lock_names: self.locker.init(lock_name) def loop(self): self.running = True while self.running: self.run()", "2014 varnishapi authors. All rights reserved. 
# Use of this", "Use of this source code is governed by a BSD-style", "self.storage = manager.storage self.interval = interval def init_locker(self, *lock_names): self.locker", "code is governed by a BSD-style # license that can", "is governed by a BSD-style # license that can be", "feaas import storage class Base(object): def __init__(self, manager, interval, *locks):", "manager, interval, *locks): self.manager = manager self.storage = manager.storage self.interval", "manager.storage self.interval = interval def init_locker(self, *lock_names): self.locker = storage.MultiLocker(self.storage)", "self.locker = storage.MultiLocker(self.storage) for lock_name in lock_names: self.locker.init(lock_name) def loop(self):", "All rights reserved. # Use of this source code is", "rights reserved. # Use of this source code is governed", "= manager.storage self.interval = interval def init_locker(self, *lock_names): self.locker =", "self.manager = manager self.storage = manager.storage self.interval = interval def", "BSD-style # license that can be found in the LICENSE", "governed by a BSD-style # license that can be found", "# Copyright 2014 varnishapi authors. All rights reserved. # Use", "storage.MultiLocker(self.storage) for lock_name in lock_names: self.locker.init(lock_name) def loop(self): self.running =", "by a BSD-style # license that can be found in", "interval def init_locker(self, *lock_names): self.locker = storage.MultiLocker(self.storage) for lock_name in", "= interval def init_locker(self, *lock_names): self.locker = storage.MultiLocker(self.storage) for lock_name", "for lock_name in lock_names: self.locker.init(lock_name) def loop(self): self.running = True", "this source code is governed by a BSD-style # license", "interval, *locks): self.manager = manager self.storage = manager.storage self.interval =", "manager self.storage = manager.storage self.interval = interval def init_locker(self, *lock_names):", "reserved. # Use of this source code is governed by", "self.running = True while self.running: self.run() time.sleep(self.interval) def stop(self): self.running", "time from feaas import storage class Base(object): def __init__(self, manager,", "init_locker(self, *lock_names): self.locker = storage.MultiLocker(self.storage) for lock_name in lock_names: self.locker.init(lock_name)", "license that can be found in the LICENSE file. import", "authors. All rights reserved. # Use of this source code", "self.locker.init(lock_name) def loop(self): self.running = True while self.running: self.run() time.sleep(self.interval)", "loop(self): self.running = True while self.running: self.run() time.sleep(self.interval) def stop(self):", "can be found in the LICENSE file. import time from" ]
[ "# # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law", "# # Licensed under the Apache License, Version 2.0 (the", "compliance with the License. # You may obtain a copy", "an \"AS IS\" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF", "2.0 (the \"License\"); # you may not use this file", "agreed to in writing, software # distributed under the License", "file except in compliance with the License. # You may", "on an \"AS IS\" BASIS, # WITHOUT WARRANTIES OR CONDITIONS", "Unless required by applicable law or agreed to in writing,", "if target.arch == 'source': url = os.path.join(url, 'source', 'Sources.gz') else:", "cache_filename = fetch_and_cache_gzip(url, cache_dir) logging.debug('Reading file: %s' % cache_filename) #", "cache_dir) logging.debug('Reading file: %s' % cache_filename) # split package blocks", "number of every package package_versions = {} for lines in", "distributed under the License is distributed on an \"AS IS\"", "blocks: prefix = 'Package: ' assert lines[0].startswith(prefix) debian_pkg_name = lines[0][len(prefix):]", "in blocks: prefix = 'Package: ' assert lines[0].startswith(prefix) debian_pkg_name =", "prefix = 'Source: ' source_names = [l[len(prefix):] for l in", "= os.path.join(url, 'binary-%s' % target.arch, 'Packages.gz') cache_filename = fetch_and_cache_gzip(url, cache_dir)", "= 'Package: ' assert lines[0].startswith(prefix) debian_pkg_name = lines[0][len(prefix):] prefix =", "[l[len(prefix):] for l in lines if l.startswith(prefix)] version = versions[0]", "the specific language governing permissions and # limitations under the", "= [b.splitlines() for b in blocks if b] # extract", "= lines[0][len(prefix):] prefix = 'Version: ' versions = [l[len(prefix):] for", "# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express", "'Packages.gz') cache_filename = fetch_and_cache_gzip(url, cache_dir) logging.debug('Reading file: %s' % cache_filename)", "# split package blocks with open(cache_filename, 'rb') as f: blocks", "f.read().decode('utf8').split('\\n\\n') blocks = [b.splitlines() for b in blocks if b]", "blocks with open(cache_filename, 'rb') as f: blocks = f.read().decode('utf8').split('\\n\\n') blocks", "from .http_cache import fetch_and_cache_gzip def get_debian_repo_index(debian_repository_baseurl, target, cache_dir): url =", "os.path.join(url, 'binary-%s' % target.arch, 'Packages.gz') cache_filename = fetch_and_cache_gzip(url, cache_dir) logging.debug('Reading", "express or implied. # See the License for the specific", "applicable law or agreed to in writing, software # distributed", "== 1 else None prefix = 'Source: ' source_names =", "except in compliance with the License. 
# You may obtain", "package blocks with open(cache_filename, 'rb') as f: blocks = f.read().decode('utf8').split('\\n\\n')", "of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless", "Licensed under the Apache License, Version 2.0 (the \"License\"); #", "%s' % cache_filename) # split package blocks with open(cache_filename, 'rb')", "not use this file except in compliance with the License.", "open(cache_filename, 'rb') as f: blocks = f.read().decode('utf8').split('\\n\\n') blocks = [b.splitlines()", "= [l[len(prefix):] for l in lines if l.startswith(prefix)] version =", "if l.startswith(prefix)] source_name = source_names[0] if len(source_names) == 1 else", "logging import os from .common import PlatformPackageDescriptor from .http_cache import", "import os from .common import PlatformPackageDescriptor from .http_cache import fetch_and_cache_gzip", "lines if l.startswith(prefix)] version = versions[0] if len(versions) == 1", "writing, software # distributed under the License is distributed on", "in writing, software # distributed under the License is distributed", "assert lines[0].startswith(prefix) debian_pkg_name = lines[0][len(prefix):] prefix = 'Version: ' versions", "you may not use this file except in compliance with", "% cache_filename) # split package blocks with open(cache_filename, 'rb') as", "under the License. import logging import os from .common import", "# Licensed under the Apache License, Version 2.0 (the \"License\");", "language governing permissions and # limitations under the License. import", "and # limitations under the License. import logging import os", "# Copyright 2014 Open Source Robotics Foundation, Inc. # #", "' assert lines[0].startswith(prefix) debian_pkg_name = lines[0][len(prefix):] prefix = 'Version: '", "if len(versions) == 1 else None prefix = 'Source: '", "None prefix = 'Source: ' source_names = [l[len(prefix):] for l", "Source Robotics Foundation, Inc. # # Licensed under the Apache", "{} for lines in blocks: prefix = 'Package: ' assert", "package package_versions = {} for lines in blocks: prefix =", "source_names = [l[len(prefix):] for l in lines if l.startswith(prefix)] source_name", "source_names[0] if len(source_names) == 1 else None package_versions[debian_pkg_name] = PlatformPackageDescriptor(version,", "with open(cache_filename, 'rb') as f: blocks = f.read().decode('utf8').split('\\n\\n') blocks =", "use this file except in compliance with the License. #", "http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed", "= f.read().decode('utf8').split('\\n\\n') blocks = [b.splitlines() for b in blocks if", "package_versions = {} for lines in blocks: prefix = 'Package:", "# limitations under the License. import logging import os from", "'Version: ' versions = [l[len(prefix):] for l in lines if", "file: %s' % cache_filename) # split package blocks with open(cache_filename,", "lines[0].startswith(prefix) debian_pkg_name = lines[0][len(prefix):] prefix = 'Version: ' versions =", "CONDITIONS OF ANY KIND, either express or implied. 
# See", "= os.path.join(url, 'source', 'Sources.gz') else: url = os.path.join(url, 'binary-%s' %", "b] # extract version number of every package package_versions =", "' versions = [l[len(prefix):] for l in lines if l.startswith(prefix)]", "the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required", "'Package: ' assert lines[0].startswith(prefix) debian_pkg_name = lines[0][len(prefix):] prefix = 'Version:", "for b in blocks if b] # extract version number", "or implied. # See the License for the specific language", "License is distributed on an \"AS IS\" BASIS, # WITHOUT", "versions[0] if len(versions) == 1 else None prefix = 'Source:", "License. # You may obtain a copy of the License", "is distributed on an \"AS IS\" BASIS, # WITHOUT WARRANTIES", "License, Version 2.0 (the \"License\"); # you may not use", "# extract version number of every package package_versions = {}", "as f: blocks = f.read().decode('utf8').split('\\n\\n') blocks = [b.splitlines() for b", "# You may obtain a copy of the License at", "KIND, either express or implied. # See the License for", "specific language governing permissions and # limitations under the License.", ".common import PlatformPackageDescriptor from .http_cache import fetch_and_cache_gzip def get_debian_repo_index(debian_repository_baseurl, target,", "logging.debug('Reading file: %s' % cache_filename) # split package blocks with", "url = os.path.join( debian_repository_baseurl, 'dists', target.os_code_name, 'main') if target.arch ==", "under the License is distributed on an \"AS IS\" BASIS,", "os from .common import PlatformPackageDescriptor from .http_cache import fetch_and_cache_gzip def", "version number of every package package_versions = {} for lines", "copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # #", "License for the specific language governing permissions and # limitations", "'Source: ' source_names = [l[len(prefix):] for l in lines if", "License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by", "in blocks if b] # extract version number of every", "import logging import os from .common import PlatformPackageDescriptor from .http_cache", "version = versions[0] if len(versions) == 1 else None prefix", "' source_names = [l[len(prefix):] for l in lines if l.startswith(prefix)]", "os.path.join(url, 'source', 'Sources.gz') else: url = os.path.join(url, 'binary-%s' % target.arch,", "== 'source': url = os.path.join(url, 'source', 'Sources.gz') else: url =", "'binary-%s' % target.arch, 'Packages.gz') cache_filename = fetch_and_cache_gzip(url, cache_dir) logging.debug('Reading file:", "l in lines if l.startswith(prefix)] source_name = source_names[0] if len(source_names)", "the License for the specific language governing permissions and #", "% target.arch, 'Packages.gz') cache_filename = fetch_and_cache_gzip(url, cache_dir) logging.debug('Reading file: %s'", "(the \"License\"); # you may not use this file except", "os.path.join( debian_repository_baseurl, 'dists', target.os_code_name, 'main') if target.arch == 'source': url", "Apache License, Version 2.0 (the \"License\"); # you may not", "'dists', target.os_code_name, 'main') if target.arch == 'source': url = os.path.join(url,", "# you may not use this file except in compliance", "either express or implied. 
# See the License for the", "for l in lines if l.startswith(prefix)] version = versions[0] if", "def get_debian_repo_index(debian_repository_baseurl, target, cache_dir): url = os.path.join( debian_repository_baseurl, 'dists', target.os_code_name,", "len(versions) == 1 else None prefix = 'Source: ' source_names", "PlatformPackageDescriptor from .http_cache import fetch_and_cache_gzip def get_debian_repo_index(debian_repository_baseurl, target, cache_dir): url", "l.startswith(prefix)] source_name = source_names[0] if len(source_names) == 1 else None", "OR CONDITIONS OF ANY KIND, either express or implied. #", "url = os.path.join(url, 'source', 'Sources.gz') else: url = os.path.join(url, 'binary-%s'", "split package blocks with open(cache_filename, 'rb') as f: blocks =", "# http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or", "target.arch == 'source': url = os.path.join(url, 'source', 'Sources.gz') else: url", "every package package_versions = {} for lines in blocks: prefix", "lines if l.startswith(prefix)] source_name = source_names[0] if len(source_names) == 1", "the License is distributed on an \"AS IS\" BASIS, #", "1 else None prefix = 'Source: ' source_names = [l[len(prefix):]", "for l in lines if l.startswith(prefix)] source_name = source_names[0] if", "in compliance with the License. # You may obtain a", "software # distributed under the License is distributed on an", "debian_pkg_name = lines[0][len(prefix):] prefix = 'Version: ' versions = [l[len(prefix):]", "lines in blocks: prefix = 'Package: ' assert lines[0].startswith(prefix) debian_pkg_name", "'rb') as f: blocks = f.read().decode('utf8').split('\\n\\n') blocks = [b.splitlines() for", "lines[0][len(prefix):] prefix = 'Version: ' versions = [l[len(prefix):] for l", "governing permissions and # limitations under the License. import logging", "'source': url = os.path.join(url, 'source', 'Sources.gz') else: url = os.path.join(url,", "# # Unless required by applicable law or agreed to", "License. import logging import os from .common import PlatformPackageDescriptor from", "from .common import PlatformPackageDescriptor from .http_cache import fetch_and_cache_gzip def get_debian_repo_index(debian_repository_baseurl,", "f: blocks = f.read().decode('utf8').split('\\n\\n') blocks = [b.splitlines() for b in", "blocks = f.read().decode('utf8').split('\\n\\n') blocks = [b.splitlines() for b in blocks", "a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 #", "obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0", "fetch_and_cache_gzip(url, cache_dir) logging.debug('Reading file: %s' % cache_filename) # split package", "'source', 'Sources.gz') else: url = os.path.join(url, 'binary-%s' % target.arch, 'Packages.gz')", "Foundation, Inc. # # Licensed under the Apache License, Version", "2014 Open Source Robotics Foundation, Inc. # # Licensed under", "permissions and # limitations under the License. import logging import", "Version 2.0 (the \"License\"); # you may not use this", "debian_repository_baseurl, 'dists', target.os_code_name, 'main') if target.arch == 'source': url =", "law or agreed to in writing, software # distributed under", "blocks if b] # extract version number of every package", "'Sources.gz') else: url = os.path.join(url, 'binary-%s' % target.arch, 'Packages.gz') cache_filename", "the License. 
import logging import os from .common import PlatformPackageDescriptor", "= source_names[0] if len(source_names) == 1 else None package_versions[debian_pkg_name] =", "implied. # See the License for the specific language governing", "else: url = os.path.join(url, 'binary-%s' % target.arch, 'Packages.gz') cache_filename =", "= 'Source: ' source_names = [l[len(prefix):] for l in lines", "get_debian_repo_index(debian_repository_baseurl, target, cache_dir): url = os.path.join( debian_repository_baseurl, 'dists', target.os_code_name, 'main')", "= fetch_and_cache_gzip(url, cache_dir) logging.debug('Reading file: %s' % cache_filename) # split", "under the Apache License, Version 2.0 (the \"License\"); # you", "in lines if l.startswith(prefix)] version = versions[0] if len(versions) ==", "prefix = 'Version: ' versions = [l[len(prefix):] for l in", "\"License\"); # you may not use this file except in", "= [l[len(prefix):] for l in lines if l.startswith(prefix)] source_name =", "[b.splitlines() for b in blocks if b] # extract version", "distributed on an \"AS IS\" BASIS, # WITHOUT WARRANTIES OR", "limitations under the License. import logging import os from .common", "of every package package_versions = {} for lines in blocks:", "by applicable law or agreed to in writing, software #", "# distributed under the License is distributed on an \"AS", "OF ANY KIND, either express or implied. # See the", "WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.", "in lines if l.startswith(prefix)] source_name = source_names[0] if len(source_names) ==", "[l[len(prefix):] for l in lines if l.startswith(prefix)] source_name = source_names[0]", "may obtain a copy of the License at # #", "# Unless required by applicable law or agreed to in", "ANY KIND, either express or implied. # See the License", "See the License for the specific language governing permissions and", ".http_cache import fetch_and_cache_gzip def get_debian_repo_index(debian_repository_baseurl, target, cache_dir): url = os.path.join(", "target, cache_dir): url = os.path.join( debian_repository_baseurl, 'dists', target.os_code_name, 'main') if", "if b] # extract version number of every package package_versions", "for lines in blocks: prefix = 'Package: ' assert lines[0].startswith(prefix)", "if l.startswith(prefix)] version = versions[0] if len(versions) == 1 else", "if len(source_names) == 1 else None package_versions[debian_pkg_name] = PlatformPackageDescriptor(version, source_name)", "prefix = 'Package: ' assert lines[0].startswith(prefix) debian_pkg_name = lines[0][len(prefix):] prefix", "the License. # You may obtain a copy of the", "at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable", "for the specific language governing permissions and # limitations under", "b in blocks if b] # extract version number of", "\"AS IS\" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY", "to in writing, software # distributed under the License is", "fetch_and_cache_gzip def get_debian_repo_index(debian_repository_baseurl, target, cache_dir): url = os.path.join( debian_repository_baseurl, 'dists',", "cache_dir): url = os.path.join( debian_repository_baseurl, 'dists', target.os_code_name, 'main') if target.arch", "l.startswith(prefix)] version = versions[0] if len(versions) == 1 else None", "IS\" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,", "Inc. 
# # Licensed under the Apache License, Version 2.0", "# See the License for the specific language governing permissions", "target.os_code_name, 'main') if target.arch == 'source': url = os.path.join(url, 'source',", "== 1 else None package_versions[debian_pkg_name] = PlatformPackageDescriptor(version, source_name) return package_versions", "'main') if target.arch == 'source': url = os.path.join(url, 'source', 'Sources.gz')", "Copyright 2014 Open Source Robotics Foundation, Inc. # # Licensed", "You may obtain a copy of the License at #", "extract version number of every package package_versions = {} for", "may not use this file except in compliance with the", "or agreed to in writing, software # distributed under the", "= os.path.join( debian_repository_baseurl, 'dists', target.os_code_name, 'main') if target.arch == 'source':", "= versions[0] if len(versions) == 1 else None prefix =", "target.arch, 'Packages.gz') cache_filename = fetch_and_cache_gzip(url, cache_dir) logging.debug('Reading file: %s' %", "required by applicable law or agreed to in writing, software", "= 'Version: ' versions = [l[len(prefix):] for l in lines", "BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either", "WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or", "import PlatformPackageDescriptor from .http_cache import fetch_and_cache_gzip def get_debian_repo_index(debian_repository_baseurl, target, cache_dir):", "import fetch_and_cache_gzip def get_debian_repo_index(debian_repository_baseurl, target, cache_dir): url = os.path.join( debian_repository_baseurl,", "url = os.path.join(url, 'binary-%s' % target.arch, 'Packages.gz') cache_filename = fetch_and_cache_gzip(url,", "l in lines if l.startswith(prefix)] version = versions[0] if len(versions)", "with the License. # You may obtain a copy of", "versions = [l[len(prefix):] for l in lines if l.startswith(prefix)] version", "this file except in compliance with the License. # You", "= {} for lines in blocks: prefix = 'Package: '", "else None prefix = 'Source: ' source_names = [l[len(prefix):] for", "len(source_names) == 1 else None package_versions[debian_pkg_name] = PlatformPackageDescriptor(version, source_name) return", "the Apache License, Version 2.0 (the \"License\"); # you may", "Robotics Foundation, Inc. # # Licensed under the Apache License,", "Open Source Robotics Foundation, Inc. # # Licensed under the", "source_name = source_names[0] if len(source_names) == 1 else None package_versions[debian_pkg_name]", "blocks = [b.splitlines() for b in blocks if b] #", "cache_filename) # split package blocks with open(cache_filename, 'rb') as f:" ]
[]
[ "params[1] padding = params[2] output_size = (int((last_z.shape[2]-pooling[0]+2*padding[0])/stride[0]+1), \\ int((last_z.shape[3]-pooling[0]+2*padding[0])/stride[0]+1)) dLoss_dz", "in key1 or 'None' in value1: print('Not completed for '+key+'", "add_nodes(u[0]) if hasattr(var, 'saved_tensors'): for t in var.saved_tensors: dot.edge(str(id(t)), str(id(var)))", "else: tmp = find_next_layer_by_name(tmp_layers, current_layer_name, index_tmp_layers) return_layers[0][j].insert(0, tmp[0]) if not", "''' for i in range(len(last_connections)): print(last_connections[i]) for i in range(len(featuremap)):", "(kernel_size, kernel_size) else: Conv2d_params['kernel_size'] = kernel_size # stride stride =", "layer.__dict__.get('output_size') if not isinstance(output_size, tuple): AdaptiveAvgPool2d_params['output_size'] = (output_size, output_size) else:", "'ViewBackward' in name: return 'View' elif 'Mean' in name or", "2.0 (the \"License\"); # you may not use this file", "value == last_key: tmp_split.append(item) else: return_connections.append(tmp_split) tmp_split = [item] return", "= layer.__dict__.get('stride') if not isinstance(stride, tuple): AvgPool2d_params['stride'] = (stride, stride)", "MaxPool2d_params['padding'] = padding # return parameters[i][j][k] = MaxPool2d_params elif isinstance(tmp_layer,", "file_nums = [] for i in range(len(files)): if '.pth' in", "value = list(item.values())[0] last_key = list(tmp_split[-1].keys())[0] if value == last_key:", "layer == 'View': layer_name = 'View' parameters[i] = {'layer_name': layer_name}", "print('# last_z.shape: ', list(last_z.shape)) if params: pooling = params[0] stride", "node that require grad (TODO: make optional) \"\"\" if params", "c, strides[0] * i:strides[0] * i + pooling[0], strides[1] *", "padding = tmp_layer.__dict__.get('padding') if not isinstance(padding, tuple): Conv2d_params['padding'] = (padding,", "= dz[n,d,h//strides[0],w//strides[1]] return pz @torch.no_grad() def judge_tensors_equal(tensor_A, tensor_B): if(not tensor_A.shape", "processed in get_structure_parameters_v1!') return parameters, fc_conv_weights @torch.no_grad() def delete_allpths(pth_dir=None): import", "import os featuremap = [] if featuremap_dir == None: pth_dir", "range(len(last_connections[i][j])-1, -1, -1): current_layer_name = list(last_connections[i][j][k].keys())[0].split('_')[0] if 'ReLU' in current_layer_name:", "tensor_B): if(not tensor_A.shape == tensor_B.shape): print('Shape of two compard tensors", "gradient_backward_v1!') print('======================== {0:3} Layer: '.format(str(i))+'{0:9}'.format(layer['layer_name'])+' Backward End ==========================') delete_allpths(pth_dir=None) return", "len(lastpop.shape): dLoss_dz = dLoss_dz.reshape(lastpop.shape) elif layer['layer_name'] == 'ReLU': z =", "list(last_connections[i-1].keys())[0].split('_')[0] == 'Dropout': index_tmp_layers = tmp[1] + 1 else: return_layers.insert(0,", "fc_conv_weights.append(layer.weight) parameters.append(Conv2d_params) elif isinstance(layer, nn.ReLU): layer_name = 'ReLU' parameters.append({'layer_name': layer_name})", "+= 1 #print('Error rate: ', error/(C*N)) print('2D-error-rate: ', end=' ')", "z, w): print('# next_dz.shape: ', list(dLoss_dnextz.shape)) print('# z.shape: ', list(z.shape))", "= [] fc_conv_weights = [] for layer in layers: if", "== 'Dropout': if parameters[i-1]['layer_name'] == 'Dropout': return_dz[i] = dLoss_dz print('#", "elif isinstance(tmp_layer, nn.BatchNorm2d): layer_name = 'BatchNorm2d' BatchNorm2d_params = {} BatchNorm2d_params['layer_name']", 
"kernel_size kernel_size = tmp_layer.__dict__.get('kernel_size') if not isinstance(kernel_size, tuple): MaxPool2d_params['kernel_size'] =", "align='left', fontsize='12', ranksep='0.1', height='0.2') dot = Digraph(node_attr=node_attr, graph_attr=dict(size=\"12,12\")) seen =", "return 'View' elif 'Mean' in name or 'Avg' in name:", "N, C, H, W = tensor_A.shape for n in range(N):", "# padding padding = tmp_layer.__dict__.get('padding') if not isinstance(padding, tuple): Conv2d_params['padding']", "strides[1] * j + flat_idx % pooling[1] padding_dz[n, c, h_idx,", "torch.arange(out_h): for j in torch.arange(out_w): padding_dz[n, c, strides[0] * i:strides[0]", "'AvgPool2d' elif 'BatchNorm' in name: return 'BatchNorm2d' elif 'Conv' in", "1) dK = _conv_forward(torch.swapaxes(F.pad(z, pad=(padding[1],padding[1],\\ padding[0],padding[0],0,0), mode='constant', value=0), 0, 1),", "= 'AvgPool2d' AvgPool2d_params = {} AvgPool2d_params['layer_name'] = layer_name # kernel_size", "= torch.swapaxes(flip_K, 0, 1) ppadding_next_dz = F.pad(padding_next_dz, pad=(k2-1-padding[1],k2-1-padding[1],\\ k1-1-padding[0],k1-1-padding[0],0,0), mode='constant',", "make_dot(var, params=None): \"\"\" Produces Graphviz representation of PyTorch autograd graph", "or View if last_tensors[i] == 'Add': last_tensors[i] = last_tensors[i+1][0][0] +", "range(D): for h in range(0, H_last, strides[0]): for w in", "= dxhut.sum(axis=axis, keepdim=True) dz = ivar/m*(dz1-dz2-dz3) print('# dz.shape: ', list(dz.shape))", "tmp_layer['stride'] tmp_dLoss_dz[-1], dLoss_dW, dLoss_dB = conv_backward(tmp_dLoss_dz[-1], weight_z, z, padding, stride)", "1 return return_layers @torch.no_grad() def get_tensors(last_connections): tensors = get_featuremap(featuremap_dir=None) index_tensors", "saved for backward in torch.autograd.Function Args: var: output Variable params:", "AvgPool2d_params['stride'] = (stride, stride) else: AvgPool2d_params['stride'] = stride # padding", "', p) print('# next_dz.shape: ', list(next_dz.shape)) print('# mask.shape: ', list(mask.shape))", "len(connections)): if not list(connections[j].values())[0] == list(connections[j-1].keys())[0]: notchoosed.append(i) start, end =", "=================================\\n') return last_connections @torch.no_grad() def find_next_layer_by_name(layers, name, start_i): for i", "_conv_forward(ppadding_next_dz, swap_flip_K) swap_z = torch.swapaxes(z, 0, 1) dK = _conv_forward(torch.swapaxes(F.pad(z,", "dz.shape: ', list(dz.shape)) print('# dweight.shape: ', list(dK.transpose(0,1).shape)) print('# dbias.shape: ',", "layer_name = 'ReLU' parameters[i][j][k] = {'layer_name': layer_name} elif tmp_layer ==", "featuremap[i-1][j][k+1] weight_z = fc_conv_weights[i][j][k] try: padding = tmp_layer['padding'] except: padding", "{} AdaptiveAvgPool2d_params['layer_name'] = layer_name # output_size output_size = layer.__dict__.get('output_size') if", "', list(dLoss_dfcB.shape)) return dLoss_dz, dLoss_dfcW/N, dLoss_dfcB/N @torch.no_grad() def view_backward(dLoss_dnextz, last_z,", "> 0: return z[:, :, padding[0]:-padding[0], :] elif padding[1] >", "AdaptiveAvgPool2d_params['layer_name'] = layer_name # output_size output_size = layer.__dict__.get('output_size') if not", "Generate Tensors Start ====================================') result = model(img) print('=========================== Generate Tensors", "layer['eps'] z = featuremap[-1] gamma = fc_conv_weights[-1] dLoss_dz = batchnorm2d_backward(dLoss_dz,", "print('# gamma.shape: ', list(gamma.shape)) N, C, H, W = z.shape", "== {'None': 'None'}: 
last_connections.remove({'None': 'None'}) print('=========================== Restore network model End", "zeros_tensor), 1./(1.-p)) print('# dz.shape: ', list(dLoss_dz.shape)) return dLoss_dz @torch.no_grad() def", "print('# next_dz.shape: ', list(next_dz.shape)) print('# z.shape: ', list(z.shape)) print('# eps:", "fc_conv_weights[i] = layer.weight parameters[i] = Conv2d_params elif isinstance(layer, nn.ReLU): layer_name", "name: return 'Dropout_1' elif 'AddBackward' in name: return 'Add' elif", "= tmp_layer.__dict__.get('eps') BatchNorm2d_params['eps'] = eps # return fc_conv_weights[i][j][k] = tmp_layer.weight", "* j:strides[1] * j + pooling[1]] += next_dz[n, c, i,", "list(db.shape)) return dz, (dK/N).transpose(0,1), db/N @torch.no_grad() def _conv_forward(x, weight, strides=(1,1)):", "def add_nodes(var): if var not in seen: if torch.is_tensor(var): dot.node(str(id(var)),", "License for the specific language governing permissions and # limitations", "as np ax = list(np.arange(len(shape))) shape.pop(1) ax.pop(1) axis = tuple(ax)", "= layer_name # in_features in_features = tmp_layer.__dict__.get('in_features') Linear_params['in_features'] = in_features", "H, W = z.shape m = N*H*W shape = [N,C,H,W]", "np_A[n,c,h,w], np_B[n,c,h,w]) #print('Error rate: ', error/(N*C*H*W)) print('4D-error-rate: ', end=' ')", "z = featuremap[-1] pooling = layer['kernel_size'] stride = layer['stride'] padding", "= featuremap[i] pooling = layer['kernel_size'] stride = layer['stride'] padding =", "root, dirs, files in os.walk(pth_dir, topdown=False): for name in files:", "'ResNet' in str(model): print(judge_tensors_equal(dLoss_dW, model.conv1.weight.grad)) delete_allpths(pth_dir=None) return return_dz, dLoss_dW, dLoss_dB", "torch.einsum( 'nchwkj,dckj->ndhw', x_pad, weight) return out @torch.no_grad() def _insert_zeros(dz, strides):", "print('# weight.shape: ', list(K.shape)) print('# bias.shape: ', '['+str(K.shape[0])+']') print('# padding:", "get_split_connections(connections): return_connections = [] tmp_split = [] for i in", "C = tensor_A.shape for n in range(N): for c in", "'Conv2d': z = featuremap[-1] weight_z = fc_conv_weights[-1] try: padding =", "= layer.weight parameters[i] = Conv2d_params elif isinstance(layer, nn.ReLU): layer_name =", "Tensors Start ====================================') result = model(img) print('=========================== Generate Tensors End", "stride stride = tmp_layer.__dict__.get('stride') if not isinstance(stride, tuple): AvgPool2d_params['stride'] =", "tmp_dLoss_dz.append(copy.deepcopy(dLoss_dz)) for k in range(len(layer[j])): tmp_layer = layer[j][k] print('\\n=========================== {0:3}", "Dropout_params['p'] = p # return parameters[i][j][k] = Dropout_params elif isinstance(tmp_layer,", "delete_allpths(pth_dir=None): import os if pth_dir == None: pth_dir = \"./tmp_file/\"", "in str(model): print(judge_tensors_equal(dLoss_dW, model.features[0].weight.grad)) elif 'ResNet' in str(model): print(judge_tensors_equal(dLoss_dW, model.conv1.weight.grad))", "return_dz.append(dLoss_dz) #####################tensors ''' for i in range(len(last_connections)): print(last_connections[i]) for i", "name: return 'BatchNorm2d' elif 'Conv' in name: return 'Conv2d' elif", "output Variable params: dict of (name, Variable) to add names", "parameters[i] = {'layer_name': layer_name} elif layer == 'Cat': layer_name =", "= layer.__dict__.get('padding') if not isinstance(padding, tuple): Conv2d_params['padding'] = (padding, padding)", "tmp[0]) if isinstance(last_connections[i-1], list): 
index_tmp_layers = tmp[1] + 1 elif", "fc_backward(dLoss_dnextz, z, w): print('# next_dz.shape: ', list(dLoss_dnextz.shape)) print('# z.shape: ',", "if not isinstance(return_tensors[i], list) and not isinstance(return_tensors[i], str): print('=========', i,", "= cross_entropy_loss(featuremap[0], y_true) featuremap.pop(0) return_dz.append(dLoss_dz) #####################tensors ''' for i in", "', list(next_dz.shape)) print('# z.shape: ', list(z.shape)) zeros_tensor = torch.zeros_like(next_dz) dLoss_dz", "print('Generate connections not done! Check generate_connections function!') exit() new_connections.insert(0, {list(new_connections[0].values())[0]:", "error_tolerance or np_B[n,c,h,w]-np_A[n,c,h,w] > error_tolerance: error += 1 if error%20", "'saved_tensors'): for t in var.saved_tensors: dot.edge(str(id(t)), str(id(var))) add_nodes(t) print(var) add_nodes(var.grad_fn)", "', list(dLoss_dnextz.shape)) print('# z.shape: ', list(z.shape)) print('# weight.shape: ', list(w.shape))", "error/(N*C*H*W)) print('4D-error-rate: ', end=' ') return error/(N*C*H*W) elif len(tensor_A.shape) ==", "return_dz[i] = dLoss_dz elif layer['layer_name'] == 'Dropout': if parameters[i-1]['layer_name'] ==", "= layer_name # num_features num_features = layer.__dict__.get('num_features') BatchNorm2d_params['num_features'] = num_features", "return z[:, :, padding[0]:-padding[0], :] elif padding[1] > 0: return", "or np_B[c]-np_A[c] > error_tolerance: #print(np_A[c], np_B[c]) error += 1 #print('Error", "> 0: return z[:, :, :, padding[1]:-padding[1]] else: return z", "not None else '' node_name = '%s\\n %s' % (name,", "axis=-1), axis=-1), axis=0) # 在高度、宽度上相加;批量大小上相加 print('# dz.shape: ', list(dz.shape)) print('#", "+= 1 else: for j in range(len(last_connections[i])): if len(last_connections[i][j]) ==", "z - mu xmu2 = xmu**2 var = xmu2.sum(axis=axis, keepdim=True)/m", "', list(dLoss_dz.shape)) print('# dweight.shape: ', list(dLoss_dfcW.shape)) print('# dbias.shape: ', list(dLoss_dfcB.shape))", "= (W-1)*(strides[1]-1) + W pz = torch.zeros(N, D, H_last, W_last)", "os.listdir(pth_dir) file_nums = [] for i in range(len(files)): if '.pth'", "Dropout_params = {} Dropout_params['layer_name'] = layer_name # p p =", "return fc_conv_weights[i][j][k] = tmp_layer.weight parameters[i][j][k] = BatchNorm2d_params elif isinstance(tmp_layer, nn.Linear):", "0, 1)) db = torch.sum(torch.sum(torch.sum(next_dz, axis=-1), axis=-1), axis=0) # 在高度、宽度上相加;批量大小上相加", "get_layers(last_connections, model) return_tensors = get_tensors(last_connections) parameters, fc_conv_weights = get_structure_parameters(return_layers) '''", "', list(z.shape)) zeros_tensor = torch.zeros_like(next_dz) dLoss_dz = torch.where(torch.gt(z, 0), next_dz,", "(https://github.com/ConvolutedDog/) # # Licensed under the Apache License, Version 2.0", "D, H1, W1 = next_dz.shape print('# next_dz.shape: ', list(next_dz.shape)) print('#", ":, padding[0]:-padding[0], :] elif padding[1] > 0: return z[:, :,", "for c in range(C): if np_A[c]-np_B[c] > error_tolerance or np_B[c]-np_A[c]", "else: MaxPool2d_params['stride'] = stride # padding padding = tmp_layer.__dict__.get('padding') if", "tmp_layer.__dict__.get('stride') if not isinstance(stride, tuple): AvgPool2d_params['stride'] = (stride, stride) else:", "cross_entropy_loss(featuremap[-1], y_true) print('Self calculated loss: ', loss) featuremap.pop() return_dz.append(dLoss_dz) dW_dB_fc_conv", "print('# dz.shape: ', list(dLoss_dz.shape)) return dLoss_dz def add_backward(dLoss_dnextz): print('# next_dz.shape:", "for layer in layers: if 
isinstance(layer, nn.Conv2d): layer_name = 'Conv2d'", "padding: ', padding) print('# strides: ', strides) padding_next_dz = _insert_zeros(next_dz,", "dot def generate_g(model, x): delete_allpths(pth_dir=None) print('\\n=========================== Store network model Results", "tmp_split.append(item) continue value = list(item.values())[0] last_key = list(tmp_split[-1].keys())[0] if value", "key2 == list(list_dic_key_value[index].keys())[0]: end = index break return start+1, end-1", "featuremap[i] dLoss_dz = relu_backward(dLoss_dz, z) return_dz[i] = dLoss_dz elif layer['layer_name']", "dLoss_dz = view_backward(dLoss_dz, last_z, params) return_dz[i] = dLoss_dz elif layer['layer_name']", "0, 1) ppadding_next_dz = F.pad(padding_next_dz, pad=(k2-1-padding[1],k2-1-padding[1],\\ k1-1-padding[0],k1-1-padding[0],0,0), mode='constant', value=0) dz", "dLoss_dz @torch.no_grad() def dropback_backward(next_dz, mask, p): print('# zeros probability: ',", "not been processed in get_structure_parameters_v1!') return parameters, fc_conv_weights @torch.no_grad() def", "Generate Tensors End ======================================\\n') Loss = nn.CrossEntropyLoss() if 'GoogLeNet' in", "parameters[i][j][k] = AvgPool2d_params elif isinstance(tmp_layer, nn.Dropout): layer_name = 'Dropout' Dropout_params", "else: if n*c*h*w % 20000000000000 == 0: pass #print('right', np_A[n,c,h,w],", "= str(g).split('\\n') labels = {} connections = [] for i", "or 'TBackward' in item_key: pop_index.append(connections[i]) for i in range(len(pop_index)-1, -1,", "== []: last_connections = last_connections[:notchoosed[0]] else: pass for i in", "### if isinstance(tmp_layer, nn.Conv2d): layer_name = 'Conv2d' Conv2d_params = {}", "isinstance(tmp_layer, nn.AvgPool2d): layer_name = 'AvgPool2d' AvgPool2d_params = {} AvgPool2d_params['layer_name'] =", "u[0] is not None: dot.edge(str(id(u[0])), str(id(var))) add_nodes(u[0]) if hasattr(var, 'saved_tensors'):", "== last_item_key: for j in range(i+1, len(connections)): if not list(connections[j].values())[0]", "', list(dz.shape)) return dz @torch.no_grad() def batchnorm2d_backward(next_dz, z, eps, gamma=torch.Tensor([1.,1.,1.])):", "else: pth_dir = featuremap_dir files = os.listdir(pth_dir) file_nums = []", "= 'BatchNorm2d' BatchNorm2d_params = {} BatchNorm2d_params['layer_name'] = layer_name # num_features", "= list(connections[i].keys())[0] if not 'None' in item_key: if i ==", "'Dropout': index_tmp_layers = tmp[1] + 1 return return_layers @torch.no_grad() def", "weight_z, z, padding, stride) return_dz[i] = dLoss_dz elif layer['layer_name'] ==", "= [] tmp.append(connections[start:end+1]) tmp.append(connections[i:j-1]) last_connections[start:end+1] = [tmp] for kk in", "# p p = tmp_layer.__dict__.get('p') Dropout_params['p'] = p # return", "d in range(D): for h in range(0, H_last, strides[0]): for", "in str(layer): tmp_layers.append(layer) index_tmp_layers = 0 for i in range(len(last_connections)-1,", "weight_z, z, padding, stride) return_dz.append(dLoss_dz) fc_conv_weights.pop() if not len(featuremap) ==", "= (kernel_size, kernel_size) else: MaxPool2d_params['kernel_size'] = kernel_size # stride stride", "add_nodes(t) print(var) add_nodes(var.grad_fn) return dot def generate_g(model, x): delete_allpths(pth_dir=None) print('\\n===========================", "index_tmp_layers = 0 for i in range(len(last_connections)-1, -1, -1): if", "equal.') return None error = 0 error_tolerance = 0.001 np_A", "for w in range(W): if np_A[n,c,h,w]-np_B[n,c,h,w] > error_tolerance or np_B[n,c,h,w]-np_A[n,c,h,w]", 
"layer.__dict__.get('stride') if not isinstance(stride, tuple): Conv2d_params['stride'] = (stride, stride) else:", "print('================') print('================') for i in range(len(parameters)): print(i, parameters[i]) print('================') print('================')", "Blue nodes are the Variables that require grad, orange are", "exchange_name(key.split('_')[0]) + '_' + key.split('_')[1] value1 = exchange_name(value.split('_')[0]) + '_'", "x x_pad = x_pad.unfold(2, k, strides[0]) x_pad = x_pad.unfold(3, j,", "if key2 == list(list_dic_key_value[index].keys())[0]: end = index break return start+1,", "OF ANY KIND, either express or implied. # See the", "'BatchNorm2d' BatchNorm2d_params = {} BatchNorm2d_params['layer_name'] = layer_name # num_features num_features", "stride) else: MaxPool2d_params['stride'] = stride # padding padding = tmp_layer.__dict__.get('padding')", "torch.sum(y_exp, dim=1, keepdim=True)) ypred_loss = torch.mean(-torch.sum(torch.mul(y_true, torch.log(y_probability)), dim=1, keepdim=True)) dLoss_dypred", "parameters, fc_conv_weights def gradient_backward_v2(model, img, label, num_class=1000, g_view=False): x =", "', loss_torch.detach().numpy()) loss_torch.backward() if 'VGG' in str(model) or 'AlexNet' in", "See the License for the specific language governing permissions and", "W_last) for n in range(N): for d in range(D): for", "(H-1)*(strides[0]-1) + H W_last = (W-1)*(strides[1]-1) + W pz =", "'Add': last_tensors[i] = last_tensors[i+1][0][0] + last_tensors[i+1][1][0] if last_tensors[i] == 'View':", "output_size = (int((last_z.shape[2]-pooling[0]+2*padding[0])/stride[0]+1), \\ int((last_z.shape[3]-pooling[0]+2*padding[0])/stride[0]+1)) dLoss_dz = dLoss_dnextz.reshape(last_z.shape[0], last_z.shape[1], output_size[0],", "{0:3} Layer: '.format(str(len(parameters)-1-i))+'{0:11}'.\\ format(layer['layer_name'])+' Backward End ==========================') continue p =", "padding padding = tmp_layer.__dict__.get('padding') if not isinstance(padding, tuple): MaxPool2d_params['padding'] =", "-> ')[0]:\\ labels[graph[i].split('\\t')[1].split(' -> ')[1]]+'_'+\\ graph[i].split('\\t')[1].split(' -> ')[1]}) pop_index =", "get_featuremap(featuremap_dir=None) featuremap.insert(0, img) ### y_true = F.one_hot(label, num_classes=num_class).float() loss, dLoss_dz", "cross_entropy_loss(y_predict, y_true): print('\\n=========================== Layer:'+' {0:18}'.format('cross_entropy_loss')+' Start ===========================') print('# y_predict.shape: ',", "W = z.shape m = N*H*W shape = [N,C,H,W] import", "strides) padding_next_dz = _insert_zeros(next_dz, strides) flip_K = torch.flip(K, (2, 3))", "== 'MaxPool2d': z = featuremap[i] pooling = layer['kernel_size'] stride =", "find_start_end(list_dic_key_value, i, j): key1 = list(list_dic_key_value[i].values())[0] key2 = list(list_dic_key_value[j].keys())[0] start", "1)): N, C, H, W = z.shape D, C, k1,", "to in writing, software # distributed under the License is", "', end=' ') return error/(N*C*H*W) elif len(tensor_A.shape) == 1: C", "fc_conv_weights = [] for layer in layers: if isinstance(layer, nn.Conv2d):", "= output_size # return parameters[i][j][k] = AdaptiveAvgPool2d_params ### else: print('The", "tmp_layer['padding'] except: padding = (0, 0) stride = tmp_layer['stride'] tmp_dLoss_dz[-1],", "j in range(len(featuremap[i])): for k in range(len(featuremap[i][j])): print(' =========', i,", "processing {}/{}'.format(i, len(connections)-1)) item_key = list(connections[i].keys())[0] if not 'None' in", "# return parameters[i] = 
AvgPool2d_params elif isinstance(layer, nn.Dropout): layer_name =", "graph[i] and graph[i][-1] == ']': labels[graph[i].split('\\t')[1].split(' ')[0]]=\\ graph[i].split('\\t')[1].split('=')[1].split(']')[0] for i", "list(mask.shape)) zeros_tensor = torch.zeros_like(mask) dLoss_dz = torch.mul(torch.where(torch.eq(mask, 1.), next_dz, zeros_tensor),", "0: return z[:, :, padding[0]:-padding[0], :] elif padding[1] > 0:", "kernel_size) else: Conv2d_params['kernel_size'] = kernel_size # stride stride = tmp_layer.__dict__.get('stride')", "dLoss_dz = torch.matmul(dLoss_dnextz, w) #delta dLoss_dfcW = torch.matmul(dLoss_dnextz.t(), z) dLoss_dfcB", "= model(x) print('=========================== Store network model Results End ===========================\\n') if", "keepdim=True) xmu = z - mu xmu2 = xmu**2 var", "pooling, stride, padding) return_dz[i] = dLoss_dz elif layer['layer_name'] == 'AvgPool2d':", "if hasattr(var, 'next_functions'): for u in var.next_functions: if u[0] is", "or agreed to in writing, software # distributed under the", "tmp_dLoss_dz[-1] = batchnorm2d_backward(tmp_dLoss_dz[-1], z, eps, gamma) return_dz[i][j][k] = tmp_dLoss_dz[-1] print('===========================", "current_layer_name = list(last_connections[i].keys())[0].split('_')[0] if 'Add' in current_layer_name: last_tensors[i] = 'Add'", "elif 'MaxPool' in name: return 'MaxPool2d' elif 'MulBackward' in name:", "padding) else: Conv2d_params['padding'] = padding # return fc_conv_weights[i][j][k] = tmp_layer.weight", "= tmp_layer.__dict__.get('kernel_size') if not isinstance(kernel_size, tuple): Conv2d_params['kernel_size'] = (kernel_size, kernel_size)", "= fc_conv_weights[i] try: padding = layer['padding'] except: padding = (0,", "Start ====================') if tmp_layer['layer_name'] == 'Conv2d': if k+1 >= len(featuremap[i-1][j]):", "else: print('Not completed in gradient_backward!') print('# Torch calculated loss: ',", "weight_z = fc_conv_weights[i] z = featuremap[i] dLoss_dz, dLoss_dW, dLoss_dB =", "error_tolerance or np_B[n,c]-np_A[n,c] > error_tolerance: #print(np_A[n,c], np_B[n,c]) error += 1", "in range(len(graph)): if 'label' in graph[i] and graph[i][-1] == '\"':", "-1) dLoss_dz = torch.matmul(dLoss_dnextz, w) #delta dLoss_dfcW = torch.matmul(dLoss_dnextz.t(), z)", "out_features # return fc_conv_weights.append(layer.weight) parameters.append(Linear_params) elif isinstance(layer, nn.AdaptiveAvgPool2d): layer_name =", "dz3 = dxhut.sum(axis=axis, keepdim=True) dz = ivar/m*(dz1-dz2-dz3) print('# dz.shape: ',", "padding) else: MaxPool2d_params['padding'] = padding # return parameters.append(MaxPool2d_params) elif isinstance(layer,", "if 'None' in key1 or 'None' in value1: print('Not completed", "'Pool' in parameters[i+1]['layer_name']: params = (parameters[i+1]['kernel_size'], parameters[i+1]['stride'], parameters[i+1]['padding']) else: params", "loss_torch.detach().numpy()) loss_torch.backward() if 'VGG' in str(model) or 'AlexNet' in str(model):", "= MaxPool2d_params elif isinstance(tmp_layer, nn.AvgPool2d): layer_name = 'AvgPool2d' AvgPool2d_params =", "return 'Conv2d' elif 'MaxPool' in name: return 'MaxPool2d' elif 'MulBackward'", "pooling = params[0] stride = params[1] padding = params[2] output_size", "_insert_zeros(dz, strides): N, D, H, W = dz.shape H_last =", "-1): current_layer_name = list(last_connections[i][j][k].keys())[0].split('_')[0] if 'ReLU' in current_layer_name: return_layers[0][j].insert(0, torch.nn.ReLU(inplace=True))", "@torch.no_grad() def get_structure_parameters_v1(model): layers = [] for layer in 
model.modules():", "torch.mean(-torch.sum(torch.mul(y_true, torch.log(y_probability)), dim=1, keepdim=True)) dLoss_dypred = y_probability - y_true print('#", "= layer_name # kernel_size kernel_size = layer.__dict__.get('kernel_size') if not isinstance(kernel_size,", "elif 'AddmmBackward' in name: return 'Linear' elif 'ViewBackward' in name:", "os.remove(os.path.join(root, name)) @torch.no_grad() def mul_items(tensor_size): x = list(tensor_size) mul =", "'View': layer_name = 'View' parameters[i][j][k] = {'layer_name': layer_name} elif tmp_layer", "of (name, Variable) to add names to node that require", "'BatchNorm2d': eps = tmp_layer['eps'] z = featuremap[i-1][j][k+1] gamma = fc_conv_weights[i][j][k]", "in range(end-start): last_connections.insert(start, 'Throwed') num_Throwed += 1 break if not", "if not notchoosed == []: last_connections = last_connections[:notchoosed[0]] else: pass", "index_tensors = 0 import copy last_tensors = copy.deepcopy(last_connections) for i", "= fc_conv_weights[i][j][k] tmp_dLoss_dz[-1] = batchnorm2d_backward(tmp_dLoss_dz[-1], z, eps, gamma) return_dz[i][j][k] =", "in params.items()} node_attr = dict(style='filled', shape='box', align='left', fontsize='12', ranksep='0.1', height='0.2')", "return_layers.insert(0, tmp[0]) if isinstance(last_connections[i-1], list): index_tmp_layers = tmp[1] + 1", "compliance with the License. # You may obtain a copy", "0: pass #print('right', np_A[n,c,h,w], np_B[n,c,h,w]) #print('Error rate: ', error/(N*C*H*W)) print('4D-error-rate:", "break if not notchoosed == []: last_connections = last_connections[:notchoosed[0]] else:", "Variable(img) g = generate_g(model, x) if g_view: g.view() delete_allpths(pth_dir=None) print('\\n===========================", "if not isinstance(featuremap[i], list): print('=========', i, featuremap[i].shape) else: for j", "'MaxPool' in name: return 'MaxPool2d' elif 'MulBackward' in name: return", "= last_connections[:notchoosed[0]] else: pass for i in range(num_Throwed): last_connections.remove('Throwed') if", "BatchNorm2d_params elif isinstance(tmp_layer, nn.Linear): layer_name = 'Linear' Linear_params = {}", "len(last_connections[i][j]) == 0: continue for k in range(len(last_connections[i][j])-1, -1, -1):", "Linear_params['in_features'] = in_features # out_features out_features = tmp_layer.__dict__.get('out_features') Linear_params['out_features'] =", "tmp_layer.__dict__.get('in_channels') Conv2d_params['in_channel'] = in_channel # out_channel out_channel = tmp_layer.__dict__.get('out_channels') Conv2d_params['out_channel']", "padding[1]:-padding[1]] else: return z @torch.no_grad() def conv_backward(next_dz, K, z, padding=(0,", "= find_next_layer_by_name(tmp_layers, current_layer_name, index_tmp_layers) return_layers.insert(0, tmp[0]) if isinstance(last_connections[i-1], list): index_tmp_layers", "return z @torch.no_grad() def conv_backward(next_dz, K, z, padding=(0, 0), strides=(1,", "layer_name = 'AdaptiveAvgPool2d' AdaptiveAvgPool2d_params = {} AdaptiveAvgPool2d_params['layer_name'] = layer_name #", "[tmp] for kk in range(end-start): last_connections.insert(start, 'Throwed') num_Throwed += 1", "(padding, padding) else: MaxPool2d_params['padding'] = padding # return parameters.append(MaxPool2d_params) elif", "kernel_size) else: MaxPool2d_params['kernel_size'] = kernel_size # stride stride = layer.__dict__.get('stride')", "else: Conv2d_params['padding'] = padding # return fc_conv_weights[i][j][k] = tmp_layer.weight parameters[i][j][k]", "= [] for j in range(len(layer)): 
tmp_dLoss_dz.append(copy.deepcopy(dLoss_dz)) for k in", "not use this file except in compliance with the License.", "for n in range(N): for c in range(C): if np_A[n,c]-np_B[n,c]", "in range(len(connections)): item_key = list(connections[i].keys())[0] if '(' in item_key or", "calculated loss: ', loss_torch.detach().numpy()) loss_torch.backward() if 'VGG' in str(model) or", "nn.Dropout): layer_name = 'Dropout' Dropout_params = {} Dropout_params['layer_name'] = layer_name", "you may not use this file except in compliance with", "u in var.next_functions: if u[0] is not None: dot.edge(str(id(u[0])), str(id(var)))", "in str(model).split('\\n')[0]: g = make_dot(y[0]) return g else: g =", "weight_z = fc_conv_weights[i][j][k] try: padding = tmp_layer['padding'] except: padding =", "eps, gamma) return_dz[i][j][k] = tmp_dLoss_dz[-1] print('=========================== {0:3} Branch: '.format(str(len(parameters)-1-i))+'{0:11}'.format(tmp_layer['layer_name'])+' Backward", "= tmp_layer.__dict__.get('out_features') Linear_params['out_features'] = out_features # return fc_conv_weights[i][j][k] = tmp_layer.weight", "Loss = nn.CrossEntropyLoss() if 'GoogLeNet' in str(model).split('\\n')[0]: loss_torch = Loss(result[0],", "print('================') print('================') for i in range(len(return_layers)): print(i, return_layers[i]) print('================') print('================')", "list(next_dz.shape)) print('# z.shape: ', list(z.shape)) zeros_tensor = torch.zeros_like(next_dz) dLoss_dz =", "in range(C): if np_A[c]-np_B[c] > error_tolerance or np_B[c]-np_A[c] > error_tolerance:", "_conv_forward(torch.swapaxes(F.pad(z, pad=(padding[1],padding[1],\\ padding[0],padding[0],0,0), mode='constant', value=0), 0, 1), torch.swapaxes(padding_next_dz, 0, 1))", "{} connections = [] for i in range(len(graph)): if 'label'", "= list(item.values())[0] last_key = list(tmp_split[-1].keys())[0] if value == last_key: tmp_split.append(item)", "return_dz = [] parameters, fc_conv_weights = get_structure_parameters_v1(model) featuremap = get_featuremap(featuremap_dir=None)", "j-1) tmp = [] tmp.append(connections[start:end+1]) tmp.append(connections[i:j-1]) last_connections[start:end+1] = [tmp] for", "= torch.zeros_like(padding_z) for n in torch.arange(N): for c in torch.arange(C):", "{'layer_name': layer_name} elif layer == 'View': layer_name = 'View' parameters[i]", "i in range(len(last_connections)): print(last_connections[i]) for i in range(len(featuremap)): if not", "#print(np_A[n,c], np_B[n,c]) error += 1 #print('Error rate: ', error/(C*N)) print('2D-error-rate:", "range(len(return_layers)): layer = return_layers[i] if isinstance(layer, nn.Conv2d): layer_name = 'Conv2d'", "list(y_true.shape)) y_shift = torch.sub(y_predict, torch.max(y_predict, dim=1, keepdim=True).values) y_exp = torch.exp(y_shift)", "= stride # padding padding = tmp_layer.__dict__.get('padding') if not isinstance(padding,", "notchoosed == []: last_connections = last_connections[:notchoosed[0]] else: pass for i", "return '('+(', ').join(['%d' % v for v in size])+')' def", "kernel_size) else: Conv2d_params['kernel_size'] = kernel_size # stride stride = layer.__dict__.get('stride')", "= 'ReLU' parameters[i][j][k] = {'layer_name': layer_name} elif tmp_layer == 'Add':", "dim=1, keepdim=True).values) y_exp = torch.exp(y_shift) y_probability = torch.div(y_exp, torch.sum(y_exp, dim=1,", "for i in range(len(files)): if '.pth' in files[i]: file_nums.append(int(files[i].split('.pth')[0])) file_nums.sort()", "size_to_str(size): return '('+(', ').join(['%d' % v for v 
in size])+')'", "for i in range(len(parameters)): print(i, parameters[i]) print('================') print('================') for i", "tensors[index_tensors] index_tensors += 1 for i in range(len(last_tensors)-1, -1, -1):", "') return error/(C*N) @torch.no_grad() def get_featuremap(featuremap_dir=None): import os featuremap =", "'Add' in current_layer_name: last_tensors[i] = 'Add' elif 'View' in current_layer_name:", "stride = tmp_layer.__dict__.get('stride') if not isinstance(stride, tuple): Conv2d_params['stride'] = (stride,", "torch.arange(out_h): for j in torch.arange(out_w): flat_idx = torch.argmax(padding_z[n, c, strides[0]", "layer['p'] mask = featuremap[-1] dLoss_dz = dropback_backward(dLoss_dz, mask, p) return_dz.append(dLoss_dz)", "= list(connections[i-1].keys())[0] if not connections[i][item_key] == last_item_key: for j in", "'Conv2d' Conv2d_params = {} Conv2d_params['layer_name'] = layer_name # in_channel in_channel", "= torch.zeros_like(next_dz) dLoss_dz = torch.where(torch.gt(z, 0), next_dz, zeros_tensor) print('# dz.shape:", "= (padding, padding) else: Conv2d_params['padding'] = padding # return fc_conv_weights[i]", "is distributed on an \"AS IS\" BASIS, # WITHOUT WARRANTIES", "AvgPool2d_params['kernel_size'] = kernel_size # stride stride = tmp_layer.__dict__.get('stride') if not", "list(dLoss_dfcB.shape)) return dLoss_dz, dLoss_dfcW/N, dLoss_dfcB/N @torch.no_grad() def view_backward(dLoss_dnextz, last_z, params):", "dLoss_dz = max_pooling_backward(dLoss_dz, z, pooling, stride, padding) return_dz[i] = dLoss_dz", "'BatchNorm2d': eps = layer['eps'] z = featuremap[i] gamma = fc_conv_weights[i]", "'Add': dLoss_dz = add_backward(dLoss_dz) return_dz[i] = dLoss_dz elif layer['layer_name'] ==", "y_true): print('\\n=========================== Layer:'+' {0:18}'.format('cross_entropy_loss')+' Start ===========================') print('# y_predict.shape: ', list(y_predict.shape))", "generate_connections(g): graph = str(g).split('\\n') labels = {} connections = []", "W_last = (W-1)*(strides[1]-1) + W pz = torch.zeros(N, D, H_last,", "'ReLU': z = featuremap[i] dLoss_dz = relu_backward(dLoss_dz, z) return_dz[i] =", "p = layer.__dict__.get('p') Dropout_params['p'] = p # return parameters.append(Dropout_params) elif", "connections.remove(pop_index[i]) new_connections = [] for item in connections: key, value", "in current_layer_name: last_tensors[i][j][k] = 'Add' elif 'View' in current_layer_name: last_tensors[i][j][k]", "'View': layer_name = 'View' parameters[i] = {'layer_name': layer_name} elif layer", "else: Conv2d_params['padding'] = padding # return fc_conv_weights.append(layer.weight) parameters.append(Conv2d_params) elif isinstance(layer,", "# kernel_size kernel_size = layer.__dict__.get('kernel_size') if not isinstance(kernel_size, tuple): AvgPool2d_params['kernel_size']", "layer_name = 'Conv2d' Conv2d_params = {} Conv2d_params['layer_name'] = layer_name #", "= layer['stride'] dLoss_dz, dLoss_dW, dLoss_dB = conv_backward(dLoss_dz, weight_z, z, padding,", "1: lastpop = featuremap.pop() if not len(dLoss_dz.shape) == len(lastpop.shape): dLoss_dz", "item_key: pop_index.append(connections[i]) for i in range(len(pop_index)-1, -1, -1): connections.remove(pop_index[i]) new_connections", "if u[0] is not None: dot.edge(str(id(u[0])), str(id(var))) add_nodes(u[0]) if hasattr(var,", "has not been processed in get_structure_parameters!') return parameters, fc_conv_weights def", "'View' else: last_tensors[i][j][k] = tensors[index_tensors] index_tensors += 1 for i", "nn.AvgPool2d): layer_name = 
@torch.no_grad()
def generate_g(model, x):
    delete_allpths(pth_dir=None)
    print('=========================== Store network model Results Start =========================')
    y = model(x)
    print('=========================== Store network model Results End ===========================\n')
    # GoogLeNet returns a tuple (main output plus aux logits), so graph the first entry.
    if 'GoogLeNet' in str(model).split('\n')[0]:
        g = make_dot(y[0])
        return g
    else:
        g = make_dot(y)
        return g


@torch.no_grad()
def exchange_name(name):
    # Map autograd grad_fn names onto the nn.Module layer names used below.
    if 'Relu' in name:
        return 'ReLU'
    elif 'AddmmBackward' in name:
        return 'Linear'
    elif 'BatchNorm' in name:
        return 'BatchNorm2d'
    elif 'Conv' in name:
        return 'Conv2d'
    elif 'MaxPool' in name:
        return 'MaxPool2d'
    elif 'Mean' in name or 'Avg' in name:
        return 'AvgPool2d'
    elif 'ViewBackward' in name:
        return 'View'
    elif 'AddBackward' in name:
        return 'Add'
    elif 'Cat' in name:
        return 'Cat'
    elif 'DivBackward' in name:
        return 'Dropout_1'
    elif 'MulBackward' in name:
        return 'Dropout_2'
    else:
        return 'None'


@torch.no_grad()
def generate_connections(g):
    # Parse the Graphviz source of the autograd graph: first collect the node
    # labels, then turn every edge into a {from: to} dict.
    graph = str(g).split('\n')
    labels = {}
    connections = []
    for i in range(len(graph)):
        if 'label' in graph[i] and graph[i][-1] == ']':
            labels[graph[i].split('\t')[1].split(' ')[0]] = \
                graph[i].split('\t')[1].split('=')[1].split(']')[0]
    for i in range(len(graph)):
        if ' -> ' in graph[i]:
            connections.append({labels[graph[i].split('\t')[1].split(' -> ')[0]]+'_'+
                                graph[i].split('\t')[1].split(' -> ')[0]:
                                labels[graph[i].split('\t')[1].split(' -> ')[1]]+'_'+
                                graph[i].split('\t')[1].split(' -> ')[1]})
    # Drop edges that start at tensor-size label nodes such as '(64, 3, 7, 7)'.
    pop_index = []
    for i in range(len(connections)):
        item_key = list(connections[i].keys())[0]
        if '(' in item_key or ')' in item_key:  # second test reconstructed; the fragments cut off here
            pop_index.append(connections[i])
    for i in range(len(pop_index)-1, -1, -1):
        connections.remove(pop_index[i])
    new_connections = []
    for item in connections:
        key, value = list(item.items())[0]
        key1 = exchange_name(key.split('_')[0]) + '_' + key.split('_')[1]
        value1 = exchange_name(value.split('_')[0]) + '_' + value.split('_')[1]
        if 'None' in key1 or 'None' in value1:
            print('Not completed for '+key+' or '+value+'! Check exchange_name function!')
            exit()
        new_connections.append({key1: value1})
    if not len(new_connections) == len(connections):
        print('Generate connections not done! Check generate_connections function!')
        exit()
    new_connections.insert(0, {list(new_connections[0].values())[0]: None})
    new_connections.append({'None': 'None'})
    return connections, new_connections
@torch.no_grad()
def get_split_connections(connections):
    # Split the edge list into runs of consecutively chained layers.
    return_connections = []
    tmp_split = []
    for i in range(len(connections)):
        item = connections[i]
        if len(tmp_split) == 0:
            tmp_split.append(item)
            continue
        value = list(item.values())[0]
        last_key = list(tmp_split[-1].keys())[0]
        if value == last_key:
            tmp_split.append(item)
        else:
            return_connections.append(tmp_split)
            tmp_split = [item]
    return_connections.append(tmp_split)  # flush the final run (reconstructed; the fragments cut off here)
    return return_connections


@torch.no_grad()
def find_start_end(list_dic_key_value, i, j):
    key1 = list(list_dic_key_value[i].values())[0]
    key2 = list(list_dic_key_value[j].keys())[0]
    start = 0
    end = len(list_dic_key_value)-1
    for index in range(len(list_dic_key_value)):
        if key1 == list(list_dic_key_value[index].keys())[0]:
            start = index
            break
    for index in range(len(list_dic_key_value)):
        if key2 == list(list_dic_key_value[index].keys())[0]:
            end = index
            break
    return start+1, end-1


@torch.no_grad()
def merge_connections(connections):
    import copy
    last_connections = copy.deepcopy(connections)
    connections.append({'None': 'None'})
    num_Throwed = 0
    notchoosed = []
    print('\n=========================== Restore network model Start ===============================')
    for i in range(len(connections)):
        print('# Restore network model: processing', i)
        item_key = list(connections[i].keys())[0]
        if i == 0:
            pass
        else:
            last_item_key = list(connections[i-1].keys())[0]
            if not connections[i][item_key] == last_item_key:
                # A break in the chain marks a parallel branch; find its extent
                # and fold both paths into one nested list entry.
                for j in range(i+1, len(connections)):
                    if not list(connections[j].values())[0] == list(connections[j-1].keys())[0]:
                        notchoosed.append(i)
                        start, end = find_start_end(connections, i, j-1)
                        tmp = []
                        tmp.append(connections[start:end+1])
                        tmp.append(connections[i:j-1])
                        last_connections[start:end+1] = [tmp]
                        for kk in range(end-start):
                            last_connections.insert(start, 'Throwed')
                            num_Throwed += 1
                        break
    if not notchoosed == []:
        last_connections = last_connections[:notchoosed[0]]
    else:
        pass
    for i in range(num_Throwed):
        last_connections.remove('Throwed')
    if last_connections[-1] == {'None': 'None'}:
        last_connections.remove({'None': 'None'})
    print('=========================== Restore network model End =================================\n')
    return last_connections
@torch.no_grad()
def find_next_layer_by_name(layers, name, start_i):
    for i in range(start_i, len(layers)):
        layer = layers[i]
        if name in str(layer):
            return layer, i


@torch.no_grad()
def get_layers(last_connections, model):
    # Walk the restored connection list backwards and match every node to a
    # concrete nn.Module; ReLU is rebuilt, Add/View stay symbolic strings.
    return_layers = []
    tmp_layers = []
    for layer in model.modules():
        if not ':' in str(layer):
            tmp_layers.append(layer)
    index_tmp_layers = 0
    for i in range(len(last_connections)-1, -1, -1):
        if not isinstance(last_connections[i], list):
            # single layer, no branch
            current_layer_name = list(last_connections[i].keys())[0].split('_')[0]
            if 'ReLU' in current_layer_name:
                return_layers.insert(0, torch.nn.ReLU(inplace=True))
            elif 'Add' in current_layer_name:
                return_layers.insert(0, 'Add')
            elif 'View' in current_layer_name:
                return_layers.insert(0, 'View')
            else:
                tmp = find_next_layer_by_name(tmp_layers, current_layer_name, index_tmp_layers)
                return_layers.insert(0, tmp[0])
                if isinstance(last_connections[i-1], list):
                    index_tmp_layers = tmp[1] + 1
                elif not list(last_connections[i-1].keys())[0].split('_')[0] == 'Dropout':
                    index_tmp_layers = tmp[1] + 1
        else:
            # parallel branches
            return_layers.insert(0, [])
            for j in range(len(last_connections[i])):
                return_layers[0].append([])
                if len(last_connections[i][j]) == 0:
                    continue
                for k in range(len(last_connections[i][j])-1, -1, -1):
                    current_layer_name = list(last_connections[i][j][k].keys())[0].split('_')[0]
                    if 'ReLU' in current_layer_name:
                        return_layers[0][j].insert(0, torch.nn.ReLU(inplace=True))
                    elif 'Add' in current_layer_name:
                        return_layers[0][j].insert(0, 'Add')
                    elif 'View' in current_layer_name:
                        return_layers[0][j].insert(0, 'View')
                    else:
                        tmp = find_next_layer_by_name(tmp_layers, current_layer_name, index_tmp_layers)
                        return_layers[0][j].insert(0, tmp[0])
                        if not list(last_connections[i][j][k-1].keys())[0].split('_')[0] == 'Dropout':
                            index_tmp_layers = tmp[1] + 1
    return return_layers


@torch.no_grad()
def get_tensors(last_connections):
    # Pair every restored layer with the feature map its forward pass dumped.
    tensors = get_featuremap(featuremap_dir=None)
    index_tensors = 0
    import copy
    last_tensors = copy.deepcopy(last_connections)
    for i in range(len(last_connections)-1, -1, -1):
        if not isinstance(last_connections[i], list):
            current_layer_name = list(last_connections[i].keys())[0].split('_')[0]
            if 'Add' in current_layer_name:
                last_tensors[i] = 'Add'
            elif 'View' in current_layer_name:
                last_tensors[i] = 'View'
            else:
                last_tensors[i] = tensors[index_tensors]
                index_tensors += 1
        else:
            for j in range(len(last_connections[i])):
                for k in range(len(last_connections[i][j])-1, -1, -1):
                    current_layer_name = list(last_connections[i][j][k].keys())[0].split('_')[0]
                    if 'Add' in current_layer_name:
                        last_tensors[i][j][k] = 'Add'
                    elif 'View' in current_layer_name:
                        last_tensors[i][j][k] = 'View'
                    else:
                        last_tensors[i][j][k] = tensors[index_tensors]
                        index_tensors += 1
    for i in range(len(last_tensors)-1, -1, -1):
        if isinstance(last_tensors[i], str):  # 'Add' or 'View' placeholders
            if last_tensors[i] == 'Add':
                last_tensors[i] = last_tensors[i+1][0][0] + last_tensors[i+1][1][0]
            if last_tensors[i] == 'View':
                last_tensors[i] = last_tensors[i+1]  # completion reconstructed; the fragments cut off here
        elif isinstance(last_tensors[i], list):
            for j in range(len(last_tensors[i])):
                if len(last_tensors[i][j]) == 0:
                    last_tensors[i][j].append(last_tensors[i+1])
    return last_tensors
@torch.no_grad()
def _layer_params(layer):
    """Extract a {'layer_name': ...} parameter dict for one layer, plus its
    weight tensor (or None). The original file repeats this if/elif chain
    inside get_structure_parameters and get_structure_parameters_v1; it is
    factored out here without changing what gets recorded."""
    def _pair(v):
        return v if isinstance(v, tuple) else (v, v)
    if isinstance(layer, nn.Conv2d):
        return {'layer_name': 'Conv2d',
                'in_channel': layer.__dict__.get('in_channels'),
                'out_channel': layer.__dict__.get('out_channels'),
                'kernel_size': _pair(layer.__dict__.get('kernel_size')),
                'stride': _pair(layer.__dict__.get('stride')),
                'padding': _pair(layer.__dict__.get('padding'))}, layer.weight
    elif isinstance(layer, nn.ReLU):
        return {'layer_name': 'ReLU'}, None
    elif isinstance(layer, nn.MaxPool2d):
        return {'layer_name': 'MaxPool2d',
                'kernel_size': _pair(layer.__dict__.get('kernel_size')),
                'stride': _pair(layer.__dict__.get('stride')),
                'padding': _pair(layer.__dict__.get('padding'))}, None
    elif isinstance(layer, nn.AvgPool2d):
        return {'layer_name': 'AvgPool2d',
                'kernel_size': _pair(layer.__dict__.get('kernel_size')),
                'stride': _pair(layer.__dict__.get('stride')),
                'padding': _pair(layer.__dict__.get('padding'))}, None
    elif isinstance(layer, nn.Dropout):
        return {'layer_name': 'Dropout', 'p': layer.__dict__.get('p')}, None
    elif isinstance(layer, nn.BatchNorm2d):
        return {'layer_name': 'BatchNorm2d',
                'num_features': layer.__dict__.get('num_features'),
                'eps': layer.__dict__.get('eps')}, layer.weight
    elif isinstance(layer, nn.Linear):
        return {'layer_name': 'Linear',
                'in_features': layer.__dict__.get('in_features'),
                'out_features': layer.__dict__.get('out_features')}, layer.weight
    elif isinstance(layer, nn.AdaptiveAvgPool2d):
        return {'layer_name': 'AdaptiveAvgPool2d',
                'output_size': _pair(layer.__dict__.get('output_size'))}, None
    elif layer in ('Add', 'View', 'Cat'):
        return {'layer_name': layer}, None
    return None, None


@torch.no_grad()
def get_structure_parameters_v1(model):
    layers = []
    for layer in model.modules():
        if not ':' in str(layer):
            layers.append(layer)
    parameters = []
    fc_conv_weights = []
    for layer in layers:
        p, w = _layer_params(layer)
        if p is None:
            print('The layer has not been processed in get_structure_parameters_v1!')
            continue
        parameters.append(p)
        if w is not None:
            fc_conv_weights.append(w)
    return parameters, fc_conv_weights


@torch.no_grad()
def get_structure_parameters(return_layers):
    import copy
    parameters = copy.deepcopy(return_layers)
    fc_conv_weights = copy.deepcopy(return_layers)
    for i in range(len(return_layers)):
        layer = return_layers[i]
        if isinstance(layer, list):  # parallel branches
            for j in range(len(layer)):
                for k in range(len(layer[j])):
                    p, w = _layer_params(layer[j][k])
                    if p is None:
                        print('The layer has not been processed in get_structure_parameters!')
                        continue
                    parameters[i][j][k] = p
                    if w is not None:
                        fc_conv_weights[i][j][k] = w
        else:
            p, w = _layer_params(layer)
            if p is None:
                print('The layer has not been processed in get_structure_parameters!')
                continue
            parameters[i] = p
            if w is not None:
                fc_conv_weights[i] = w
    return parameters, fc_conv_weights
@torch.no_grad()
def get_featuremap(featuremap_dir=None):
    # The instrumented forward pass dumps one <index>.pth tensor per layer;
    # load them back in index order, then clear the dump directory.
    import os
    featuremap = []
    if featuremap_dir == None:
        pth_dir = "./tmp_file/"
    else:
        pth_dir = featuremap_dir
    files = os.listdir(pth_dir)
    file_nums = []
    for i in range(len(files)):
        if '.pth' in files[i]:
            file_nums.append(int(files[i].split('.pth')[0]))
    file_nums.sort()
    for file_num in file_nums:
        tensor = torch.load(pth_dir+str(file_num)+'.pth')
        featuremap.append(tensor)
    delete_allpths(pth_dir=None)
    return featuremap


@torch.no_grad()
def delete_allpths(pth_dir=None):
    import os
    if pth_dir == None:
        pth_dir = "./tmp_file/"
    for root, dirs, files in os.walk(pth_dir, topdown=False):
        for name in files:
            if name.endswith('.pth'):
                os.remove(os.path.join(root, name))


@torch.no_grad()
def judge_tensors_equal(tensor_A, tensor_B):
    # Element-wise comparison with a fixed tolerance; returns the error rate.
    if not tensor_A.shape == tensor_B.shape:
        print('Shape of two compared tensors is not equal.')
        return None
    error = 0
    error_tolerance = 0.001
    np_A = tensor_A.detach().numpy()
    np_B = tensor_B.detach().numpy()
    if len(tensor_A.shape) == 4:
        N, C, H, W = tensor_A.shape
        for n in range(N):
            for c in range(C):
                for h in range(H):
                    for w in range(W):
                        if np_A[n,c,h,w]-np_B[n,c,h,w] > error_tolerance or \
                           np_B[n,c,h,w]-np_A[n,c,h,w] > error_tolerance:
                            error += 1
        print('4D-error-rate: ', end=' ')
        return error/(N*C*H*W)
    elif len(tensor_A.shape) == 2:
        N, C = tensor_A.shape
        for n in range(N):
            for c in range(C):
                if np_A[n,c]-np_B[n,c] > error_tolerance or np_B[n,c]-np_A[n,c] > error_tolerance:
                    error += 1
        print('2D-error-rate: ', end=' ')
        return error/(C*N)
    elif len(tensor_A.shape) == 1:
        C = tensor_A.shape[0]
        for c in range(C):
            if np_A[c]-np_B[c] > error_tolerance or np_B[c]-np_A[c] > error_tolerance:
                error += 1
        print('1D-error-rate: ', end=' ')
        return error/C


@torch.no_grad()
def mul_items(tensor_size):
    x = list(tensor_size)
    mul = 1.
    for i in range(len(x)):
        mul *= x[i]
    return mul
@torch.no_grad()
def cross_entropy_loss(y_predict, y_true):
    print('\n=========================== Layer:'+' {0:18}'.format('cross_entropy_loss')+' Start ===========================')
    print('# y_predict.shape: ', list(y_predict.shape))
    print('# y_true.shape: ', list(y_true.shape))
    # Numerically stable softmax: shift logits by the per-row maximum.
    y_shift = torch.sub(y_predict, torch.max(y_predict, dim=1, keepdim=True).values)
    y_exp = torch.exp(y_shift)
    y_probability = torch.div(y_exp, torch.sum(y_exp, dim=1, keepdim=True))
    ypred_loss = torch.mean(-torch.sum(torch.mul(y_true, torch.log(y_probability)), dim=1, keepdim=True))
    # Closed-form gradient of softmax cross-entropy w.r.t. the logits.
    dLoss_dypred = y_probability - y_true
    print('# dLoss_dypred.shape: ', list(dLoss_dypred.shape))
    print('# Self calculated loss: ', ypred_loss.item())
    print('=========================== Layer:'+' {0:18}'.format('cross_entropy_loss')+' End =============================')
    return ypred_loss, dLoss_dypred
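
# A minimal sanity check for the closed-form gradient above
# (dL/dy_pred = softmax(y_pred) - y_true), compared against autograd.
# This helper is illustrative and not part of the original module;
# the name `_check_ce_grad` is hypothetical. Note F.cross_entropy
# averages over the batch, so the manual gradient is scaled by 1/N.
def _check_ce_grad():
    torch.manual_seed(0)
    y_pred = torch.randn(4, 10, requires_grad=True)
    label = torch.randint(0, 10, (4,))
    y_true = F.one_hot(label, num_classes=10).float()
    loss = F.cross_entropy(y_pred, label)
    loss.backward()
    _, d_manual = cross_entropy_loss(y_pred.detach(), y_true)
    print(torch.allclose(y_pred.grad, d_manual / 4, atol=1e-6))  # expect True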
@torch.no_grad()
def fc_backward(dLoss_dnextz, z, w):
    print('# next_dz.shape: ', list(dLoss_dnextz.shape))
    print('# z.shape: ', list(z.shape))
    print('# weight.shape: ', list(w.shape))
    print('# bias.shape: ', '['+str(dLoss_dnextz.shape[1])+']')
    N = z.shape[0]
    if len(z.shape) == 4:
        # flatten feature maps entering the fully connected layer
        # (reshape reconstructed; the fragments cut off here)
        z = z.reshape(N, -1)
    dLoss_dz = torch.matmul(dLoss_dnextz, w)  # delta
    dLoss_dfcW = torch.matmul(dLoss_dnextz.t(), z)
    dLoss_dfcB = torch.sum(dLoss_dnextz, dim=0)
    print('# dz.shape: ', list(dLoss_dz.shape))
    print('# dweight.shape: ', list(dLoss_dfcW.shape))
    print('# dbias.shape: ', list(dLoss_dfcB.shape))
    return dLoss_dz, dLoss_dfcW/N, dLoss_dfcB/N


@torch.no_grad()
def view_backward(dLoss_dnextz, last_z, params):
    print('# next_dz.shape: ', list(dLoss_dnextz.shape))
    print('# last_z.shape: ', list(last_z.shape))
    if params:
        # Undo the flatten through a pooling layer: recompute its output size.
        pooling = params[0]
        stride = params[1]
        padding = params[2]
        output_size = (int((last_z.shape[2]-pooling[0]+2*padding[0])/stride[0]+1),
                       int((last_z.shape[3]-pooling[0]+2*padding[0])/stride[0]+1))
        dLoss_dz = dLoss_dnextz.reshape(last_z.shape[0], last_z.shape[1], output_size[0], output_size[1])
    else:
        dLoss_dz = dLoss_dnextz.reshape(last_z.shape)
    print('# dz.shape: ', list(dLoss_dz.shape))
    return dLoss_dz


@torch.no_grad()
def add_backward(dLoss_dnextz):
    # Addition passes the gradient through unchanged to each operand.
    print('# next_dz.shape: ', list(dLoss_dnextz.shape))
    dLoss_dz = dLoss_dnextz
    print('# dz.shape: ', list(dLoss_dz.shape))
    return dLoss_dz
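
# Hedged sketch: checking fc_backward against autograd for one nn.Linear.
# The weight/bias gradients come back already divided by the batch size N
# (matching a mean-reduced loss), so they are rescaled before comparing.
# `_check_fc_grad` is illustrative, not part of the original file.
def _check_fc_grad():
    torch.manual_seed(0)
    N, in_f, out_f = 8, 32, 16
    fc = nn.Linear(in_f, out_f)
    z = torch.randn(N, in_f, requires_grad=True)
    out = fc(z)
    next_dz = torch.randn(N, out_f)
    out.backward(next_dz)
    dz, dW, dB = fc_backward(next_dz, z.detach(), fc.weight.detach())
    print(torch.allclose(dz, z.grad, atol=1e-6))           # expect True
    print(torch.allclose(dW * N, fc.weight.grad, atol=1e-5))  # expect True
    print(torch.allclose(dB * N, fc.bias.grad, atol=1e-6))    # expect True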
@torch.no_grad()
def relu_backward(next_dz, z):
    print('# next_dz.shape: ', list(next_dz.shape))
    print('# z.shape: ', list(z.shape))
    # Pass the gradient through only where the forward input was positive.
    zeros_tensor = torch.zeros_like(next_dz)
    dLoss_dz = torch.where(torch.gt(z, 0), next_dz, zeros_tensor)
    print('# dz.shape: ', list(dLoss_dz.shape))
    return dLoss_dz


@torch.no_grad()
def dropback_backward(next_dz, mask, p):
    print('# next_dz.shape: ', list(next_dz.shape))
    print('# mask.shape: ', list(mask.shape))
    # Inverted dropout: kept units are rescaled by 1/(1-p), dropped units get 0.
    zeros_tensor = torch.zeros_like(mask)
    dLoss_dz = torch.mul(torch.where(torch.eq(mask, 1.), next_dz, zeros_tensor), 1./(1.-p))
    print('# dz.shape: ', list(dLoss_dz.shape))
    return dLoss_dz
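
# Hedged sanity check for relu_backward: the mask rule
# dL/dz = dL/dnext * 1[z > 0] should match autograd exactly
# (inputs of exactly 0 have measure zero for random floats).
# `_check_relu_grad` is illustrative only.
def _check_relu_grad():
    torch.manual_seed(0)
    z = torch.randn(2, 3, 4, 4, requires_grad=True)
    out = F.relu(z)
    next_dz = torch.randn_like(out)
    out.backward(next_dz)
    dz = relu_backward(next_dz, z.detach())
    print(torch.allclose(dz, z.grad))  # expect True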
@torch.no_grad()
def batchnorm2d_backward(next_dz, z, eps, gamma=torch.Tensor([1.,1.,1.])):
    print('# next_dz.shape: ', list(next_dz.shape))
    print('# z.shape: ', list(z.shape))
    print('# eps: ', eps)
    print('# gamma.shape: ', list(gamma.shape))
    N, C, H, W = z.shape
    m = N*H*W
    shape = [N, C, H, W]
    import numpy as np
    ax = list(np.arange(len(shape)))
    shape.pop(1)
    ax.pop(1)
    axis = tuple(ax)  # reduce over (N, H, W), keep the channel dimension
    dxhut = torch.zeros_like(next_dz)
    for c in range(C):
        dxhut[:, c] = next_dz[:, c]*gamma[c]
    dz1 = m*dxhut
    mu = z.mean(axis=axis, keepdim=True)
    xmu = z - mu
    xmu2 = xmu**2
    var = xmu2.sum(axis=axis, keepdim=True)/m
    ivar = 1./torch.pow(var+eps, 0.5)
    dz2 = (ivar**2)*((dxhut*xmu).sum(axis=axis, keepdim=True))*xmu
    dz3 = dxhut.sum(axis=axis, keepdim=True)
    # dz = (ivar/m) * (m*dxhat - xhat*sum(dxhat*xhat) - sum(dxhat))
    dz = ivar/m*(dz1-dz2-dz3)
    print('# dz.shape: ', list(dz.shape))
    return dz
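
# Hedged sketch: verifying batchnorm2d_backward against autograd for a
# training-mode nn.BatchNorm2d. The closed form above assumes batch
# statistics with biased variance, which is what training mode uses.
# `_check_bn_grad` is illustrative only.
def _check_bn_grad():
    torch.manual_seed(0)
    N, C, H, W = 4, 3, 8, 8
    bn = nn.BatchNorm2d(C)
    bn.train()
    z = torch.randn(N, C, H, W, requires_grad=True)
    out = bn(z)
    next_dz = torch.randn(N, C, H, W)
    out.backward(next_dz)
    dz = batchnorm2d_backward(next_dz, z.detach(), bn.eps, bn.weight.detach())
    print(torch.allclose(dz, z.grad, atol=1e-5))  # expect True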
@torch.no_grad()
def max_pooling_backward(next_dz, z, pooling, strides, padding=(0, 0)):
    print('# next_dz.shape: ', list(next_dz.shape))
    print('# z.shape: ', list(z.shape))
    print('# padding: ', padding)
    print('# strides: ', strides)
    N, C, H, W = z.shape
    _, _, out_h, out_w = next_dz.shape
    padding_z = F.pad(z, pad=(padding[1], padding[1], padding[0], padding[0], 0, 0),
                      mode='constant', value=0)
    padding_dz = torch.zeros_like(padding_z)
    for n in torch.arange(N):
        for c in torch.arange(C):
            for i in torch.arange(out_h):
                for j in torch.arange(out_w):
                    # route the gradient to the argmax position of each window
                    flat_idx = torch.argmax(padding_z[n, c,
                                            strides[0]*i:strides[0]*i+pooling[0],
                                            strides[1]*j:strides[1]*j+pooling[1]])
                    h_idx = strides[0]*i + flat_idx // pooling[1]
                    w_idx = strides[1]*j + flat_idx % pooling[1]
                    padding_dz[n, c, h_idx, w_idx] += next_dz[n, c, i, j]
    dz = _remove_padding(padding_dz, padding)  # i.e. padding_z[:, :, padding[0]:-padding[0], padding[1]:-padding[1]]
    print('# dz.shape: ', list(dz.shape))
    return dz


@torch.no_grad()
def average_pooling_backward(next_dz, z, pooling, strides, padding=(0, 0)):
    print('# next_dz.shape: ', list(next_dz.shape))
    print('# z.shape: ', list(z.shape))
    print('# padding: ', padding)
    print('# strides: ', strides)
    N, C, H, W = z.shape
    _, _, out_h, out_w = next_dz.shape
    padding_z = F.pad(z, pad=(padding[1], padding[1], padding[0], padding[0], 0, 0),
                      mode='constant', value=0)
    padding_dz = torch.zeros_like(padding_z)
    for n in torch.arange(N):
        for c in torch.arange(C):
            for i in torch.arange(out_h):
                for j in torch.arange(out_w):
                    # spread the gradient evenly over each pooling window
                    padding_dz[n, c,
                               strides[0]*i:strides[0]*i+pooling[0],
                               strides[1]*j:strides[1]*j+pooling[1]] += next_dz[n, c, i, j] / (pooling[0] * pooling[1])
    dz = _remove_padding(padding_dz, padding)
    print('# dz.shape: ', list(dz.shape))
    return dz
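
# Hedged sketch: checking max_pooling_backward against autograd via
# F.max_pool2d. Assumes non-overlapping windows with unique maxima,
# where the argmax-routing rule above is exact. Illustrative only.
def _check_maxpool_grad():
    torch.manual_seed(0)
    z = torch.randn(2, 3, 8, 8, requires_grad=True)
    out = F.max_pool2d(z, kernel_size=2, stride=2)
    next_dz = torch.randn_like(out)
    out.backward(next_dz)
    dz = max_pooling_backward(next_dz, z.detach(), (2, 2), (2, 2), (0, 0))
    print(torch.allclose(dz, z.grad, atol=1e-6))  # expect True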
@torch.no_grad()
def _remove_padding(z, padding):
    if padding[0] > 0 and padding[1] > 0:
        return z[:, :, padding[0]:-padding[0], padding[1]:-padding[1]]
    elif padding[0] > 0:
        return z[:, :, padding[0]:-padding[0], :]
    elif padding[1] > 0:
        return z[:, :, :, padding[1]:-padding[1]]
    else:
        return z


@torch.no_grad()
def _insert_zeros(dz, strides):
    # Dilate the gradient map: insert stride-1 zeros between elements to
    # undo the forward stride before the transposed convolution.
    N, D, H, W = dz.shape
    H_last = (H-1)*(strides[0]-1) + H
    W_last = (W-1)*(strides[1]-1) + W
    pz = torch.zeros(N, D, H_last, W_last)
    for n in range(N):
        for d in range(D):
            for h in range(0, H_last, strides[0]):
                for w in range(0, W_last, strides[1]):
                    pz[n, d, h, w] = dz[n, d, h//strides[0], w//strides[1]]
    return pz


@torch.no_grad()
def _conv_forward(x, weight, strides=(1, 1)):
    n, c, h_in, w_in = x.shape
    d, c, k, j = weight.shape
    x_pad = x
    x_pad = x_pad.unfold(2, k, strides[0])
    x_pad = x_pad.unfold(3, j, strides[1])
    # contract the unfolded windows against the kernels
    # (einsum completion reconstructed; the fragments cut off here)
    out = torch.einsum('nchwkj,dckj->ndhw', x_pad, weight)
    return out
"W_last, strides[1]): pz[n,d,h,w] = dz[n,d,h//strides[0],w//strides[1]] return pz @torch.no_grad() def judge_tensors_equal(tensor_A,", "copy.deepcopy(last_connections) featuremap = return_tensors featuremap.append(img) y_true = F.one_hot(label, num_classes=num_class).float() loss,", "')[1]}) pop_index = [] for i in range(len(connections)): item_key =", "dxhut.sum(axis=axis, keepdim=True) dz = ivar/m*(dz1-dz2-dz3) print('# dz.shape: ', list(dz.shape)) return", "connections not done! Check generate_connections function!') exit() new_connections.insert(0, {list(new_connections[0].values())[0]: None})", "+ 1 elif not list(last_connections[i-1].keys())[0].split('_')[0] == 'Dropout': index_tmp_layers = tmp[1]", "list(last_connections[i][j][k].keys())[0].split('_')[0] if 'Add' in current_layer_name: last_tensors[i][j][k] = 'Add' elif 'View'", "return error/(N*C*H*W) elif len(tensor_A.shape) == 1: C = tensor_A.shape[0] for", "torch.sum(torch.sum(torch.sum(next_dz, axis=-1), axis=-1), axis=0) # 在高度、宽度上相加;批量大小上相加 print('# dz.shape: ', list(dz.shape))", "else: return_layers.insert(0, []) for j in range(len(last_connections[i])): return_layers[0].append([]) if len(last_connections[i][j])", "dLoss_dz = max_pooling_backward(dLoss_dz, z, pooling, stride, padding) return_dz.append(dLoss_dz) lastpop =", "tmp_layer.__dict__.get('out_features') Linear_params['out_features'] = out_features # return fc_conv_weights[i][j][k] = tmp_layer.weight parameters[i][j][k]", "list): index_tmp_layers = tmp[1] + 1 elif not list(last_connections[i-1].keys())[0].split('_')[0] ==", "= (padding, padding) else: MaxPool2d_params['padding'] = padding # return parameters[i]", "return_layers = [] tmp_layers = [] for layer in model.modules():", "= (padding, padding) else: Conv2d_params['padding'] = padding # return fc_conv_weights.append(layer.weight)", "len(lastpop.shape): dLoss_dz = dLoss_dz.reshape(lastpop.shape) elif layer['layer_name'] == 'MaxPool2d': z =", "featuremap[-1] dLoss_dz, dLoss_dW, dLoss_dB = fc_backward(dLoss_dz, z, weight_z) return_dz.append(dLoss_dz) fc_conv_weights.pop()", "print('# next_dz.shape: ', list(dLoss_dnextz.shape)) dLoss_dz = dLoss_dnextz print('# dz.shape: ',", "padding_dz[n, c, strides[0] * i:strides[0] * i + pooling[0], strides[1]", "= (kernel_size, kernel_size) else: AvgPool2d_params['kernel_size'] = kernel_size # stride stride", "\"./tmp_file/\" for root, dirs, files in os.walk(pth_dir, topdown=False): for name", "z, pooling, stride, padding) return_dz.append(dLoss_dz) lastpop = featuremap.pop() if not", "for n in range(N): for d in range(D): for h", "current_layer_name = list(last_connections[i][j][k].keys())[0].split('_')[0] if 'ReLU' in current_layer_name: return_layers[0][j].insert(0, torch.nn.ReLU(inplace=True)) elif", "= (padding, padding) else: AvgPool2d_params['padding'] = padding # return parameters[i][j][k]", "kernel_size = layer.__dict__.get('kernel_size') if not isinstance(kernel_size, tuple): AvgPool2d_params['kernel_size'] = (kernel_size,", "params = (parameters[i+1]['kernel_size'], parameters[i+1]['stride'], parameters[i+1]['padding']) else: params = None dLoss_dz", "print(judge_tensors_equal(dLoss_dW, model.features[0].weight.grad)) elif 'ResNet' in str(model): print(judge_tensors_equal(dLoss_dW, model.conv1.weight.grad)) delete_allpths(pth_dir=None) return", "last_connections @torch.no_grad() def find_next_layer_by_name(layers, name, start_i): for i in range(start_i,", "not done! 
Check generate_connections function!') exit() new_connections.insert(0, {list(new_connections[0].values())[0]: None}) new_connections.append({'None':", "kernel_size kernel_size = layer.__dict__.get('kernel_size') if not isinstance(kernel_size, tuple): Conv2d_params['kernel_size'] =", "stride) else: MaxPool2d_params['stride'] = stride # padding padding = layer.__dict__.get('padding')", "MaxPool2d_params['stride'] = stride # padding padding = tmp_layer.__dict__.get('padding') if not", "featuremap[i].shape) else: for j in range(len(featuremap[i])): for k in range(len(featuremap[i][j])):", "pz @torch.no_grad() def judge_tensors_equal(tensor_A, tensor_B): if(not tensor_A.shape == tensor_B.shape): print('Shape", "len(lastpop.shape): dLoss_dz = dLoss_dz.reshape(lastpop.shape) else: print('Not completed in gradient_backward_v1!') print('========================", "i in range(num_Throwed): last_connections.remove('Throwed') if last_connections[-1] == {'None': 'None'}: last_connections.remove({'None':", "isinstance(layer, nn.Conv2d): layer_name = 'Conv2d' Conv2d_params = {} Conv2d_params['layer_name'] =", "for i in range(num_Throwed): last_connections.remove('Throwed') if last_connections[-1] == {'None': 'None'}:", "H, W = tensor_A.shape for n in range(N): for c", "parameters[i][j][k] = MaxPool2d_params elif isinstance(tmp_layer, nn.AvgPool2d): layer_name = 'AvgPool2d' AvgPool2d_params", "shape.pop(1) ax.pop(1) axis = tuple(ax) dxhut = torch.zeros_like(next_dz) for c", "dLoss_dW, dLoss_dB = conv_backward(tmp_dLoss_dz[-1], weight_z, z, padding, stride) return_dz[i][j][k] =", "ConvolutedDog (https://github.com/ConvolutedDog/) # # Licensed under the Apache License, Version", "c, k, j = weight.shape x_pad = x x_pad =", "k in range(len(last_connections[i][j])-1, -1, -1): current_layer_name = list(last_connections[i][j][k].keys())[0].split('_')[0] if 'ReLU'", "num_classes=num_class).float() loss, dLoss_dz = cross_entropy_loss(featuremap[0], y_true) featuremap.pop(0) return_dz.append(dLoss_dz) #####################tensors '''", "_conv_forward(x, weight, strides=(1,1)): n, c, h_in, w_in = x.shape d,", "= tmp_layer.__dict__.get('kernel_size') if not isinstance(kernel_size, tuple): MaxPool2d_params['kernel_size'] = (kernel_size, kernel_size)", "dLoss_dB = conv_backward(tmp_dLoss_dz[-1], weight_z, z, padding, stride) return_dz[i][j][k] = tmp_dLoss_dz[-1]", "that require grad, orange are Tensors saved for backward in", "else: AvgPool2d_params['kernel_size'] = kernel_size # stride stride = layer.__dict__.get('stride') if", "# return parameters.append(AvgPool2d_params) elif isinstance(layer, nn.Dropout): layer_name = 'Dropout' Dropout_params", "You may obtain a copy of the License at #", "for c in torch.arange(C): for i in torch.arange(out_h): for j", "0 error_tolerance = 0.001 np_A = tensor_A.detach().numpy() np_B = tensor_B.detach().numpy()", "parameters, fc_conv_weights @torch.no_grad() def delete_allpths(pth_dir=None): import os if pth_dir ==", "= dLoss_dz elif layer['layer_name'] == 'AvgPool2d': z = featuremap[i] pooling", "= params[2] output_size = (int((last_z.shape[2]-pooling[0]+2*padding[0])/stride[0]+1), \\ int((last_z.shape[3]-pooling[0]+2*padding[0])/stride[0]+1)) dLoss_dz = dLoss_dnextz.reshape(last_z.shape[0],", "else: MaxPool2d_params['kernel_size'] = kernel_size # stride stride = tmp_layer.__dict__.get('stride') if", "and not isinstance(return_tensors[i], str): print('=========', i, return_tensors[i].shape) print('================') ''' import", "Conv2d_params['kernel_size'] = kernel_size # stride 
stride = layer.__dict__.get('stride') if not", "= layer.__dict__.get('kernel_size') if not isinstance(kernel_size, tuple): MaxPool2d_params['kernel_size'] = (kernel_size, kernel_size)", "generate_connections function!') exit() new_connections.insert(0, {list(new_connections[0].values())[0]: None}) new_connections.append({'None': 'None'}) return connections,", "padding[1]:-padding[1]] elif padding[0] > 0: return z[:, :, padding[0]:-padding[0], :]", "conv_backward(dLoss_dz, weight_z, z, padding, stride) return_dz.append(dLoss_dz) fc_conv_weights.pop() if not len(featuremap)", "return_connections.append(tmp_split) tmp_split = [item] return return_connections @torch.no_grad() def find_start_end(list_dic_key_value, i,", "strides) flip_K = torch.flip(K, (2, 3)) swap_flip_K = torch.swapaxes(flip_K, 0,", "% v for v in size])+')' def add_nodes(var): if var", "v in params.items()} node_attr = dict(style='filled', shape='box', align='left', fontsize='12', ranksep='0.1',", "'%s\\n %s' % (name, size_to_str(u.size())) dot.node(str(id(var)), node_name, fillcolor='lightblue') else: dot.node(str(id(var)),", "AdaptiveAvgPool2d_params['output_size'] = output_size # return parameters[i] = AdaptiveAvgPool2d_params elif isinstance(layer,", "#print('right', np_A[n,c,h,w], np_B[n,c,h,w]) #print('Error rate: ', error/(N*C*H*W)) print('4D-error-rate: ', end='", "grad (TODO: make optional) \"\"\" if params is not None:", "AvgPool2d_params['stride'] = stride # padding padding = layer.__dict__.get('padding') if not", "len(connections): print('Generate connections not done! Check generate_connections function!') exit() new_connections.insert(0,", "(0, 0) stride = tmp_layer['stride'] tmp_dLoss_dz[-1], dLoss_dW, dLoss_dB = conv_backward(tmp_dLoss_dz[-1],", "if len(last_connections[i][j]) == 0: continue for k in range(len(last_connections[i][j])-1, -1,", "padding_z[:, :, padding[0]:-padding[0], padding[1]:-padding[1]] print('# dz.shape: ', list(dz.shape)) return dz", "in var.saved_tensors: dot.edge(str(id(t)), str(id(var))) add_nodes(t) print(var) add_nodes(var.grad_fn) return dot def", "tmp_dLoss_dz[1].shape) dLoss_dz = tmp_dLoss_dz[0] + tmp_dLoss_dz[1] else: print('Not completed in", "== 'Cat': layer_name = 'Cat' parameters[i][j][k] = {'layer_name': layer_name} elif", "labels[graph[i].split('\\t')[1].split(' -> ')[1]]+'_'+\\ graph[i].split('\\t')[1].split(' -> ')[1]}) pop_index = [] for", "= x_pad.unfold(3, j, strides[1]) out = torch.einsum( 'nchwkj,dckj->ndhw', x_pad, weight)", "value.split('_')[1] if 'None' in key1 or 'None' in value1: print('Not", "view_backward(dLoss_dnextz, last_z, params): print('# next_dz.shape: ', list(dLoss_dnextz.shape)) print('# last_z.shape: ',", "', list(mask.shape)) zeros_tensor = torch.zeros_like(mask) dLoss_dz = torch.mul(torch.where(torch.eq(mask, 1.), next_dz,", "== 'Dropout': return_dz[i] = dLoss_dz print('# Skip this layer because", "0), next_dz, zeros_tensor) print('# dz.shape: ', list(dLoss_dz.shape)) return dLoss_dz @torch.no_grad()", "= list(list_dic_key_value[i].values())[0] key2 = list(list_dic_key_value[j].keys())[0] start = 0 end =", "elif 'View' in current_layer_name: return_layers.insert(0, 'View') else: tmp = find_next_layer_by_name(tmp_layers,", "for layer in model.modules(): if not ':' in str(layer): layers.append(layer)", "= tmp_layer.__dict__.get('kernel_size') if not isinstance(kernel_size, tuple): AvgPool2d_params['kernel_size'] = (kernel_size, kernel_size)", "= tmp_layer.weight parameters[i][j][k] = BatchNorm2d_params elif isinstance(tmp_layer, nn.Linear): 
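The _conv_forward (unfold + einsum) and _insert_zeros fragments scattered through these lines implement an unpadded convolution and stride-dilation of gradients. A self-contained sketch of both helpers, keeping the names from the fragments; the vectorized zero-insertion is a simplification of the nested loops shown there:

import torch

@torch.no_grad()
def _conv_forward(x, weight, strides=(1, 1)):
    # x: (N, C, H, W), weight: (D, C, k, j); valid (unpadded) cross-correlation.
    d, c, k, j = weight.shape
    # Extract sliding windows over H and W, then contract channel and kernel dims.
    patches = x.unfold(2, k, strides[0]).unfold(3, j, strides[1])  # (N, C, H_out, W_out, k, j)
    return torch.einsum('nchwkj,dckj->ndhw', patches, weight)

@torch.no_grad()
def _insert_zeros(dz, strides):
    # Dilate an upstream gradient by the forward stride: values land strides apart,
    # gaps are zero-filled (needed when back-propagating a strided convolution).
    N, D, H, W = dz.shape
    pz = torch.zeros(N, D, (H - 1) * strides[0] + 1, (W - 1) * strides[1] + 1,
                     dtype=dz.dtype)
    pz[:, :, ::strides[0], ::strides[1]] = dz
    return pz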
layer_name =", "{'layer_name': layer_name} elif isinstance(tmp_layer, nn.MaxPool2d): layer_name = 'MaxPool2d' MaxPool2d_params =", "AdaptiveAvgPool2d_params['output_size'] = (output_size, output_size) else: AdaptiveAvgPool2d_params['output_size'] = output_size # return", "else: params = None dLoss_dz = view_backward(dLoss_dz, last_z, params) return_dz[i]", "w in range(W): if np_A[n,c,h,w]-np_B[n,c,h,w] > error_tolerance or np_B[n,c,h,w]-np_A[n,c,h,w] >", "Dropout_params['p'] = p # return parameters.append(Dropout_params) elif isinstance(layer, nn.BatchNorm2d): layer_name", "', list(db.shape)) return dz, (dK/N).transpose(0,1), db/N @torch.no_grad() def _conv_forward(x, weight,", "mul @torch.no_grad() def gradient_backward_v1(model, img, label, num_class=1000): return_dz = []", "H_last, W_last) for n in range(N): for d in range(D):", "notchoosed.append(i) start, end = find_start_end(connections, i, j-1) tmp = []", "= 1. for i in range(len(x)): mul *= x[i] return", ":, padding[0]:-padding[0], padding[1]:-padding[1]] print('# dz.shape: ', list(dz.shape)) return dz @torch.no_grad()", "of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless", "= layer.__dict__.get('stride') if not isinstance(stride, tuple): Conv2d_params['stride'] = (stride, stride)", "layer['layer_name'] == 'ReLU': z = featuremap[-1] dLoss_dz = relu_backward(dLoss_dz, z)", "notchoosed = [] print('\\n=========================== Restore network model Start ===============================') for", "layer['layer_name'] == 'Conv2d': z = featuremap[i] weight_z = fc_conv_weights[i] try:", "= model(img) print('=========================== Generate Tensors End ======================================\\n') Loss = nn.CrossEntropyLoss()", "i in range(len(parameters)-1, -1, -1): layer = parameters[i] print('\\n======================== {0:3}", "tmp_layer = layer[j][k] ### if isinstance(tmp_layer, nn.Conv2d): layer_name = 'Conv2d'", "= layer[j][k] ### if isinstance(tmp_layer, nn.Conv2d): layer_name = 'Conv2d' Conv2d_params", "', list(gamma.shape)) N, C, H, W = z.shape m =", "*= x[i] return mul @torch.no_grad() def gradient_backward_v1(model, img, label, num_class=1000):", "item_key = list(connections[i].keys())[0] if not 'None' in item_key: if i", "item_key: if i == 0: pass else: last_item_key = list(connections[i-1].keys())[0]", "dLoss_dz elif layer['layer_name'] == 'BatchNorm2d': eps = layer['eps'] z =", "# out_features out_features = tmp_layer.__dict__.get('out_features') Linear_params['out_features'] = out_features # return", "isinstance(layer, nn.BatchNorm2d): layer_name = 'BatchNorm2d' BatchNorm2d_params = {} BatchNorm2d_params['layer_name'] =", "= exchange_name(value.split('_')[0]) + '_' + value.split('_')[1] if 'None' in key1", "cross_entropy_loss(featuremap[0], y_true) featuremap.pop(0) return_dz.append(dLoss_dz) #####################tensors ''' for i in range(len(last_connections)):", "'ReLU' parameters.append({'layer_name': layer_name}) elif isinstance(layer, nn.MaxPool2d): layer_name = 'MaxPool2d' MaxPool2d_params", "new_connections.append({key1: value1}) if not len(new_connections) == len(connections): print('Generate connections not", "isinstance(padding, tuple): AvgPool2d_params['padding'] = (padding, padding) else: AvgPool2d_params['padding'] = padding", "z @torch.no_grad() def conv_backward(next_dz, K, z, padding=(0, 0), strides=(1, 1)):", "dbias.shape: ', list(dLoss_dfcB.shape)) return dLoss_dz, dLoss_dfcW/N, dLoss_dfcB/N @torch.no_grad() def view_backward(dLoss_dnextz,", "= p # return parameters[i] = 
Dropout_params elif isinstance(layer, nn.BatchNorm2d):", "y_true) featuremap.pop(0) return_dz.append(dLoss_dz) #####################tensors ''' for i in range(len(last_connections)): print(last_connections[i])", "num_features # eps eps = layer.__dict__.get('eps') BatchNorm2d_params['eps'] = eps #", "= relu_backward(tmp_dLoss_dz[-1], z) return_dz[i][j][k] = tmp_dLoss_dz[-1] elif tmp_layer['layer_name'] == 'BatchNorm2d':", "dLoss_dW, dLoss_dB = fc_backward(dLoss_dz, z, weight_z) return_dz.append(dLoss_dz) fc_conv_weights.pop() lastpop =", "current_layer_name: return_layers.insert(0, 'View') else: tmp = find_next_layer_by_name(tmp_layers, current_layer_name, index_tmp_layers) return_layers[0][j].insert(0,", "last_tensors[i] = 'View' else: last_tensors[i] = tensors[index_tensors] index_tensors += 1", "n, c, h_in, w_in = x.shape d, c, k, j", "in range(len(last_connections[i])): if len(last_connections[i][j]) == 0: continue for k in", "z = featuremap[i] pooling = layer['kernel_size'] stride = layer['stride'] padding", "== 'Conv2d': z = featuremap[i] weight_z = fc_conv_weights[i] try: padding", "print('# z.shape: ', list(z.shape)) print('# eps: ', eps) print('# gamma.shape:", "end=' ') return error/(N*C*H*W) elif len(tensor_A.shape) == 1: C =", "print('# next_dz.shape: ', list(dLoss_dnextz.shape)) print('# z.shape: ', list(z.shape)) print('# weight.shape:", "tensor_A.shape[0] for c in range(C): if np_A[c]-np_B[c] > error_tolerance or", "Tensors saved for backward in torch.autograd.Function Args: var: output Variable", "= list(last_connections[i][j][k].keys())[0].split('_')[0] if 'Add' in current_layer_name: last_tensors[i][j][k] = 'Add' elif", "# return fc_conv_weights[i][j][k] = tmp_layer.weight parameters[i][j][k] = Conv2d_params elif isinstance(tmp_layer,", "print('\\n======================== {0:3} Layer: '.format(str(len(parameters)-1-i))+'{0:11}'.format(layer['layer_name'])+' Backward Start ========================') if layer['layer_name'] ==", "layer['layer_name'] == 'Linear': weight_z = fc_conv_weights[i] z = featuremap[i] dLoss_dz,", "stride, padding) return_dz.append(dLoss_dz) lastpop = featuremap.pop() if not len(dLoss_dz.shape) ==", "return dz @torch.no_grad() def _remove_padding(z, padding): if padding[0] > 0", "mul_items(tensor_size): x = list(tensor_size) mul = 1. 
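The argmax-over-window and next_dz / (pooling[0] * pooling[1]) fragments belong to max- and average-pooling backward passes. A sketch of the max-pooling case, routing each upstream value to the location of its window maximum; padding handling is simplified:

import torch
import torch.nn.functional as F

@torch.no_grad()
def max_pooling_backward(next_dz, z, pooling, strides, padding=(0, 0)):
    # next_dz: (N, C, H_out, W_out) upstream gradient, z: pooling input.
    N, C, out_h, out_w = next_dz.shape
    padding_z = F.pad(z, (padding[1], padding[1], padding[0], padding[0]))
    padding_dz = torch.zeros_like(padding_z)
    for n in range(N):
        for c in range(C):
            for i in range(out_h):
                for j in range(out_w):
                    window = padding_z[n, c,
                                       strides[0] * i:strides[0] * i + pooling[0],
                                       strides[1] * j:strides[1] * j + pooling[1]]
                    flat_idx = torch.argmax(window)
                    h_idx = strides[0] * i + flat_idx // pooling[1]
                    w_idx = strides[1] * j + flat_idx % pooling[1]
                    # Only the maximum element of each window receives gradient.
                    padding_dz[n, c, h_idx, w_idx] += next_dz[n, c, i, j]
    # Strip the zero padding again (the slice is a no-op when padding is 0).
    H_p, W_p = padding_dz.shape[2], padding_dz.shape[3]
    return padding_dz[:, :, padding[0]:H_p - padding[0], padding[1]:W_p - padding[1]]

The average-pooling variant in the fragments instead spreads next_dz[n, c, i, j] / (pooling[0] * pooling[1]) uniformly over the same window.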
for i in", "Restore network model End =================================\\n') return last_connections @torch.no_grad() def find_next_layer_by_name(layers,", "= {'layer_name': layer_name} elif isinstance(layer, nn.MaxPool2d): layer_name = 'MaxPool2d' MaxPool2d_params", "featuremap @torch.no_grad() def get_structure_parameters_v1(model): layers = [] for layer in", "layer['layer_name'] == 'Add': dLoss_dz = add_backward(dLoss_dz) return_dz[i] = dLoss_dz elif", "img, label, num_class=1000): return_dz = [] parameters, fc_conv_weights = get_structure_parameters_v1(model)", "return 'ReLU6' else: return 'None' @torch.no_grad() def generate_connections(g): graph =", "the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required", "= (parameters[i+1]['kernel_size'], parameters[i+1]['stride'], parameters[i+1]['padding']) else: params = None dLoss_dz =", "c in range(C): if np_A[n,c]-np_B[n,c] > error_tolerance or np_B[n,c]-np_A[n,c] >", "@torch.no_grad() def average_pooling_backward(next_dz, z, pooling, strides, padding=(0, 0)): print('# next_dz.shape:", "kernel_size = tmp_layer.__dict__.get('kernel_size') if not isinstance(kernel_size, tuple): Conv2d_params['kernel_size'] = (kernel_size,", "dot = Digraph(node_attr=node_attr, graph_attr=dict(size=\"12,12\")) seen = set() def size_to_str(size): return", "[] fc_conv_weights = [] for layer in layers: if isinstance(layer,", "License. # You may obtain a copy of the License", "is not None: assert isinstance(params.values()[0], Variable) param_map = {id(v): k", "layer = layers[i] if name in str(layer): return layer, i", "padding) else: AvgPool2d_params['padding'] = padding # return parameters.append(AvgPool2d_params) elif isinstance(layer,", "pth_dir = featuremap_dir files = os.listdir(pth_dir) file_nums = [] for", "Conv2d_params['padding'] = padding # return fc_conv_weights.append(layer.weight) parameters.append(Conv2d_params) elif isinstance(layer, nn.ReLU):", "parameters.append(Conv2d_params) elif isinstance(layer, nn.ReLU): layer_name = 'ReLU' parameters.append({'layer_name': layer_name}) elif", "list(last_connections[i].keys())[0].split('_')[0] if 'Add' in current_layer_name: last_tensors[i] = 'Add' elif 'View'", "in range(len(featuremap[i][j])): print(' =========', i, j, k, featuremap[i][j][k].shape) ''' #####################", "average_pooling_backward(dLoss_dz, z, pooling, stride, padding) return_dz.append(dLoss_dz) lastpop = featuremap.pop() if", "range(len(last_connections[i][j])-1, -1, -1): current_layer_name = list(last_connections[i][j][k].keys())[0].split('_')[0] if 'Add' in current_layer_name:", "'label' in graph[i] and graph[i][-1] == '\"': labels[(graph[i]+graph[i+1][1:]).split('\\t')[1].split(' ')[0]]=\\ (graph[i]+graph[i+1][1:]).split('\\t')[1].split('\"')[1]", "').join(['%d' % v for v in size])+')' def add_nodes(var): if", "= y_probability - y_true print('# dLoss_dypred.shape: ', list(dLoss_dypred.shape)) print('# Self", "z): print('# next_dz.shape: ', list(next_dz.shape)) print('# z.shape: ', list(z.shape)) zeros_tensor", "gamma.shape: ', list(gamma.shape)) N, C, H, W = z.shape m", "= torch.load(pth_dir+str(file_num)+'.pth') featuremap.append(tensor) delete_allpths(pth_dir=None) return featuremap @torch.no_grad() def get_structure_parameters_v1(model): layers", "j:strides[1] * j + pooling[1]]) h_idx = strides[0] * i", "def cross_entropy_loss(y_predict, y_true): print('\\n=========================== Layer:'+' {0:18}'.format('cross_entropy_loss')+' Start ===========================') print('# y_predict.shape:", "g.view() 
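The mu / xmu / ivar / dxhut fragments here are pieces of a BatchNorm2d input gradient. A sketch using the standard per-channel formula; the middle term of the final expression is inferred from that formula rather than visible in the fragments:

import torch

@torch.no_grad()
def batchnorm2d_backward(next_dz, z, eps, gamma):
    # Gradient w.r.t. the input of BatchNorm2d, using per-channel batch statistics.
    N, C, H, W = z.shape
    m = N * H * W                              # number of elements per channel
    dims = (0, 2, 3)                           # reduce over batch and spatial axes
    mu = z.mean(dim=dims, keepdim=True)
    var = ((z - mu) ** 2).mean(dim=dims, keepdim=True)
    ivar = 1.0 / torch.sqrt(var + eps)
    xhat = (z - mu) * ivar
    dxhat = next_dz * gamma.view(1, C, 1, 1)   # scale by the learned gamma
    # Standard result: dz = ivar/m * (m*dxhat - sum(dxhat) - xhat * sum(dxhat*xhat))
    dz = ivar / m * (m * dxhat
                     - dxhat.sum(dim=dims, keepdim=True)
                     - xhat * (dxhat * xhat).sum(dim=dims, keepdim=True))
    return dz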
delete_allpths(pth_dir=None) print('\\n=========================== Generate Tensors Start ====================================') result = model(img)", "range(len(connections)): item_key = list(connections[i].keys())[0] if '(' in item_key or 'TBackward'", "strides[0]): for w in range(0, W_last, strides[1]): pz[n,d,h,w] = dz[n,d,h//strides[0],w//strides[1]]", "= layer['p'] mask = featuremap[i] dLoss_dz = dropback_backward(dLoss_dz, mask, p)", "for i in range(len(graph)): if '->' in graph[i]: connections.append({labels[graph[i].split('\\t')[1].split(' ->", "return dLoss_dz def add_backward(dLoss_dnextz): print('# next_dz.shape: ', list(dLoss_dnextz.shape)) dLoss_dz =", "name: return 'Cat' elif 'Hardtanh' in name: return 'ReLU6' else:", "eps, gamma=torch.Tensor([1.,1.,1.])): print('# next_dz.shape: ', list(next_dz.shape)) print('# z.shape: ', list(z.shape))", "if not connections[i][item_key] == last_item_key: for j in range(i+1, len(connections)):", "zeros probability: ', p) print('# next_dz.shape: ', list(next_dz.shape)) print('# mask.shape:", "parameters = [] fc_conv_weights = [] for layer in layers:", "isinstance(tmp_layer, nn.ReLU): layer_name = 'ReLU' parameters[i][j][k] = {'layer_name': layer_name} elif", "len(lastpop.shape): dLoss_dz = dLoss_dz.reshape(lastpop.shape) elif layer['layer_name'] == 'Dropout': p =", "= in_features # out_features out_features = layer.__dict__.get('out_features') Linear_params['out_features'] = out_features", "params=None): \"\"\" Produces Graphviz representation of PyTorch autograd graph Blue", "elif 'Cat' in name: return 'Cat' elif 'Hardtanh' in name:", "= layer.__dict__.get('out_features') Linear_params['out_features'] = out_features # return fc_conv_weights[i] = layer.weight", "+= next_dz[n, c, i, j] dz = _remove_padding(padding_dz, padding) #", "try: padding = layer['padding'] except: padding = (0, 0) stride", "dLoss_dz = cross_entropy_loss(featuremap[0], y_true) featuremap.pop(0) return_dz.append(dLoss_dz) #####################tensors ''' for i", "> error_tolerance or np_B[n,c,h,w]-np_A[n,c,h,w] > error_tolerance: error += 1 if", "index break return start+1, end-1 @torch.no_grad() def merge_connections(connections): import copy", "0: last_tensors[i][j].append(last_tensors[i+1]) return last_tensors @torch.no_grad() def get_structure_parameters(return_layers): import copy parameters", "end = len(list_dic_key_value)-1 for index in range(len(list_dic_key_value)): if key1 ==", "= torch.zeros(N, D, H_last, W_last) for n in range(N): for", "'->' in graph[i]: connections.append({labels[graph[i].split('\\t')[1].split(' -> ')[0]]+'_'+\\ graph[i].split('\\t')[1].split(' -> ')[0]:\\ labels[graph[i].split('\\t')[1].split('", "-1, -1): connections.remove(pop_index[i]) new_connections = [] for item in connections:", "ranksep='0.1', height='0.2') dot = Digraph(node_attr=node_attr, graph_attr=dict(size=\"12,12\")) seen = set() def", "in current_layer_name: last_tensors[i] = 'Add' elif 'View' in current_layer_name: last_tensors[i]", "+ value.split('_')[1] if 'None' in key1 or 'None' in value1:", "graph[i].split('\\t')[1].split('=')[1].split(']')[0] for i in range(len(graph)): if '->' in graph[i]: connections.append({labels[graph[i].split('\\t')[1].split('", "tuple): AvgPool2d_params['kernel_size'] = (kernel_size, kernel_size) else: AvgPool2d_params['kernel_size'] = kernel_size #", "layer = parameters[i] if not isinstance(layer, list): print('\\n======================== {0:3} Layer:", "i in range(start_i, len(layers)): layer = layers[i] if name in", "for c in range(C): for 
h in range(H): for w", "if 'Pool' in parameters[i+1]['layer_name']: params = (parameters[i+1]['kernel_size'], parameters[i+1]['stride'], parameters[i+1]['padding']) else:", "= torch.flip(K, (2, 3)) swap_flip_K = torch.swapaxes(flip_K, 0, 1) ppadding_next_dz", "if name.endswith('.pth',): os.remove(os.path.join(root, name)) @torch.no_grad() def mul_items(tensor_size): x = list(tensor_size)", "index_tmp_layers) return_layers.insert(0, tmp[0]) if isinstance(last_connections[i-1], list): index_tmp_layers = tmp[1] +", "stride # padding padding = tmp_layer.__dict__.get('padding') if not isinstance(padding, tuple):", "ypred_loss = torch.mean(-torch.sum(torch.mul(y_true, torch.log(y_probability)), dim=1, keepdim=True)) dLoss_dypred = y_probability -", "return fc_conv_weights[i] = layer.weight parameters[i] = BatchNorm2d_params elif isinstance(layer, nn.Linear):", "db/N @torch.no_grad() def _conv_forward(x, weight, strides=(1,1)): n, c, h_in, w_in", "= dropback_backward(dLoss_dz, mask, p) return_dz.append(dLoss_dz) featuremap.pop() lastpop = featuremap.pop() if", "print('# dz.shape: ', list(dz.shape)) return dz @torch.no_grad() def _remove_padding(z, padding):", "current_layer_name: last_tensors[i] = 'View' else: last_tensors[i] = tensors[index_tensors] index_tensors +=", "fc_conv_weights[i] z = featuremap[i] dLoss_dz, dLoss_dW, dLoss_dB = fc_backward(dLoss_dz, z,", "mask, p) return_dz.append(dLoss_dz) featuremap.pop() lastpop = featuremap.pop() if not len(dLoss_dz.shape)", "dLoss_dfcW/N, dLoss_dfcB/N @torch.no_grad() def view_backward(dLoss_dnextz, last_z, params): print('# next_dz.shape: ',", "= layer['eps'] z = featuremap[i] gamma = fc_conv_weights[i] dLoss_dz =", "(name, Variable) to add names to node that require grad", "item in connections: key, value = list(item.items())[0] key1 = exchange_name(key.split('_')[0])", "\"\"\" if params is not None: assert isinstance(params.values()[0], Variable) param_map", "== 0: continue for k in range(len(last_connections[i][j])-1, -1, -1): current_layer_name", "kernel_size kernel_size = tmp_layer.__dict__.get('kernel_size') if not isinstance(kernel_size, tuple): AvgPool2d_params['kernel_size'] =", "elif layer == 'View': layer_name = 'View' parameters[i] = {'layer_name':", "dLoss_dz.reshape(lastpop.shape) elif layer['layer_name'] == 'AvgPool2d': z = featuremap[-1] pooling =", "None dLoss_dz = view_backward(dLoss_dz, last_z, params) return_dz[i] = dLoss_dz elif", "1 elif not list(last_connections[i-1].keys())[0].split('_')[0] == 'Dropout': index_tmp_layers = tmp[1] +", "layer_name = 'BatchNorm2d' BatchNorm2d_params = {} BatchNorm2d_params['layer_name'] = layer_name #", "print('# next_dz.shape: ', list(next_dz.shape)) print('# z.shape: ', list(z.shape)) print('# weight.shape:", "', list(z.shape)) print('# eps: ', eps) print('# gamma.shape: ', list(gamma.shape))", "= layer_name # in_features in_features = layer.__dict__.get('in_features') Linear_params['in_features'] = in_features", "tensor = torch.load(pth_dir+str(file_num)+'.pth') featuremap.append(tensor) delete_allpths(pth_dir=None) return featuremap @torch.no_grad() def get_structure_parameters_v1(model):", "0, 1) dK = _conv_forward(torch.swapaxes(F.pad(z, pad=(padding[1],padding[1],\\ padding[0],padding[0],0,0), mode='constant', value=0), 0,", "= AvgPool2d_params elif isinstance(layer, nn.Dropout): layer_name = 'Dropout' Dropout_params =", "dLoss_dB = conv_backward(dLoss_dz, weight_z, z, padding, stride) return_dz[i] = dLoss_dz", "import os if pth_dir == None: pth_dir = \"./tmp_file/\" for", "= 
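The dLoss_dfcW = matmul(dLoss_dnextz.t(), z), dLoss_dfcB = sum(...), and z.view(z.size(0), -1) fragments describe the fully connected (nn.Linear) backward pass. A compact sketch; dividing the parameter gradients by the batch size follows the convention visible in the fragments:

import torch

@torch.no_grad()
def fc_backward(next_dz, z, w):
    # next_dz: (N, out_features) upstream gradient, z: layer input, w: (out, in) weight.
    N = z.shape[0]
    if z.dim() == 4:
        z = z.view(N, -1)            # flatten conv feature maps, as nn.Linear sees them
    dz = next_dz @ w                 # gradient w.r.t. the layer input
    dw = next_dz.t() @ z             # gradient w.r.t. the weight matrix
    db = next_dz.sum(dim=0)          # gradient w.r.t. the bias
    return dz, dw / N, db / N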
{'layer_name': layer_name} elif tmp_layer == 'Cat': layer_name = 'Cat'", "in name: return 'Cat' elif 'Hardtanh' in name: return 'ReLU6'", "layer['layer_name'] == 'Dropout': if parameters[i-1]['layer_name'] == 'Dropout': return_dz[i] = dLoss_dz", "= torch.argmax(padding_z[n, c, strides[0] * i:strides[0] * i + pooling[0],", "') return error/C elif len(tensor_A.shape) == 2: N, C =", "or 'Avg' in name: return 'AvgPool2d' elif 'BatchNorm' in name:", "fc_conv_weights[i][j][k] = tmp_layer.weight parameters[i][j][k] = Linear_params elif isinstance(tmp_layer, nn.AdaptiveAvgPool2d): layer_name", "WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.", "compard tensors is not equal.') return None error = 0", "list(item.values())[0] last_key = list(tmp_split[-1].keys())[0] if value == last_key: tmp_split.append(item) else:", "= layer_name # output_size output_size = tmp_layer.__dict__.get('output_size') if not isinstance(output_size,", "else: AvgPool2d_params['padding'] = padding # return parameters.append(AvgPool2d_params) elif isinstance(layer, nn.Dropout):", "name in files: if name.endswith('.pth',): os.remove(os.path.join(root, name)) @torch.no_grad() def mul_items(tensor_size):", "{} AvgPool2d_params['layer_name'] = layer_name # kernel_size kernel_size = tmp_layer.__dict__.get('kernel_size') if", "torch.max(y_predict, dim=1, keepdim=True).values) y_exp = torch.exp(y_shift) y_probability = torch.div(y_exp, torch.sum(y_exp,", "m*dxhut mu = z.mean(axis=axis, keepdim=True) xmu = z - mu", "_, out_h, out_w = next_dz.shape padding_z = F.pad(z, pad=(padding[1],padding[1],padding[0],\\ padding[0],0,0),", "= dLoss_dz.reshape(lastpop.shape) elif layer['layer_name'] == 'MaxPool2d': z = featuremap[-1] pooling", "np_A[n,c,h,w], np_B[n,c,h,w]) else: if n*c*h*w % 20000000000000 == 0: pass", "at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable", "for the specific language governing permissions and # limitations under", "1 #print('Error rate: ', error/C) print('1D-error-rate: ', end=' ') return", "else: AvgPool2d_params['kernel_size'] = kernel_size # stride stride = tmp_layer.__dict__.get('stride') if", "# output_size output_size = layer.__dict__.get('output_size') if not isinstance(output_size, tuple): AdaptiveAvgPool2d_params['output_size']", "stride stride = layer.__dict__.get('stride') if not isinstance(stride, tuple): AvgPool2d_params['stride'] =", "pooling[1] w_idx = strides[1] * j + flat_idx % pooling[1]", "last_tensors[i][j][k] = 'View' else: last_tensors[i][j][k] = tensors[index_tensors] index_tensors += 1", "parameters[i] = Dropout_params elif isinstance(layer, nn.BatchNorm2d): layer_name = 'BatchNorm2d' BatchNorm2d_params", "'['+str(dLoss_dnextz.shape[1])+']') N = z.shape[0] if len(z.shape) == 4: z =", "= torch.zeros_like(next_dz) for c in range(C): dxhut[:,c] = next_dz[:,c]*gamma[c] dz1", "layer_name = 'Dropout' Dropout_params = {} Dropout_params['layer_name'] = layer_name #", "# p p = layer.__dict__.get('p') Dropout_params['p'] = p # return", "list(dLoss_dz.shape)) return dLoss_dz @torch.no_grad() def dropback_backward(next_dz, mask, p): print('# zeros", "y_true) print('Self calculated loss: ', loss) featuremap.pop() return_dz.append(dLoss_dz) dW_dB_fc_conv =", "parameters[i] = {'layer_name': layer_name} elif layer == 'View': layer_name =", "end = find_start_end(connections, i, j-1) tmp = [] tmp.append(connections[start:end+1]) tmp.append(connections[i:j-1])", "been processed in get_structure_parameters!') return parameters, fc_conv_weights def 
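The flip_K / swapaxes / ppadding_next_dz fragments describe the convolution backward pass. A sketch built on the _conv_forward and _insert_zeros helpers sketched earlier, assuming the stride divides the padded input evenly:

import torch
import torch.nn.functional as F

@torch.no_grad()
def conv_backward(next_dz, K, z, padding=(0, 0), strides=(1, 1)):
    # Gradients of a strided 2-D convolution w.r.t. input z, kernel K and bias.
    N, C, H, W = z.shape
    D, _, k1, k2 = K.shape

    # Undo the stride by dilating the upstream gradient with zeros.
    dilated = _insert_zeros(next_dz, strides)

    # Input gradient: "full" correlation of the dilated gradient with the
    # spatially flipped kernel, input/output channels swapped.
    flip_K = torch.flip(K, (2, 3)).swapaxes(0, 1)                    # (C, D, k1, k2)
    padded = F.pad(dilated, (k2 - 1 - padding[1], k2 - 1 - padding[1],
                             k1 - 1 - padding[0], k1 - 1 - padding[0]))
    dz = _conv_forward(padded, flip_K)                               # (N, C, H, W)

    # Kernel gradient: correlate the padded input with the dilated gradient,
    # using the batch dimension as the contraction axis.
    z_pad = F.pad(z, (padding[1], padding[1], padding[0], padding[0]))
    dK = _conv_forward(z_pad.swapaxes(0, 1), dilated.swapaxes(0, 1)).swapaxes(0, 1)

    # Bias gradient: sum over batch and spatial positions.
    db = next_dz.sum(dim=(0, 2, 3))
    return dz, dK / N, db / N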
gradient_backward_v2(model, img,", "==========================') elif isinstance(layer, list): import copy tmp_dLoss_dz = [] for", "'ReLU6' else: return 'None' @torch.no_grad() def generate_connections(g): graph = str(g).split('\\n')", "= Conv2d_params elif isinstance(layer, nn.ReLU): layer_name = 'ReLU' parameters[i] =", "required by applicable law or agreed to in writing, software", "== 'AvgPool2d': z = featuremap[i] pooling = layer['kernel_size'] stride =", "z, weight_z) return_dz[i] = dLoss_dz elif layer['layer_name'] == 'View': last_z", "delete_allpths(pth_dir=None) print('\\n=========================== Store network model Results Start =========================') y =", "BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either", "tmp_layer.__dict__.get('out_channels') Conv2d_params['out_channel'] = out_channel # kernel_size kernel_size = tmp_layer.__dict__.get('kernel_size') if", "= padding # return parameters[i] = AvgPool2d_params elif isinstance(layer, nn.Dropout):", "{} AdaptiveAvgPool2d_params['layer_name'] = layer_name # output_size output_size = tmp_layer.__dict__.get('output_size') if", "BatchNorm2d_params['layer_name'] = layer_name # num_features num_features = layer.__dict__.get('num_features') BatchNorm2d_params['num_features'] =", "print(last_connections[i]) for i in range(len(featuremap)): if not isinstance(featuremap[i], list): print('=========',", "MaxPool2d_params['padding'] = padding # return parameters[i] = MaxPool2d_params elif isinstance(layer,", "z) return_dz[i][j][k] = tmp_dLoss_dz[-1] elif tmp_layer['layer_name'] == 'BatchNorm2d': eps =", "dict(style='filled', shape='box', align='left', fontsize='12', ranksep='0.1', height='0.2') dot = Digraph(node_attr=node_attr, graph_attr=dict(size=\"12,12\"))", "'None'}) return connections, new_connections @torch.no_grad() def get_split_connections(connections): return_connections = []", "range(len(last_connections[i])): return_layers[0].append([]) if len(last_connections[i][j]) == 0: continue for k in", "dLoss_dz elif layer['layer_name'] == 'Add': dLoss_dz = add_backward(dLoss_dz) return_dz[i] =", "xmu2 = xmu**2 var = xmu2.sum(axis=axis, keepdim=True)/m ivar = 1./torch.pow(var+eps,", "== len(lastpop.shape): dLoss_dz = dLoss_dz.reshape(lastpop.shape) else: print('Not completed in gradient_backward_v1!')", "rate: ', error/C) print('1D-error-rate: ', end=' ') return error/C elif", "z, eps, gamma) return_dz[i] = dLoss_dz print('======================== {0:3} Layer: '.format(str(len(parameters)-1-i))+'{0:11}'.format(layer['layer_name'])+'", "in_channel in_channel = layer.__dict__.get('in_channels') Conv2d_params['in_channel'] = in_channel # out_channel out_channel", "== 'View': layer_name = 'View' parameters[i][j][k] = {'layer_name': layer_name} elif", "torch.zeros_like(padding_z) for n in torch.arange(N): for c in torch.arange(C): for", "i in range(len(last_tensors)-1, -1, -1): if isinstance(last_tensors[i], str): # Add", "[] tmp_layers = [] for layer in model.modules(): if not", "return parameters[i][j][k] = AvgPool2d_params elif isinstance(tmp_layer, nn.Dropout): layer_name = 'Dropout'", "agreed to in writing, software # distributed under the License", "padding[0],padding[0],0,0), mode='constant', value=0), 0, 1), torch.swapaxes(padding_next_dz, 0, 1)) db =", "gamma=torch.Tensor([1.,1.,1.])): print('# next_dz.shape: ', list(next_dz.shape)) print('# z.shape: ', list(z.shape)) print('#", "mode='constant', value=0), 0, 1), torch.swapaxes(padding_next_dz, 0, 1)) db = torch.sum(torch.sum(torch.sum(next_dz,", "C, H, W = 
tensor_A.shape for n in range(N): for", "range(C): if np_A[c]-np_B[c] > error_tolerance or np_B[c]-np_A[c] > error_tolerance: #print(np_A[c],", "delete_allpths(pth_dir=None) return featuremap @torch.no_grad() def get_structure_parameters_v1(model): layers = [] for", "if 'ReLU' in current_layer_name: return_layers.insert(0, torch.nn.ReLU(inplace=True)) elif 'Add' in current_layer_name:", "distributed under the License is distributed on an \"AS IS\"", "end=' ') return error/C elif len(tensor_A.shape) == 2: N, C", "print('error', np_A[n,c,h,w], np_B[n,c,h,w]) else: if n*c*h*w % 20000000000000 == 0:", "Conv2d_params['stride'] = stride # padding padding = tmp_layer.__dict__.get('padding') if not", "= layer['stride'] padding = layer['padding'] dLoss_dz = average_pooling_backward(dLoss_dz, z, pooling,", "End ======================') print(tmp_dLoss_dz[0].shape, tmp_dLoss_dz[1].shape) dLoss_dz = tmp_dLoss_dz[0] + tmp_dLoss_dz[1] else:", "xmu**2 var = xmu2.sum(axis=axis, keepdim=True)/m ivar = 1./torch.pow(var+eps, 0.5) dz2", "print('# mask.shape: ', list(mask.shape)) zeros_tensor = torch.zeros_like(mask) dLoss_dz = torch.mul(torch.where(torch.eq(mask,", "'ReLU' in current_layer_name: return_layers.insert(0, torch.nn.ReLU(inplace=True)) elif 'Add' in current_layer_name: return_layers.insert(0,", "== 0: pass print('error', np_A[n,c,h,w], np_B[n,c,h,w]) else: if n*c*h*w %", "= 0 import copy last_tensors = copy.deepcopy(last_connections) for i in", "== 'Add': layer_name = 'Add' parameters[i] = {'layer_name': layer_name} elif", "return_layers.insert(0, 'View') else: tmp = find_next_layer_by_name(tmp_layers, current_layer_name, index_tmp_layers) return_layers[0][j].insert(0, tmp[0])", "W = dz.shape H_last = (H-1)*(strides[0]-1) + H W_last =", "'BatchNorm2d' elif 'Conv' in name: return 'Conv2d' elif 'MaxPool' in", "= dLoss_dz elif layer['layer_name'] == 'Linear': weight_z = fc_conv_weights[i] z", "print('# Skip this layer because the layer has been calcualted!')", "'AddBackward' in name: return 'Add' elif 'Cat' in name: return", "parameters.append(MaxPool2d_params) elif isinstance(layer, nn.AvgPool2d): layer_name = 'AvgPool2d' AvgPool2d_params = {}", "y_shift = torch.sub(y_predict, torch.max(y_predict, dim=1, keepdim=True).values) y_exp = torch.exp(y_shift) y_probability", "current_layer_name: return_layers[0][j].insert(0, 'Add') elif 'View' in current_layer_name: return_layers.insert(0, 'View') else:", "str(type(var).__name__)) seen.add(var) if hasattr(var, 'next_functions'): for u in var.next_functions: if", "last_connections.remove('Throwed') if last_connections[-1] == {'None': 'None'}: last_connections.remove({'None': 'None'}) print('=========================== Restore", "next_dz, zeros_tensor), 1./(1.-p)) print('# dz.shape: ', list(dLoss_dz.shape)) return dLoss_dz @torch.no_grad()", "w_in = x.shape d, c, k, j = weight.shape x_pad", "else: last_item_key = list(connections[i-1].keys())[0] if not connections[i][item_key] == last_item_key: for", "0 import copy last_tensors = copy.deepcopy(last_connections) for i in range(len(last_connections)-1,", "value = list(item.items())[0] key1 = exchange_name(key.split('_')[0]) + '_' + key.split('_')[1]", "error += 1 #print('Error rate: ', error/C) print('1D-error-rate: ', end='", "padding[0] > 0: return z[:, :, padding[0]:-padding[0], :] elif padding[1]", "print('\\n=========================== Generate Tensors Start ====================================') result = model(img) print('=========================== Generate", "layer.__dict__.get('out_features') 
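The element-by-element comparison loops with error_tolerance = 0.001 (judge_tensors_equal) check the hand-computed gradients against PyTorch's autograd results. A vectorized equivalent of that check:

import torch

@torch.no_grad()
def error_rate(tensor_a, tensor_b, tol=1e-3):
    # Fraction of elements whose absolute difference exceeds the tolerance.
    if tensor_a.shape != tensor_b.shape:
        raise ValueError('Shapes of the compared tensors are not equal.')
    mismatch = (tensor_a - tensor_b).abs() > tol
    return mismatch.float().mean().item()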
Linear_params['out_features'] = out_features # return fc_conv_weights.append(layer.weight) parameters.append(Linear_params) elif isinstance(layer,", "isinstance(layer, nn.ReLU): layer_name = 'ReLU' parameters.append({'layer_name': layer_name}) elif isinstance(layer, nn.MaxPool2d):", "layer_name # p p = layer.__dict__.get('p') Dropout_params['p'] = p #", "isinstance(layer, list): print('\\n======================== {0:3} Layer: '.format(str(len(parameters)-1-i))+'{0:11}'.format(layer['layer_name'])+' Backward Start ========================') if", "MaxPool2d_params['layer_name'] = layer_name # kernel_size kernel_size = layer.__dict__.get('kernel_size') if not", "在高度、宽度上相加;批量大小上相加 print('# dz.shape: ', list(dz.shape)) print('# dweight.shape: ', list(dK.transpose(0,1).shape)) print('#", "var not in seen: if torch.is_tensor(var): dot.node(str(id(var)), size_to_str(var.size()), fillcolor='orange') elif", "tensors[index_tensors] index_tensors += 1 else: for j in range(len(last_connections[i])): if", "elif 'View' in current_layer_name: last_tensors[i][j][k] = 'View' else: last_tensors[i][j][k] =", "layer_name # num_features num_features = tmp_layer.__dict__.get('num_features') BatchNorm2d_params['num_features'] = num_features #", "= tmp_dLoss_dz[-1] elif tmp_layer['layer_name'] == 'BatchNorm2d': eps = tmp_layer['eps'] z", "Variable) to add names to node that require grad (TODO:", "var.next_functions: if u[0] is not None: dot.edge(str(id(u[0])), str(id(var))) add_nodes(u[0]) if", "k in range(len(layer[j])): tmp_layer = layer[j][k] ### if isinstance(tmp_layer, nn.Conv2d):", "len(tensor_A.shape) == 1: C = tensor_A.shape[0] for c in range(C):", "return 'ReLU' elif 'AddmmBackward' in name: return 'Linear' elif 'ViewBackward'", "isinstance(last_tensors[i], list): for j in range(len(last_tensors[i])): if len(last_tensors[i][j]) == 0:", "range(H): for w in range(W): if np_A[n,c,h,w]-np_B[n,c,h,w] > error_tolerance or", "'View') else: tmp = find_next_layer_by_name(tmp_layers, current_layer_name, index_tmp_layers) return_layers.insert(0, tmp[0]) if", "dLoss_dz = relu_backward(dLoss_dz, z) return_dz.append(dLoss_dz) lastpop = featuremap.pop() if not", "print('=========================== Layer:'+' {0:18}'.format('cross_entropy_loss')+' End =============================') return ypred_loss, dLoss_dypred @torch.no_grad() def", "mask.shape: ', list(mask.shape)) zeros_tensor = torch.zeros_like(mask) dLoss_dz = torch.mul(torch.where(torch.eq(mask, 1.),", "+ key.split('_')[1] value1 = exchange_name(value.split('_')[0]) + '_' + value.split('_')[1] if", "range(len(last_connections[i])): if len(last_connections[i][j]) == 0: continue for k in range(len(last_connections[i][j])-1,", "padding) else: MaxPool2d_params['padding'] = padding # return parameters[i] = MaxPool2d_params", "dLoss_dz elif layer['layer_name'] == 'Dropout': if parameters[i-1]['layer_name'] == 'Dropout': return_dz[i]", "fc_conv_weights[i][j][k] try: padding = tmp_layer['padding'] except: padding = (0, 0)", "tmp_layer == 'Cat': layer_name = 'Cat' parameters[i][j][k] = {'layer_name': layer_name}", "z, padding, stride) return_dz.append(dLoss_dz) fc_conv_weights.pop() if not len(featuremap) == 1:", "in name: return 'Linear' elif 'ViewBackward' in name: return 'View'", "= featuremap[i] dLoss_dz = dropback_backward(dLoss_dz, mask, p) return_dz[i] = dLoss_dz", "return dz, (dK/N).transpose(0,1), db/N @torch.no_grad() def _conv_forward(x, weight, strides=(1,1)): n,", "return dLoss_dz @torch.no_grad() def relu_backward(next_dz, z): print('# next_dz.shape: ', 
list(next_dz.shape))", "get_structure_parameters_v1!') return parameters, fc_conv_weights @torch.no_grad() def delete_allpths(pth_dir=None): import os if", "@torch.no_grad() def merge_connections(connections): import copy last_connections = copy.deepcopy(connections) connections.append({'None':'None'}) num_Throwed", "tmp_layer.__dict__.get('padding') if not isinstance(padding, tuple): Conv2d_params['padding'] = (padding, padding) else:", "w) #delta dLoss_dfcW = torch.matmul(dLoss_dnextz.t(), z) dLoss_dfcB = torch.sum(dLoss_dnextz, dim=0)", "Graphviz representation of PyTorch autograd graph Blue nodes are the", "_, connections = generate_connections(g) last_connections = merge_connections(connections) return_layers = get_layers(last_connections,", "'Add' in current_layer_name: return_layers.insert(0, 'Add') elif 'View' in current_layer_name: return_layers.insert(0,", "'Dropout_2' elif 'DivBackward' in name: return 'Dropout_1' elif 'AddBackward' in", "stride) return_dz[i] = dLoss_dz elif layer['layer_name'] == 'ReLU': z =", "swap_z = torch.swapaxes(z, 0, 1) dK = _conv_forward(torch.swapaxes(F.pad(z, pad=(padding[1],padding[1],\\ padding[0],padding[0],0,0),", "torch.nn.ReLU(inplace=True)) elif 'Add' in current_layer_name: return_layers.insert(0, 'Add') elif 'View' in", "strides[1] * j:strides[1] * j + pooling[1]]) h_idx = strides[0]", "Backward End ==========================') continue p = layer['p'] mask = featuremap[i]", "dLoss_dz print('# Skip this layer because the layer has been", "[] for i in range(len(connections)): item_key = list(connections[i].keys())[0] if '('", "labels[(graph[i]+graph[i+1][1:]).split('\\t')[1].split(' ')[0]]=\\ (graph[i]+graph[i+1][1:]).split('\\t')[1].split('\"')[1] if 'label' in graph[i] and graph[i][-1] ==", "if name in str(layer): return layer, i @torch.no_grad() def get_layers(last_connections,", "= (padding, padding) else: MaxPool2d_params['padding'] = padding # return parameters[i][j][k]", "numpy as np ax = list(np.arange(len(shape))) shape.pop(1) ax.pop(1) axis =", "for j in range(len(last_connections[i])): if len(last_connections[i][j]) == 0: continue for", "License. 
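The relu_backward and dropback_backward fragments (torch.where over a zeros tensor, mask comparison, 1/(1-p) rescaling) reduce to two short routines. A sketch, keeping the names from the fragments:

import torch

@torch.no_grad()
def relu_backward(next_dz, z):
    # Gradient passes through only where the forward input was positive.
    return torch.where(z > 0, next_dz, torch.zeros_like(next_dz))

@torch.no_grad()
def dropback_backward(next_dz, mask, p):
    # Inverted dropout: units kept by the mask pass their gradient on,
    # rescaled by 1/(1-p) to mirror the forward-pass scaling.
    zeros = torch.zeros_like(next_dz)
    return torch.where(mask == 1., next_dz, zeros) / (1. - p)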
#!/usr/bin/python3 import torch import torch.nn as nn import torch.nn.functional", "return_connections = [] tmp_split = [] for i in range(len(connections)):", "+ flat_idx // pooling[1] w_idx = strides[1] * j +", "dLoss_dz elif layer['layer_name'] == 'View': last_z = featuremap[i+1] if 'Pool'", "else: AdaptiveAvgPool2d_params['output_size'] = output_size # return parameters.append(AdaptiveAvgPool2d_params) else: print('The layer", "'AdaptiveAvgPool2d' AdaptiveAvgPool2d_params = {} AdaptiveAvgPool2d_params['layer_name'] = layer_name # output_size output_size", "BatchNorm2d_params['num_features'] = num_features # eps eps = layer.__dict__.get('eps') BatchNorm2d_params['eps'] =", "= tmp_dLoss_dz[0] + tmp_dLoss_dz[1] else: print('Not completed in gradient_backward!') print('#", "name: return 'AvgPool2d' elif 'BatchNorm' in name: return 'BatchNorm2d' elif", "= MaxPool2d_params elif isinstance(layer, nn.AvgPool2d): layer_name = 'AvgPool2d' AvgPool2d_params =", "= _conv_forward(ppadding_next_dz, swap_flip_K) swap_z = torch.swapaxes(z, 0, 1) dK =", "next_dz[n, c, i, j] / (pooling[0] * pooling[1]) dz =", "# return fc_conv_weights.append(layer.weight) parameters.append(BatchNorm2d_params) elif isinstance(layer, nn.Linear): layer_name = 'Linear'", "= fc_backward(dLoss_dz, z, weight_z) return_dz.append(dLoss_dz) fc_conv_weights.pop() lastpop = featuremap.pop() if", "OR CONDITIONS OF ANY KIND, either express or implied. #", "{} BatchNorm2d_params['layer_name'] = layer_name # num_features num_features = layer.__dict__.get('num_features') BatchNorm2d_params['num_features']", "kernel_size = layer.__dict__.get('kernel_size') if not isinstance(kernel_size, tuple): MaxPool2d_params['kernel_size'] = (kernel_size,", "the License is distributed on an \"AS IS\" BASIS, #", "weight) return out @torch.no_grad() def _insert_zeros(dz, strides): N, D, H,", "eps = layer['eps'] z = featuremap[-1] gamma = fc_conv_weights[-1] dLoss_dz", "Digraph(node_attr=node_attr, graph_attr=dict(size=\"12,12\")) seen = set() def size_to_str(size): return '('+(', ').join(['%d'", "range(len(featuremap)): if not isinstance(featuremap[i], list): print('=========', i, featuremap[i].shape) else: for", "= Dropout_params elif isinstance(tmp_layer, nn.BatchNorm2d): layer_name = 'BatchNorm2d' BatchNorm2d_params =", "torch.arange(out_w): padding_dz[n, c, strides[0] * i:strides[0] * i + pooling[0],", "elif 'MulBackward' in name: return 'Dropout_2' elif 'DivBackward' in name:", "PyTorch autograd graph Blue nodes are the Variables that require", "= (stride, stride) else: AvgPool2d_params['stride'] = stride # padding padding", "str(model).split('\\n')[0]: g = make_dot(y[0]) return g else: g = make_dot(y)", "= layer['padding'] dLoss_dz = max_pooling_backward(dLoss_dz, z, pooling, stride, padding) return_dz[i]", "parameters.append({'layer_name': layer_name}) elif isinstance(layer, nn.MaxPool2d): layer_name = 'MaxPool2d' MaxPool2d_params =", "y_predict.shape: ', list(y_predict.shape)) print('# y_true.shape: ', list(y_true.shape)) y_shift = torch.sub(y_predict,", "return dz @torch.no_grad() def average_pooling_backward(next_dz, z, pooling, strides, padding=(0, 0)):", "print('=========', i, featuremap[i].shape) else: for j in range(len(featuremap[i])): for k", "for j in range(len(layer)): tmp_dLoss_dz.append(copy.deepcopy(dLoss_dz)) for k in range(len(layer[j])): tmp_layer", "= dLoss_dz.reshape(lastpop.shape) else: print('Not completed in gradient_backward_v1!') print('======================== {0:3} Layer:", "if var not in seen: if torch.is_tensor(var): 
dot.node(str(id(var)), size_to_str(var.size()), fillcolor='orange')", "== 4: z = z.view(z.size(0), -1) dLoss_dz = torch.matmul(dLoss_dnextz, w)", "else: return 'None' @torch.no_grad() def generate_connections(g): graph = str(g).split('\\n') labels", "[] parameters, fc_conv_weights = get_structure_parameters_v1(model) featuremap = get_featuremap(featuremap_dir=None) featuremap.insert(0, img)", "AdaptiveAvgPool2d_params['layer_name'] = layer_name # output_size output_size = tmp_layer.__dict__.get('output_size') if not", "z = featuremap[i] dLoss_dz = relu_backward(dLoss_dz, z) return_dz[i] = dLoss_dz", "if len(last_tensors[i][j]) == 0: last_tensors[i][j].append(last_tensors[i+1]) return last_tensors @torch.no_grad() def get_structure_parameters(return_layers):", "relu_backward(tmp_dLoss_dz[-1], z) return_dz[i][j][k] = tmp_dLoss_dz[-1] elif tmp_layer['layer_name'] == 'BatchNorm2d': eps", "torch.arange(N): for c in torch.arange(C): for i in torch.arange(out_h): for", "F.one_hot(label, num_classes=num_class).float() loss, dLoss_dz = cross_entropy_loss(featuremap[0], y_true) featuremap.pop(0) return_dz.append(dLoss_dz) #####################tensors", "for i in torch.arange(out_h): for j in torch.arange(out_w): padding_dz[n, c,", "index break for index in range(len(list_dic_key_value)): if key2 == list(list_dic_key_value[index].keys())[0]:", "except: padding = (0, 0) stride = layer['stride'] dLoss_dz, dLoss_dW,", "#print('Error rate: ', error/(C*N)) print('2D-error-rate: ', end=' ') return error/(C*N)", "elif layer['layer_name'] == 'Dropout': p = layer['p'] mask = featuremap[-1]", "'next_functions'): for u in var.next_functions: if u[0] is not None:", "MaxPool2d_params['padding'] = padding # return parameters.append(MaxPool2d_params) elif isinstance(layer, nn.AvgPool2d): layer_name", "law or agreed to in writing, software # distributed under", "'.format(str(i))+'{0:9}'.format(layer['layer_name'])+' Backward Start ========================') if layer['layer_name'] == 'Conv2d': z =", "AvgPool2d_params['padding'] = padding # return parameters.append(AvgPool2d_params) elif isinstance(layer, nn.Dropout): layer_name", "= merge_connections(connections) return_layers = get_layers(last_connections, model) return_tensors = get_tensors(last_connections) parameters,", "in range(len(parameters)): print(i, parameters[i]) print('================') print('================') for i in range(len(return_tensors)):", "'Cat': layer_name = 'Cat' parameters[i][j][k] = {'layer_name': layer_name} elif isinstance(tmp_layer,", "= z.mean(axis=axis, keepdim=True) xmu = z - mu xmu2 =", "file_nums: tensor = torch.load(pth_dir+str(file_num)+'.pth') featuremap.append(tensor) delete_allpths(pth_dir=None) return featuremap @torch.no_grad() def", "AdaptiveAvgPool2d_params['output_size'] = output_size # return parameters.append(AdaptiveAvgPool2d_params) else: print('The layer has", "= AdaptiveAvgPool2d_params elif isinstance(layer, list): for j in range(len(layer)): for", "= Conv2d_params elif isinstance(tmp_layer, nn.ReLU): layer_name = 'ReLU' parameters[i][j][k] =", "gradient_backward_v2(model, img, label, num_class=1000, g_view=False): x = Variable(img) g =", "z = featuremap[-1] dLoss_dz = relu_backward(dLoss_dz, z) return_dz.append(dLoss_dz) lastpop =", "out_w = next_dz.shape padding_z = F.pad(z, pad=(padding[1],padding[1],padding[0],\\ padding[0],0,0), mode='constant', value=0)", "> error_tolerance: #print(np_A[c], np_B[c]) error += 1 #print('Error rate: ',", "(stride, stride) else: Conv2d_params['stride'] = stride # padding 
padding =", "{} BatchNorm2d_params['layer_name'] = layer_name # num_features num_features = tmp_layer.__dict__.get('num_features') BatchNorm2d_params['num_features']", "len(dLoss_dz.shape) == len(lastpop.shape): dLoss_dz = dLoss_dz.reshape(lastpop.shape) else: print('Not completed in", "list(last_connections[i][j][k].keys())[0].split('_')[0] if 'ReLU' in current_layer_name: return_layers[0][j].insert(0, torch.nn.ReLU(inplace=True)) elif 'Add' in", "list(dLoss_dz.shape)) return dLoss_dz def add_backward(dLoss_dnextz): print('# next_dz.shape: ', list(dLoss_dnextz.shape)) dLoss_dz", "= featuremap[i] dLoss_dz = relu_backward(dLoss_dz, z) return_dz[i] = dLoss_dz elif", "MaxPool2d_params['padding'] = (padding, padding) else: MaxPool2d_params['padding'] = padding # return", "= conv_backward(dLoss_dz, weight_z, z, padding, stride) return_dz[i] = dLoss_dz elif", "model.features[0].weight.grad)) elif 'ResNet' in str(model): print(judge_tensors_equal(dLoss_dW, model.conv1.weight.grad)) delete_allpths(pth_dir=None) return return_dz,", "%s' % (name, size_to_str(u.size())) dot.node(str(id(var)), node_name, fillcolor='lightblue') else: dot.node(str(id(var)), str(type(var).__name__))", "dz.shape: ', list(dLoss_dz.shape)) return dLoss_dz def add_backward(dLoss_dnextz): print('# next_dz.shape: ',", "pooling, strides, padding=(0, 0)): print('# next_dz.shape: ', list(next_dz.shape)) print('# z.shape:", "next_dz.shape: ', list(next_dz.shape)) print('# z.shape: ', list(z.shape)) print('# padding: ',", "== 4: N, C, H, W = tensor_A.shape for n", "list): for j in range(len(layer)): for k in range(len(layer[j])): tmp_layer", "AvgPool2d_params['padding'] = padding # return parameters[i][j][k] = AvgPool2d_params elif isinstance(tmp_layer,", "range(num_Throwed): last_connections.remove('Throwed') if last_connections[-1] == {'None': 'None'}: last_connections.remove({'None': 'None'}) print('===========================", "if params is not None else '' node_name = '%s\\n", "return parameters[i] = AdaptiveAvgPool2d_params elif isinstance(layer, list): for j in", "'AvgPool2d': z = featuremap[-1] pooling = layer['kernel_size'] stride = layer['stride']", "in range(len(list_dic_key_value)): if key2 == list(list_dic_key_value[index].keys())[0]: end = index break", "def make_dot(var, params=None): \"\"\" Produces Graphviz representation of PyTorch autograd", "padding[1] > 0: return z[:, :, padding[0]:-padding[0], padding[1]:-padding[1]] elif padding[0]", "may obtain a copy of the License at # #", "ppadding_next_dz = F.pad(padding_next_dz, pad=(k2-1-padding[1],k2-1-padding[1],\\ k1-1-padding[0],k1-1-padding[0],0,0), mode='constant', value=0) dz = _conv_forward(ppadding_next_dz,", "', eps) print('# gamma.shape: ', list(gamma.shape)) N, C, H, W", "Conv2d_params['stride'] = stride # padding padding = layer.__dict__.get('padding') if not", "= get_featuremap(featuremap_dir=None) featuremap.insert(0, img) ### y_true = F.one_hot(label, num_classes=num_class).float() loss,", "Layer: '.format(str(i))+'{0:9}'.format(layer['layer_name'])+' Backward Start ========================') if layer['layer_name'] == 'Conv2d': z", "= AdaptiveAvgPool2d_params ### else: print('The layer has not been processed", "* i + pooling[0], strides[1] * j:strides[1] * j +", "params[2] output_size = (int((last_z.shape[2]-pooling[0]+2*padding[0])/stride[0]+1), \\ int((last_z.shape[3]-pooling[0]+2*padding[0])/stride[0]+1)) dLoss_dz = dLoss_dnextz.reshape(last_z.shape[0], last_z.shape[1],", "C, H, W = z.shape D, C, k1, k2 =", "\"\"\" Produces Graphviz representation 
of PyTorch autograd graph Blue nodes", "_, _, out_h, out_w = next_dz.shape padding_z = F.pad(z, pad=(padding[1],padding[1],padding[0],\\", "dLoss_dz = average_pooling_backward(dLoss_dz, z, pooling, stride, padding) return_dz[i] = dLoss_dz", "last_tensors[i][j][k] = 'Add' elif 'View' in current_layer_name: last_tensors[i][j][k] = 'View'", "print('======================== {0:3} Layer: '.format(str(len(parameters)-1-i))+'{0:11}'.\\ format(layer['layer_name'])+' Backward End ==========================') continue p", "= \"./tmp_file/\" else: pth_dir = featuremap_dir files = os.listdir(pth_dir) file_nums", "key2 = list(list_dic_key_value[j].keys())[0] start = 0 end = len(list_dic_key_value)-1 for", "'Linear' Linear_params = {} Linear_params['layer_name'] = layer_name # in_features in_features", "next_dz, zeros_tensor) print('# dz.shape: ', list(dLoss_dz.shape)) return dLoss_dz @torch.no_grad() def", "None: dot.edge(str(id(u[0])), str(id(var))) add_nodes(u[0]) if hasattr(var, 'saved_tensors'): for t in", "list(y_predict.shape)) print('# y_true.shape: ', list(y_true.shape)) y_shift = torch.sub(y_predict, torch.max(y_predict, dim=1,", "for n in torch.arange(N): for c in torch.arange(C): for i", "[N,C,H,W] import numpy as np ax = list(np.arange(len(shape))) shape.pop(1) ax.pop(1)", "= copy.deepcopy(return_layers) for i in range(len(return_layers)): layer = return_layers[i] if", "may not use this file except in compliance with the", "= next_dz.shape print('# next_dz.shape: ', list(next_dz.shape)) print('# z.shape: ', list(z.shape))", "add_nodes(var): if var not in seen: if torch.is_tensor(var): dot.node(str(id(var)), size_to_str(var.size()),", "dLoss_dz = batchnorm2d_backward(dLoss_dz, z, eps, gamma) return_dz.append(dLoss_dz) fc_conv_weights.pop() lastpop =", "layer['padding'] dLoss_dz = max_pooling_backward(dLoss_dz, z, pooling, stride, padding) return_dz[i] =", "return return_layers @torch.no_grad() def get_tensors(last_connections): tensors = get_featuremap(featuremap_dir=None) index_tensors =", "range(C): for h in range(H): for w in range(W): if", "def exchange_name(name): if 'Relu' in name: return 'ReLU' elif 'AddmmBackward'", "{0:3} Branch: '.format(str(len(parameters)-1-i))+'{0:11}'.format(tmp_layer['layer_name'])+' Backward Start ====================') if tmp_layer['layer_name'] == 'Conv2d':", "next_dz.shape: ', list(next_dz.shape)) print('# z.shape: ', list(z.shape)) print('# eps: ',", "j in range(len(layer)): for k in range(len(layer[j])): tmp_layer = layer[j][k]", "nn.Linear): layer_name = 'Linear' Linear_params = {} Linear_params['layer_name'] = layer_name", "this file except in compliance with the License. 
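The Digraph / node_attr / add_nodes / next_functions fragments belong to a make_dot-style tracer that walks the autograd graph to recover the layer order (in the spirit of torchviz). A stripped-down sketch, assuming the graphviz package is installed; node labelling of saved tensors and parameters is omitted:

from graphviz import Digraph

def make_dot(output_tensor):
    # Walk the autograd graph backwards from an output tensor and emit one
    # Graphviz node per grad_fn, with edges following next_functions.
    dot = Digraph(node_attr=dict(style='filled', shape='box', align='left',
                                 fontsize='12', ranksep='0.1', height='0.2'))
    seen = set()

    def add_nodes(fn):
        if fn is None or fn in seen:
            return
        seen.add(fn)
        dot.node(str(id(fn)), type(fn).__name__)
        for next_fn, _ in getattr(fn, 'next_functions', ()):
            if next_fn is not None:
                dot.edge(str(id(next_fn)), str(id(fn)))
                add_nodes(next_fn)

    add_nodes(output_tensor.grad_fn)
    return dot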
# You", "i, j): key1 = list(list_dic_key_value[i].values())[0] key2 = list(list_dic_key_value[j].keys())[0] start =", "z = featuremap[i] else: z = featuremap[i-1][j][k+1] weight_z = fc_conv_weights[i][j][k]", "out_h, out_w = next_dz.shape padding_z = F.pad(z, pad=(padding[1],padding[1],padding[0],\\ padding[0],0,0), mode='constant',", "j in range(i+1, len(connections)): if not list(connections[j].values())[0] == list(connections[j-1].keys())[0]: notchoosed.append(i)", "= padding # return parameters[i][j][k] = AvgPool2d_params elif isinstance(tmp_layer, nn.Dropout):", "= BatchNorm2d_params elif isinstance(layer, nn.Linear): layer_name = 'Linear' Linear_params =", "D, H_last, W_last) for n in range(N): for d in", "key1 or 'None' in value1: print('Not completed for '+key+' or", "* i + flat_idx // pooling[1] w_idx = strides[1] *", "== 'Dropout': index_tmp_layers = tmp[1] + 1 return return_layers @torch.no_grad()", "c, h_in, w_in = x.shape d, c, k, j =", "def _conv_forward(x, weight, strides=(1,1)): n, c, h_in, w_in = x.shape", "layer_name} elif tmp_layer == 'Add': layer_name = 'Add' parameters[i][j][k] =", "= tmp_layer.__dict__.get('padding') if not isinstance(padding, tuple): Conv2d_params['padding'] = (padding, padding)", "print('1D-error-rate: ', end=' ') return error/C elif len(tensor_A.shape) == 2:", "num_features = layer.__dict__.get('num_features') BatchNorm2d_params['num_features'] = num_features # eps eps =", "fc_conv_weights = copy.deepcopy(return_layers) for i in range(len(return_layers)): layer = return_layers[i]", "dLoss_dz def add_backward(dLoss_dnextz): print('# next_dz.shape: ', list(dLoss_dnextz.shape)) dLoss_dz = dLoss_dnextz", "featuremap[i+1] if 'Pool' in parameters[i+1]['layer_name']: params = (parameters[i+1]['kernel_size'], parameters[i+1]['stride'], parameters[i+1]['padding'])", "of PyTorch autograd graph Blue nodes are the Variables that", "in range(len(pop_index)-1, -1, -1): connections.remove(pop_index[i]) new_connections = [] for item", "layer, i @torch.no_grad() def get_layers(last_connections, model): return_layers = [] tmp_layers", "'Dropout_1' elif 'AddBackward' in name: return 'Add' elif 'Cat' in", "'ReLU' parameters[i][j][k] = {'layer_name': layer_name} elif tmp_layer == 'Add': layer_name", "np_B[n,c]) error += 1 #print('Error rate: ', error/(C*N)) print('2D-error-rate: ',", "str(model): print(judge_tensors_equal(dLoss_dW, model.features[0].weight.grad)) elif 'ResNet' in str(model): print(judge_tensors_equal(dLoss_dW, model.conv1.weight.grad)) delete_allpths(pth_dir=None)", "for index in range(len(list_dic_key_value)): if key1 == list(list_dic_key_value[index].keys())[0]: start =", "# # Licensed under the Apache License, Version 2.0 (the", "copy.deepcopy(last_connections) for i in range(len(last_connections)-1, -1, -1): if not isinstance(last_connections[i],", "on an \"AS IS\" BASIS, # WITHOUT WARRANTIES OR CONDITIONS", "torch.is_tensor(var): dot.node(str(id(var)), size_to_str(var.size()), fillcolor='orange') elif hasattr(var, 'variable'): u = var.variable", "in name or 'Avg' in name: return 'AvgPool2d' elif 'BatchNorm'", "list(gamma.shape)) N, C, H, W = z.shape m = N*H*W", "axis=-1), axis=0) # 在高度、宽度上相加;批量大小上相加 print('# dz.shape: ', list(dz.shape)) print('# dweight.shape:", "x_pad = x_pad.unfold(3, j, strides[1]) out = torch.einsum( 'nchwkj,dckj->ndhw', x_pad,", "= make_dot(y) return g @torch.no_grad() def exchange_name(name): if 'Relu' in", "y_true.shape: ', list(y_true.shape)) y_shift = torch.sub(y_predict, torch.max(y_predict, dim=1, keepdim=True).values) 
y_exp", "= exchange_name(key.split('_')[0]) + '_' + key.split('_')[1] value1 = exchange_name(value.split('_')[0]) +", "in range(len(last_connections[i])): return_layers[0].append([]) if len(last_connections[i][j]) == 0: continue for k", "# padding padding = tmp_layer.__dict__.get('padding') if not isinstance(padding, tuple): MaxPool2d_params['padding']", "list(np.arange(len(shape))) shape.pop(1) ax.pop(1) axis = tuple(ax) dxhut = torch.zeros_like(next_dz) for", "return return_connections @torch.no_grad() def find_start_end(list_dic_key_value, i, j): key1 = list(list_dic_key_value[i].values())[0]", "K, z, padding=(0, 0), strides=(1, 1)): N, C, H, W", "= fc_conv_weights[i] dLoss_dz = batchnorm2d_backward(dLoss_dz, z, eps, gamma) return_dz[i] =", "= dz.shape H_last = (H-1)*(strides[0]-1) + H W_last = (W-1)*(strides[1]-1)", "merge_connections(connections) return_layers = get_layers(last_connections, model) return_tensors = get_tensors(last_connections) parameters, fc_conv_weights", "dLoss_dz = dLoss_dnextz.reshape(last_z.shape[0], last_z.shape[1], output_size[0], output_size[1]) else: dLoss_dz = dLoss_dnextz.reshape(last_z.shape)", "layer['layer_name'] == 'AvgPool2d': z = featuremap[-1] pooling = layer['kernel_size'] stride", "= [] for i in range(len(parameters)-1, -1, -1): layer =", "#print('Error rate: ', error/C) print('1D-error-rate: ', end=' ') return error/C", "p) return_dz.append(dLoss_dz) featuremap.pop() lastpop = featuremap.pop() if not len(dLoss_dz.shape) ==", "Conv2d_params['stride'] = (stride, stride) else: Conv2d_params['stride'] = stride # padding", "= out_channel # kernel_size kernel_size = tmp_layer.__dict__.get('kernel_size') if not isinstance(kernel_size,", "k+1 >= len(featuremap[i-1][j]): z = featuremap[i] else: z = featuremap[i-1][j][k+1]", "= featuremap[i] else: z = featuremap[i-1][j][k+1] weight_z = fc_conv_weights[i][j][k] try:", "== len(lastpop.shape): dLoss_dz = dLoss_dz.reshape(lastpop.shape) elif layer['layer_name'] == 'Dropout': p", "value=0), 0, 1), torch.swapaxes(padding_next_dz, 0, 1)) db = torch.sum(torch.sum(torch.sum(next_dz, axis=-1),", "in torch.arange(out_w): padding_dz[n, c, strides[0] * i:strides[0] * i +", "in current_layer_name: return_layers.insert(0, 'Add') elif 'View' in current_layer_name: return_layers.insert(0, 'View')", "= Linear_params elif isinstance(layer, nn.AdaptiveAvgPool2d): layer_name = 'AdaptiveAvgPool2d' AdaptiveAvgPool2d_params =", "End ======================================\\n') Loss = nn.CrossEntropyLoss() if 'GoogLeNet' in str(model).split('\\n')[0]: loss_torch", "print('================') for i in range(len(parameters)): print(i, parameters[i]) print('================') print('================') for", "name: return 'Linear' elif 'ViewBackward' in name: return 'View' elif", "i in range(len(graph)): if '->' in graph[i]: connections.append({labels[graph[i].split('\\t')[1].split(' -> ')[0]]+'_'+\\", "len(lastpop.shape): dLoss_dz = dLoss_dz.reshape(lastpop.shape) elif layer['layer_name'] == 'Linear': weight_z =", "'ReLU' elif 'AddmmBackward' in name: return 'Linear' elif 'ViewBackward' in", "range(N): for c in range(C): if np_A[n,c]-np_B[n,c] > error_tolerance or", "weight.shape x_pad = x x_pad = x_pad.unfold(2, k, strides[0]) x_pad", "= copy.deepcopy(connections) connections.append({'None':'None'}) num_Throwed = 0 notchoosed = [] print('\\n===========================", "len(dLoss_dz.shape) == len(lastpop.shape): dLoss_dz = dLoss_dz.reshape(lastpop.shape) elif layer['layer_name'] == 'MaxPool2d':", "layer_name # output_size output_size 
= layer.__dict__.get('output_size') if not isinstance(output_size, tuple):", "= find_next_layer_by_name(tmp_layers, current_layer_name, index_tmp_layers) return_layers[0][j].insert(0, tmp[0]) if not list(last_connections[i][j][k-1].keys())[0].split('_')[0] ==", "= 'Cat' parameters[i] = {'layer_name': layer_name} elif isinstance(layer, nn.MaxPool2d): layer_name", "= (output_size, output_size) else: AdaptiveAvgPool2d_params['output_size'] = output_size # return parameters[i]", "Backward Start ====================') if tmp_layer['layer_name'] == 'Conv2d': if k+1 >=", "def size_to_str(size): return '('+(', ').join(['%d' % v for v in", "layer['layer_name'] == 'Conv2d': z = featuremap[-1] weight_z = fc_conv_weights[-1] try:", "(stride, stride) else: MaxPool2d_params['stride'] = stride # padding padding =", "if parameters[i-1]['layer_name'] == 'Dropout': return_dz[i] = dLoss_dz print('# Skip this", "=========================') y = model(x) print('=========================== Store network model Results End", "= torch.div(y_exp, torch.sum(y_exp, dim=1, keepdim=True)) ypred_loss = torch.mean(-torch.sum(torch.mul(y_true, torch.log(y_probability)), dim=1,", "if not isinstance(output_size, tuple): AdaptiveAvgPool2d_params['output_size'] = (output_size, output_size) else: AdaptiveAvgPool2d_params['output_size']", "= fc_conv_weights[i] z = featuremap[i] dLoss_dz, dLoss_dW, dLoss_dB = fc_backward(dLoss_dz,", "', list(K.shape)) print('# bias.shape: ', '['+str(K.shape[0])+']') print('# padding: ', padding)", "weight_z = fc_conv_weights[-1] z = featuremap[-1] dLoss_dz, dLoss_dW, dLoss_dB =", "// pooling[1] w_idx = strides[1] * j + flat_idx %", "dLoss_dz = dLoss_dz.reshape(lastpop.shape) elif layer['layer_name'] == 'ReLU': z = featuremap[-1]", "= make_dot(y[0]) return g else: g = make_dot(y) return g", "gamma = fc_conv_weights[-1] dLoss_dz = batchnorm2d_backward(dLoss_dz, z, eps, gamma) return_dz.append(dLoss_dz)", "1 for i in range(len(last_tensors)-1, -1, -1): if isinstance(last_tensors[i], str):", "'View' parameters[i] = {'layer_name': layer_name} elif layer == 'Cat': layer_name", "z = featuremap[-1] weight_z = fc_conv_weights[-1] try: padding = layer['padding']", "in str(model) or 'AlexNet' in str(model): print(judge_tensors_equal(dLoss_dW, model.features[0].weight.grad)) elif 'ResNet'", "list(list_dic_key_value[j].keys())[0] start = 0 end = len(list_dic_key_value)-1 for index in", "if '(' in item_key or 'TBackward' in item_key: pop_index.append(connections[i]) for", "= tensors[index_tensors] index_tensors += 1 for i in range(len(last_tensors)-1, -1,", "k1-1-padding[0],k1-1-padding[0],0,0), mode='constant', value=0) dz = _conv_forward(ppadding_next_dz, swap_flip_K) swap_z = torch.swapaxes(z,", "padding # return parameters[i][j][k] = AvgPool2d_params elif isinstance(tmp_layer, nn.Dropout): layer_name", "print('# weight.shape: ', list(w.shape)) print('# bias.shape: ', '['+str(dLoss_dnextz.shape[1])+']') N =", "torch.autograd import Variable @torch.no_grad() def cross_entropy_loss(y_predict, y_true): print('\\n=========================== Layer:'+' {0:18}'.format('cross_entropy_loss')+'", "or implied. 
# See the License for the specific language", "last_tensors[i] = last_tensors[i+1].view(last_tensors[i+1].size(0), -1) elif isinstance(last_tensors[i], list): for j in", "last_z = featuremap[i+1] if 'Pool' in parameters[i+1]['layer_name']: params = (parameters[i+1]['kernel_size'],", "return 'Linear' elif 'ViewBackward' in name: return 'View' elif 'Mean'", "'None'}) print('=========================== Restore network model End =================================\\n') return last_connections @torch.no_grad()", "elif padding[1] > 0: return z[:, :, :, padding[1]:-padding[1]] else:", "num_Throwed += 1 break if not notchoosed == []: last_connections", "str(layer): return layer, i @torch.no_grad() def get_layers(last_connections, model): return_layers =", "def max_pooling_backward(next_dz, z, pooling, strides, padding=(0, 0)): print('# next_dz.shape: ',", "None}) new_connections.append({'None': 'None'}) return connections, new_connections @torch.no_grad() def get_split_connections(connections): return_connections", "str(g).split('\\n') labels = {} connections = [] for i in", "if last_connections[-1] == {'None': 'None'}: last_connections.remove({'None': 'None'}) print('=========================== Restore network", "for j in range(len(featuremap[i])): for k in range(len(featuremap[i][j])): print(' =========',", "== 'Add': layer_name = 'Add' parameters[i][j][k] = {'layer_name': layer_name} elif", "for c in range(C): if np_A[n,c]-np_B[n,c] > error_tolerance or np_B[n,c]-np_A[n,c]", "layer_name # in_channel in_channel = layer.__dict__.get('in_channels') Conv2d_params['in_channel'] = in_channel #", "= padding # return parameters[i][j][k] = MaxPool2d_params elif isinstance(tmp_layer, nn.AvgPool2d):", "(padding, padding) else: Conv2d_params['padding'] = padding # return fc_conv_weights.append(layer.weight) parameters.append(Conv2d_params)", "keepdim=True)) ypred_loss = torch.mean(-torch.sum(torch.mul(y_true, torch.log(y_probability)), dim=1, keepdim=True)) dLoss_dypred = y_probability", "return_tensors[i].shape) print('================') ''' import copy return_dz = copy.deepcopy(last_connections) featuremap =", "y = model(x) print('=========================== Store network model Results End ===========================\\n')", "loss, dLoss_dz = cross_entropy_loss(featuremap[-1], y_true) print('Self calculated loss: ', loss)", "size])+')' def add_nodes(var): if var not in seen: if torch.is_tensor(var):", "def find_next_layer_by_name(layers, name, start_i): for i in range(start_i, len(layers)): layer", "layer_name} elif tmp_layer == 'View': layer_name = 'View' parameters[i][j][k] =", "featuremap.pop(0) return_dz.append(dLoss_dz) #####################tensors ''' for i in range(len(last_connections)): print(last_connections[i]) for", "= 0 for i in range(len(last_connections)-1, -1, -1): if not", "= [] for i in range(len(files)): if '.pth' in files[i]:", "in range(len(featuremap)): if not isinstance(featuremap[i], list): print('=========', i, featuremap[i].shape) else:", "import Digraph, render from torch.autograd import Variable @torch.no_grad() def cross_entropy_loss(y_predict,", "if last_tensors[i] == 'View': last_tensors[i] = last_tensors[i+1].view(last_tensors[i+1].size(0), -1) elif isinstance(last_tensors[i],", "not None: assert isinstance(params.values()[0], Variable) param_map = {id(v): k for", "Copyright 2022 ConvolutedDog (https://github.com/ConvolutedDog/) # # Licensed under the Apache", "nn.Conv2d): layer_name = 'Conv2d' Conv2d_params = {} Conv2d_params['layer_name'] = layer_name", "model) 
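The name translation can be condensed into a small lookup. Below is a minimal sketch of exchange_name reconstructed from the visible branches; the substring tests for the pooling and batch-norm cases are not fully visible, so those two conditions are assumptions.

    import torch

    @torch.no_grad()
    def exchange_name(name):
        # Map an autograd node name (e.g. 'ReluBackward0') to the layer type
        # used in the restored graph. Order matters: the first match wins,
        # which is why 'AddmmBackward' must be tested before 'AddBackward'.
        if 'Relu' in name:
            return 'ReLU'
        elif 'AddmmBackward' in name:
            return 'Linear'
        elif 'ViewBackward' in name:
            return 'View'
        elif 'MaxPool' in name:          # assumed substring
            return 'MaxPool2d'
        elif 'MulBackward' in name:
            return 'Dropout_2'
        elif 'DivBackward' in name:
            return 'Dropout_1'
        elif 'AddBackward' in name:
            return 'Add'
        elif 'Cat' in name:
            return 'Cat'
        elif 'Hardtanh' in name:
            return 'ReLU6'
        elif 'BatchNorm' in name:        # assumed substring
            return 'BatchNorm2d'
        elif 'Conv' in name:
            return 'Conv2d'
        else:
            return 'None'

    # exchange_name('ReluBackward0') -> 'ReLU'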
The second group aligns the restored graph with the actual model. get_layers(last_connections, model) walks model.modules() (skipping container entries whose repr contains ':') and, traversing last_connections from the output backwards, matches each graph node to the next module of the same type via find_next_layer_by_name; 'ReLU' nodes become torch.nn.ReLU(inplace=True), while 'Add' and 'View' remain plain string markers, and branch lists are handled element by element. get_tensors(last_connections) lines the saved intermediate feature maps up with the graph; string markers are resolved in place ('Add' becomes the sum of the two branch outputs, 'View' becomes the following tensor flattened with .view(size(0), -1)). get_structure_parameters(return_layers) then builds, for every layer, the dict of hyper-parameters needed by the hand-written backward functions: layer_name plus in_channel, out_channel, kernel_size, stride and padding for Conv2d; kernel_size, stride and padding for MaxPool2d and AvgPool2d; p for Dropout; num_features and eps for BatchNorm2d; in_features and out_features for Linear; output_size for AdaptiveAvgPool2d. Scalar kernel_size, stride, padding and output_size values are normalized to tuples, and the weights of Conv2d, BatchNorm2d and Linear layers are collected in fc_conv_weights. Any other layer type prints 'The layer has not been processed in get_structure_parameters!'. A condensed sketch of this extraction follows.
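As an illustration of the parameter-dict format, here is a minimal sketch of how a Conv2d entry might be assembled; the helper name collect_conv2d_params is hypothetical, but the keys and the int-to-tuple normalization follow the extraction described above.

    import torch.nn as nn

    def _to_pair(v):
        # Normalize an int hyper-parameter to a (v, v) tuple.
        return v if isinstance(v, tuple) else (v, v)

    def collect_conv2d_params(layer):
        # Build the dict consumed by the hand-written conv backward pass
        # and return the weight that goes into fc_conv_weights.
        assert isinstance(layer, nn.Conv2d)
        params = {
            'layer_name': 'Conv2d',
            'in_channel': layer.in_channels,
            'out_channel': layer.out_channels,
            'kernel_size': _to_pair(layer.kernel_size),
            'stride': _to_pair(layer.stride),
            'padding': _to_pair(layer.padding),
        }
        return params, layer.weight

    # params, w = collect_conv2d_params(nn.Conv2d(3, 64, kernel_size=3, padding=1))
    # params['kernel_size'] -> (3, 3)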
The third group contains the hand-written backward functions; each one prints the shapes it receives ('# next_dz.shape: ...', '# z.shape: ...') before returning its gradients. cross_entropy_loss(y_predict, y_true) computes a numerically stable softmax (y_shift = y_predict - max, y_exp = exp(y_shift), y_probability = y_exp / sum(y_exp)), the mean cross-entropy loss, and the gradient dLoss_dypred = y_probability - y_true. fc_backward(dLoss_dnextz, z, w) returns dLoss_dz = dLoss_dnextz @ w, dLoss_dfcW = dLoss_dnextz.t() @ z divided by the batch size N, and dLoss_dfcB as the batch sum of dLoss_dnextz divided by N, flattening 4-D inputs to 2-D first. relu_backward passes the upstream gradient only where z > 0; dropback_backward multiplies it by the saved mask and rescales by 1/(1-p); view_backward reshapes the upstream gradient back to the pre-flatten shape, recomputing the spatial size from the preceding pooling layer's kernel, stride and padding when needed; add_backward simply forwards the gradient unchanged to both branches. batchnorm2d_backward(next_dz, z, eps, gamma) recomputes the batch statistics over the (N, H, W) axes (m = N*H*W, mu, xmu, var, ivar = 1/sqrt(var+eps)), scales the upstream gradient by gamma per channel, and combines the three standard terms (m*dxhut, the variance term (ivar**2)*((dxhut*xmu).sum)*xmu, and the mean term dxhut.sum) into the input gradient.
"not isinstance(kernel_size, tuple): Conv2d_params['kernel_size'] = (kernel_size, kernel_size) else: Conv2d_params['kernel_size'] =", "eps eps = layer.__dict__.get('eps') BatchNorm2d_params['eps'] = eps # return fc_conv_weights.append(layer.weight)", "range(len(layer[j])): tmp_layer = layer[j][k] ### if isinstance(tmp_layer, nn.Conv2d): layer_name =", "= dict(style='filled', shape='box', align='left', fontsize='12', ranksep='0.1', height='0.2') dot = Digraph(node_attr=node_attr,", "[] for i in range(len(graph)): if 'label' in graph[i] and", "tmp_split.append(item) else: return_connections.append(tmp_split) tmp_split = [item] return return_connections @torch.no_grad() def", "@torch.no_grad() def max_pooling_backward(next_dz, z, pooling, strides, padding=(0, 0)): print('# next_dz.shape:", "if isinstance(layer, nn.Conv2d): layer_name = 'Conv2d' Conv2d_params = {} Conv2d_params['layer_name']", "str(id(var))) add_nodes(u[0]) if hasattr(var, 'saved_tensors'): for t in var.saved_tensors: dot.edge(str(id(t)),", "pooling[1]] += next_dz[n, c, i, j] / (pooling[0] * pooling[1])", "= 'Dropout' Dropout_params = {} Dropout_params['layer_name'] = layer_name # p", "in range(N): for c in range(C): for h in range(H):", "j + pooling[1]]) h_idx = strides[0] * i + flat_idx", "+ pooling[0], strides[1] * j:strides[1] * j + pooling[1]]) h_idx", "bias.shape: ', '['+str(K.shape[0])+']') print('# padding: ', padding) print('# strides: ',", "strides, padding=(0, 0)): print('# next_dz.shape: ', list(next_dz.shape)) print('# z.shape: ',", "swap_flip_K) swap_z = torch.swapaxes(z, 0, 1) dK = _conv_forward(torch.swapaxes(F.pad(z, pad=(padding[1],padding[1],\\", "np ax = list(np.arange(len(shape))) shape.pop(1) ax.pop(1) axis = tuple(ax) dxhut", "fc_conv_weights[i] = layer.weight parameters[i] = BatchNorm2d_params elif isinstance(layer, nn.Linear): layer_name", "kernel_size kernel_size = tmp_layer.__dict__.get('kernel_size') if not isinstance(kernel_size, tuple): Conv2d_params['kernel_size'] =", "weight_z) return_dz[i] = dLoss_dz elif layer['layer_name'] == 'View': last_z =", "or np_B[n,c,h,w]-np_A[n,c,h,w] > error_tolerance: error += 1 if error%20 ==", "{0:3} Layer: '.format(str(i))+'{0:9}'.format(layer['layer_name'])+' Backward End ==========================') delete_allpths(pth_dir=None) return return_dz, dLoss_dW,", "as F from graphviz import Digraph, render from torch.autograd import", "= torch.zeros_like(mask) dLoss_dz = torch.mul(torch.where(torch.eq(mask, 1.), next_dz, zeros_tensor), 1./(1.-p)) print('#", "featuremap.pop() lastpop = featuremap.pop() if not len(dLoss_dz.shape) == len(lastpop.shape): dLoss_dz", "= connections[i] if len(tmp_split) == 0: tmp_split.append(item) continue value =", "a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 #", "strides=(1, 1)): N, C, H, W = z.shape D, C,", "[] for layer in layers: if isinstance(layer, nn.Conv2d): layer_name =", "= padding # return fc_conv_weights[i] = layer.weight parameters[i] = Conv2d_params", "stride, padding) return_dz[i] = dLoss_dz elif layer['layer_name'] == 'AvgPool2d': z", "np_A[n,c]-np_B[n,c] > error_tolerance or np_B[n,c]-np_A[n,c] > error_tolerance: #print(np_A[n,c], np_B[n,c]) error", "out_channel = layer.__dict__.get('out_channels') Conv2d_params['out_channel'] = out_channel # kernel_size kernel_size =", "@torch.no_grad() def batchnorm2d_backward(next_dz, z, eps, gamma=torch.Tensor([1.,1.,1.])): print('# next_dz.shape: ', list(next_dz.shape))", "item_key = list(connections[i].keys())[0] if '(' in item_key or 'TBackward' in", "= 
layer.__dict__.get('padding') if not isinstance(padding, tuple): MaxPool2d_params['padding'] = (padding, padding)", "'Relu' in name: return 'ReLU' elif 'AddmmBackward' in name: return", "== 'Dropout': index_tmp_layers = tmp[1] + 1 else: return_layers.insert(0, [])", "return_dz = copy.deepcopy(last_connections) featuremap = return_tensors featuremap.append(img) y_true = F.one_hot(label,", "mode='constant', value=0) dz = _conv_forward(ppadding_next_dz, swap_flip_K) swap_z = torch.swapaxes(z, 0,", "name: return 'MaxPool2d' elif 'MulBackward' in name: return 'Dropout_2' elif", "= {} AdaptiveAvgPool2d_params['layer_name'] = layer_name # output_size output_size = tmp_layer.__dict__.get('output_size')", "label) _, connections = generate_connections(g) last_connections = merge_connections(connections) return_layers =", "labels = {} connections = [] for i in range(len(graph)):", "get_layers(last_connections, model): return_layers = [] tmp_layers = [] for layer", "elif layer['layer_name'] == 'Linear': weight_z = fc_conv_weights[i] z = featuremap[i]", "#print('Error rate: ', error/(N*C*H*W)) print('4D-error-rate: ', end=' ') return error/(N*C*H*W)", "dLoss_dz, dLoss_dW, dLoss_dB = conv_backward(dLoss_dz, weight_z, z, padding, stride) return_dz[i]", "Linear_params['out_features'] = out_features # return fc_conv_weights[i] = layer.weight parameters[i] =", "in item_key: if i == 0: pass else: last_item_key =", "== ']': labels[graph[i].split('\\t')[1].split(' ')[0]]=\\ graph[i].split('\\t')[1].split('=')[1].split(']')[0] for i in range(len(graph)): if", "@torch.no_grad() def make_dot(var, params=None): \"\"\" Produces Graphviz representation of PyTorch", "zeros_tensor = torch.zeros_like(next_dz) dLoss_dz = torch.where(torch.gt(z, 0), next_dz, zeros_tensor) print('#", "if not isinstance(last_connections[i], list): current_layer_name = list(last_connections[i].keys())[0].split('_')[0] if 'Add' in", "k, j = weight.shape x_pad = x x_pad = x_pad.unfold(2,", "Store network model Results End ===========================\\n') if 'GoogLeNet' in str(model).split('\\n')[0]:", "= list(last_connections[i].keys())[0].split('_')[0] if 'ReLU' in current_layer_name: return_layers.insert(0, torch.nn.ReLU(inplace=True)) elif 'Add'", "= Digraph(node_attr=node_attr, graph_attr=dict(size=\"12,12\")) seen = set() def size_to_str(size): return '('+(',", "featuremap[-1] dLoss_dz = dropback_backward(dLoss_dz, mask, p) return_dz.append(dLoss_dz) featuremap.pop() lastpop =", "= F.one_hot(label, num_classes=num_class).float() loss, dLoss_dz = cross_entropy_loss(featuremap[0], y_true) featuremap.pop(0) return_dz.append(dLoss_dz)", "if not isinstance(kernel_size, tuple): MaxPool2d_params['kernel_size'] = (kernel_size, kernel_size) else: MaxPool2d_params['kernel_size']", "dLoss_dz.reshape(lastpop.shape) elif layer['layer_name'] == 'ReLU': z = featuremap[-1] dLoss_dz =", "= weight.shape x_pad = x x_pad = x_pad.unfold(2, k, strides[0])", "np_A[c]-np_B[c] > error_tolerance or np_B[c]-np_A[c] > error_tolerance: #print(np_A[c], np_B[c]) error", "= p # return parameters.append(Dropout_params) elif isinstance(layer, nn.BatchNorm2d): layer_name =", "tmp_dLoss_dz[-1] elif tmp_layer['layer_name'] == 'BatchNorm2d': eps = tmp_layer['eps'] z =", "weight_z = fc_conv_weights[-1] try: padding = layer['padding'] except: padding =", "loss: ', loss_torch.detach().numpy()) loss_torch.backward() if 'VGG' in str(model) or 'AlexNet'", "elif hasattr(var, 'variable'): u = var.variable name = param_map[id(u)] if", "= dLoss_dz elif layer['layer_name'] == 
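The only non-obvious step in the convolution backward pass is undoing the stride. A minimal sketch of the zero-insertion helper, vectorized with slicing instead of the original explicit loops:

    import torch

    @torch.no_grad()
    def _insert_zeros(dz, strides):
        # Dilate the upstream gradient so that a stride-s forward convolution
        # can be differentiated with a stride-1 full convolution.
        N, D, H, W = dz.shape
        H_out = (H - 1) * strides[0] + 1
        W_out = (W - 1) * strides[1] + 1
        pz = torch.zeros(N, D, H_out, W_out, dtype=dz.dtype)
        pz[:, :, ::strides[0], ::strides[1]] = dz
        return pz

    # _insert_zeros(torch.ones(1, 1, 2, 2), (2, 2)) has shape (1, 1, 3, 3),
    # with the four ones at the corners and zeros everywhere else.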
max_pooling_backward(next_dz, z, pooling, strides, padding) zero-pads z, locates the argmax inside every pooling window (flat_idx = argmax of the window, h_idx = strides[0]*i + flat_idx // pooling[1], w_idx = strides[1]*j + flat_idx % pooling[1]), routes each upstream gradient element to that single position, and finally removes the padding again. average_pooling_backward spreads each upstream element uniformly over its window, adding next_dz[n, c, i, j] / (pooling[0] * pooling[1]) to every entry of the window before stripping the padding.
The fourth group drives the whole check. During the forward pass every intermediate tensor is dumped to ./tmp_file/<n>.pth; get_featuremap sorts the file indices and reloads them, and delete_allpths removes the temporary files afterwards. gradient_backward_v1(model, img, label, num_class=1000) handles purely sequential models: it one-hot-encodes the label, computes the loss gradient with cross_entropy_loss, then walks the parameter list backwards, dispatching on layer_name ('Conv2d', 'ReLU', 'MaxPool2d', 'AvgPool2d', 'Linear', 'Dropout', 'BatchNorm2d', 'View'), popping feature maps and weights as it goes and reshaping the running gradient whenever its rank differs from the stored feature map. gradient_backward_v2(model, img, label, num_class=1000, g_view=False) additionally supports branched models: it builds the autograd graph with generate_g (rendering it when g_view=True), restores last_connections, return_layers, return_tensors and the parameter dicts, and runs the same dispatch over nested branch lists, copying the incoming gradient into each branch and skipping layers that a parallel branch already handled. For reference, the script computes the loss with nn.CrossEntropyLoss (using result[0] for GoogLeNet) and calls loss_torch.backward(); judge_tensors_equal then compares the hand-computed gradient of the first convolution with autograd's result (model.features[0].weight.grad for VGG and AlexNet, a separate branch for ResNet), counting elements that differ by more than 0.001 and printing the per-element error rate (error/(N*C*H*W) for 4-D tensors, error/(C*N) for 2-D, error/C for 1-D). gradient_backward_v2 returns the per-layer gradients together with dLoss_dW and dLoss_dB and deletes the temporary .pth files.
isinstance(last_connections[i], list):", "z[:, :, padding[0]:-padding[0], padding[1]:-padding[1]] elif padding[0] > 0: return z[:,", "x_pad, weight) return out @torch.no_grad() def _insert_zeros(dz, strides): N, D,", "_remove_padding(z, padding): if padding[0] > 0 and padding[1] > 0:", "to node that require grad (TODO: make optional) \"\"\" if", "padding = layer['padding'] except: padding = (0, 0) stride =", "= z.shape m = N*H*W shape = [N,C,H,W] import numpy", "for i in range(len(graph)): if 'label' in graph[i] and graph[i][-1]", "copy parameters = copy.deepcopy(return_layers) fc_conv_weights = copy.deepcopy(return_layers) for i in", "dLoss_dfcB/N @torch.no_grad() def view_backward(dLoss_dnextz, last_z, params): print('# next_dz.shape: ', list(dLoss_dnextz.shape))", "print('Not completed for '+key+' or '+value+'! Check exchange_name function!') exit()", "= get_tensors(last_connections) parameters, fc_conv_weights = get_structure_parameters(return_layers) ''' print('================') for i", "is not None else '' node_name = '%s\\n %s' %", "network model Results End ===========================\\n') if 'GoogLeNet' in str(model).split('\\n')[0]: g", "> error_tolerance or np_B[c]-np_A[c] > error_tolerance: #print(np_A[c], np_B[c]) error +=", "in os.walk(pth_dir, topdown=False): for name in files: if name.endswith('.pth',): os.remove(os.path.join(root,", "in name: return 'ReLU6' else: return 'None' @torch.no_grad() def generate_connections(g):", "layer['padding'] dLoss_dz = max_pooling_backward(dLoss_dz, z, pooling, stride, padding) return_dz.append(dLoss_dz) lastpop", "W pz = torch.zeros(N, D, H_last, W_last) for n in", "if g_view: g.view() delete_allpths(pth_dir=None) print('\\n=========================== Generate Tensors Start ====================================') result", "= tmp_layer.__dict__.get('padding') if not isinstance(padding, tuple): AvgPool2d_params['padding'] = (padding, padding)", "if not len(dLoss_dz.shape) == len(lastpop.shape): dLoss_dz = dLoss_dz.reshape(lastpop.shape) else: print('Not", "return 'MaxPool2d' elif 'MulBackward' in name: return 'Dropout_2' elif 'DivBackward'", "print('\\n=========================== Layer:'+' {0:18}'.format('cross_entropy_loss')+' Start ===========================') print('# y_predict.shape: ', list(y_predict.shape)) print('#", "'.format(str(len(parameters)-1-i))+'{0:11}'.format(layer['layer_name'])+' Backward End ==========================') elif isinstance(layer, list): import copy tmp_dLoss_dz", "padding = layer['padding'] dLoss_dz = max_pooling_backward(dLoss_dz, z, pooling, stride, padding)", "error/C elif len(tensor_A.shape) == 2: N, C = tensor_A.shape for", "eps # return fc_conv_weights.append(layer.weight) parameters.append(BatchNorm2d_params) elif isinstance(layer, nn.Linear): layer_name =", "return_layers.insert(0, torch.nn.ReLU(inplace=True)) elif 'Add' in current_layer_name: return_layers.insert(0, 'Add') elif 'View'", "featuremap[-1] weight_z = fc_conv_weights[-1] try: padding = layer['padding'] except: padding", "= 'Add' elif 'View' in current_layer_name: last_tensors[i] = 'View' else:", "last_tensors[i] = tensors[index_tensors] index_tensors += 1 else: for j in", "featuremap[i] weight_z = fc_conv_weights[i] try: padding = layer['padding'] except: padding", "tmp_layer.__dict__.get('num_features') BatchNorm2d_params['num_features'] = num_features # eps eps = tmp_layer.__dict__.get('eps') BatchNorm2d_params['eps']", "[] for i in range(len(connections)): item = connections[i] if len(tmp_split)", "tmp_layer.__dict__.get('eps') 
BatchNorm2d_params['eps'] = eps # return fc_conv_weights[i][j][k] = tmp_layer.weight parameters[i][j][k]", "print('================') ''' import copy return_dz = copy.deepcopy(last_connections) featuremap = return_tensors", "get_structure_parameters(return_layers) ''' print('================') for i in range(len(last_connections)): print(i, last_connections[i]) print('================')", "torch.flip(K, (2, 3)) swap_flip_K = torch.swapaxes(flip_K, 0, 1) ppadding_next_dz =", "isinstance(kernel_size, tuple): Conv2d_params['kernel_size'] = (kernel_size, kernel_size) else: Conv2d_params['kernel_size'] = kernel_size", "Licensed under the Apache License, Version 2.0 (the \"License\"); #", "return z[:, :, padding[0]:-padding[0], padding[1]:-padding[1]] elif padding[0] > 0: return", "generate_g(model, x) if g_view: g.view() delete_allpths(pth_dir=None) print('\\n=========================== Generate Tensors Start", "isinstance(featuremap[i], list): print('=========', i, featuremap[i].shape) else: for j in range(len(featuremap[i])):", "stride = layer.__dict__.get('stride') if not isinstance(stride, tuple): Conv2d_params['stride'] = (stride,", "= F.pad(z, pad=(padding[1],padding[1],padding[0],\\ padding[0],0,0), mode='constant', value=0) padding_dz = torch.zeros_like(padding_z) for", "in files[i]: file_nums.append(int(files[i].split('.pth')[0])) file_nums.sort() for file_num in file_nums: tensor =", "# return parameters[i][j][k] = AvgPool2d_params elif isinstance(tmp_layer, nn.Dropout): layer_name =", "print('# zeros probability: ', p) print('# next_dz.shape: ', list(next_dz.shape)) print('#", "ivar/m*(dz1-dz2-dz3) print('# dz.shape: ', list(dz.shape)) return dz @torch.no_grad() def average_pooling_backward(next_dz,", "writing, software # distributed under the License is distributed on", "print('\\n=========================== Store network model Results Start =========================') y = model(x)", "tuple): AvgPool2d_params['padding'] = (padding, padding) else: AvgPool2d_params['padding'] = padding #", "isinstance(layer, nn.ReLU): layer_name = 'ReLU' parameters[i] = {'layer_name': layer_name} elif", "error_tolerance: error += 1 if error%20 == 0: pass print('error',", "eps # return fc_conv_weights[i][j][k] = tmp_layer.weight parameters[i][j][k] = BatchNorm2d_params elif", "z[:, :, padding[0]:-padding[0], :] elif padding[1] > 0: return z[:,", "= dLoss_dz elif layer['layer_name'] == 'Dropout': if parameters[i-1]['layer_name'] == 'Dropout':", "= in_channel # out_channel out_channel = layer.__dict__.get('out_channels') Conv2d_params['out_channel'] = out_channel", "= x.shape d, c, k, j = weight.shape x_pad =", "Conv2d_params['kernel_size'] = (kernel_size, kernel_size) else: Conv2d_params['kernel_size'] = kernel_size # stride", "range(len(x)): mul *= x[i] return mul @torch.no_grad() def gradient_backward_v1(model, img,", "name: return 'Conv2d' elif 'MaxPool' in name: return 'MaxPool2d' elif", "dLoss_dz = relu_backward(dLoss_dz, z) return_dz[i] = dLoss_dz elif layer['layer_name'] ==", "-> ')[1]}) pop_index = [] for i in range(len(connections)): item_key", "= nn.CrossEntropyLoss() if 'GoogLeNet' in str(model).split('\\n')[0]: loss_torch = Loss(result[0], label)", "= [] for i in range(len(connections)): item = connections[i] if", "else: dot.node(str(id(var)), str(type(var).__name__)) seen.add(var) if hasattr(var, 'next_functions'): for u in", "@torch.no_grad() def exchange_name(name): if 'Relu' in name: return 'ReLU' elif", "AdaptiveAvgPool2d_params = {} AdaptiveAvgPool2d_params['layer_name'] = layer_name # 
output_size output_size =", "# return parameters[i][j][k] = MaxPool2d_params elif isinstance(tmp_layer, nn.AvgPool2d): layer_name =", "kernel_size kernel_size = layer.__dict__.get('kernel_size') if not isinstance(kernel_size, tuple): MaxPool2d_params['kernel_size'] =", "model.modules(): if not ':' in str(layer): layers.append(layer) parameters = []", "model End =================================\\n') return last_connections @torch.no_grad() def find_next_layer_by_name(layers, name, start_i):", "for i in range(len(last_connections)): print(i, last_connections[i]) print('================') print('================') for i", "-1): current_layer_name = list(last_connections[i][j][k].keys())[0].split('_')[0] if 'Add' in current_layer_name: last_tensors[i][j][k] =", "if len(z.shape) == 4: z = z.view(z.size(0), -1) dLoss_dz =", "AvgPool2d_params['stride'] = stride # padding padding = tmp_layer.__dict__.get('padding') if not", "'Hardtanh' in name: return 'ReLU6' else: return 'None' @torch.no_grad() def", "= {} AvgPool2d_params['layer_name'] = layer_name # kernel_size kernel_size = tmp_layer.__dict__.get('kernel_size')", "padding = tmp_layer['padding'] except: padding = (0, 0) stride =", "padding) return_dz.append(dLoss_dz) lastpop = featuremap.pop() if not len(dLoss_dz.shape) == len(lastpop.shape):", "print('# dweight.shape: ', list(dK.transpose(0,1).shape)) print('# dbias.shape: ', list(db.shape)) return dz,", "= featuremap[-1] dLoss_dz = dropback_backward(dLoss_dz, mask, p) return_dz.append(dLoss_dz) featuremap.pop() lastpop", "dxhut[:,c] = next_dz[:,c]*gamma[c] dz1 = m*dxhut mu = z.mean(axis=axis, keepdim=True)", "grad, orange are Tensors saved for backward in torch.autograd.Function Args:", "else: g = make_dot(y) return g @torch.no_grad() def exchange_name(name): if", "padding[0]:-padding[0], :] elif padding[1] > 0: return z[:, :, :,", "in graph[i] and graph[i][-1] == '\"': labels[(graph[i]+graph[i+1][1:]).split('\\t')[1].split(' ')[0]]=\\ (graph[i]+graph[i+1][1:]).split('\\t')[1].split('\"')[1] if", "pad=(padding[1],padding[1],padding[0],\\ padding[0],0,0), mode='constant', value=0) padding_dz = torch.zeros_like(padding_z) for n in", "np_A = tensor_A.detach().numpy() np_B = tensor_B.detach().numpy() if len(tensor_A.shape) == 4:", "if not ':' in str(layer): tmp_layers.append(layer) index_tmp_layers = 0 for", "= batchnorm2d_backward(dLoss_dz, z, eps, gamma) return_dz.append(dLoss_dz) fc_conv_weights.pop() lastpop = featuremap.pop()", "from graphviz import Digraph, render from torch.autograd import Variable @torch.no_grad()", "w_idx = strides[1] * j + flat_idx % pooling[1] padding_dz[n,", "x[i] return mul @torch.no_grad() def gradient_backward_v1(model, img, label, num_class=1000): return_dz", "dLoss_dz = average_pooling_backward(dLoss_dz, z, pooling, stride, padding) return_dz.append(dLoss_dz) lastpop =", "flat_idx // pooling[1] w_idx = strides[1] * j + flat_idx", "z) return_dz[i] = dLoss_dz elif layer['layer_name'] == 'MaxPool2d': z =", "isinstance(last_tensors[i], str): # Add or View if last_tensors[i] == 'Add':", "= {} BatchNorm2d_params['layer_name'] = layer_name # num_features num_features = tmp_layer.__dict__.get('num_features')", "tmp_layer.__dict__.get('padding') if not isinstance(padding, tuple): MaxPool2d_params['padding'] = (padding, padding) else:", "== 'BatchNorm2d': eps = tmp_layer['eps'] z = featuremap[i-1][j][k+1] gamma =", "stride = tmp_layer.__dict__.get('stride') if not isinstance(stride, tuple): MaxPool2d_params['stride'] = (stride,", "return_layers[i]) 
print('================') print('================') for i in range(len(parameters)): print(i, parameters[i]) print('================')", "value1}) if not len(new_connections) == len(connections): print('Generate connections not done!", "return_tensors featuremap.append(img) y_true = F.one_hot(label, num_classes=num_class).float() loss, dLoss_dz = cross_entropy_loss(featuremap[0],", "rate: ', error/(C*N)) print('2D-error-rate: ', end=' ') return error/(C*N) @torch.no_grad()", "for i in range(len(parameters)): layer = parameters[i] if not isinstance(layer,", "is not equal.') return None error = 0 error_tolerance =", "delete_allpths(pth_dir=None) print('\\n=========================== Generate Tensors Start ====================================') result = model(img) print('===========================", "j, k, featuremap[i][j][k].shape) ''' ##################### # 前面n层倒序遍历 for i in", "tensor_A.shape for n in range(N): for c in range(C): if", "to add names to node that require grad (TODO: make", "if isinstance(last_connections[i-1], list): index_tmp_layers = tmp[1] + 1 elif not", "= x x_pad = x_pad.unfold(2, k, strides[0]) x_pad = x_pad.unfold(3,", "2022 ConvolutedDog (https://github.com/ConvolutedDog/) # # Licensed under the Apache License,", "layer.__dict__.get('padding') if not isinstance(padding, tuple): MaxPool2d_params['padding'] = (padding, padding) else:", "in item_key or 'TBackward' in item_key: pop_index.append(connections[i]) for i in", "last_connections[i]) print('================') print('================') for i in range(len(return_layers)): print(i, return_layers[i]) print('================')", "dLoss_dz = dLoss_dnextz print('# dz.shape: ', list(dLoss_dz.shape)) return dLoss_dz @torch.no_grad()", "flat_idx = torch.argmax(padding_z[n, c, strides[0] * i:strides[0] * i +", "Torch calculated loss: ', loss_torch.detach().numpy()) loss_torch.backward() if 'VGG' in str(model)", "range(len(parameters)): print(i, parameters[i]) print('================') print('================') for i in range(len(return_tensors)): if", "i in range(len(last_connections)-1, -1, -1): if not isinstance(last_connections[i], list): #", "= (kernel_size, kernel_size) else: Conv2d_params['kernel_size'] = kernel_size # stride stride", "z.shape: ', list(z.shape)) print('# eps: ', eps) print('# gamma.shape: ',", "None: pth_dir = \"./tmp_file/\" for root, dirs, files in os.walk(pth_dir,", "isinstance(params.values()[0], Variable) param_map = {id(v): k for k, v in", "optional) \"\"\" if params is not None: assert isinstance(params.values()[0], Variable)", "hasattr(var, 'variable'): u = var.variable name = param_map[id(u)] if params", "[]: last_connections = last_connections[:notchoosed[0]] else: pass for i in range(num_Throwed):", "= featuremap[-1] gamma = fc_conv_weights[-1] dLoss_dz = batchnorm2d_backward(dLoss_dz, z, eps,", "'Cat' parameters[i] = {'layer_name': layer_name} elif isinstance(layer, nn.MaxPool2d): layer_name =", "for i in torch.arange(out_h): for j in torch.arange(out_w): flat_idx =", "topdown=False): for name in files: if name.endswith('.pth',): os.remove(os.path.join(root, name)) @torch.no_grad()", "parameters[i] = BatchNorm2d_params elif isinstance(layer, nn.Linear): layer_name = 'Linear' Linear_params", "1./(1.-p)) print('# dz.shape: ', list(dLoss_dz.shape)) return dLoss_dz @torch.no_grad() def max_pooling_backward(next_dz,", "current_layer_name: return_layers.insert(0, torch.nn.ReLU(inplace=True)) elif 'Add' in current_layer_name: return_layers.insert(0, 'Add') elif", "len(dLoss_dz.shape) == 
len(lastpop.shape): dLoss_dz = dLoss_dz.reshape(lastpop.shape) elif layer['layer_name'] == 'ReLU':", "', list(last_z.shape)) if params: pooling = params[0] stride = params[1]", "stride = params[1] padding = params[2] output_size = (int((last_z.shape[2]-pooling[0]+2*padding[0])/stride[0]+1), \\", "the License. #!/usr/bin/python3 import torch import torch.nn as nn import", "@torch.no_grad() def delete_allpths(pth_dir=None): import os if pth_dir == None: pth_dir", "'Add': layer_name = 'Add' parameters[i][j][k] = {'layer_name': layer_name} elif tmp_layer", "dz @torch.no_grad() def average_pooling_backward(next_dz, z, pooling, strides, padding=(0, 0)): print('#", "are the Variables that require grad, orange are Tensors saved", "parameters.append(BatchNorm2d_params) elif isinstance(layer, nn.Linear): layer_name = 'Linear' Linear_params = {}", "format(layer['layer_name'])+' Backward End ==========================') continue p = layer['p'] mask =", "in name: return 'ReLU' elif 'AddmmBackward' in name: return 'Linear'", "next_dz[n, c, i, j] dz = _remove_padding(padding_dz, padding) # padding_z[:,", "len(dLoss_dz.shape) == len(lastpop.shape): dLoss_dz = dLoss_dz.reshape(lastpop.shape) elif layer['layer_name'] == 'Dropout':", "Conv2d_params['in_channel'] = in_channel # out_channel out_channel = tmp_layer.__dict__.get('out_channels') Conv2d_params['out_channel'] =", "= z.shape _, _, out_h, out_w = next_dz.shape padding_z =", "in range(len(last_connections)-1, -1, -1): if not isinstance(last_connections[i], list): current_layer_name =", "Branch: '.format(str(len(parameters)-1-i))+'{0:11}'.format(tmp_layer['layer_name'])+' Backward Start ====================') if tmp_layer['layer_name'] == 'Conv2d': if", "in get_structure_parameters_v1!') return parameters, fc_conv_weights @torch.no_grad() def delete_allpths(pth_dir=None): import os", "= kernel_size # stride stride = tmp_layer.__dict__.get('stride') if not isinstance(stride,", "range(len(layer[j])): tmp_layer = layer[j][k] print('\\n=========================== {0:3} Branch: '.format(str(len(parameters)-1-i))+'{0:11}'.format(tmp_layer['layer_name'])+' Backward Start", "featuremap = get_featuremap(featuremap_dir=None) featuremap.insert(0, img) ### y_true = F.one_hot(label, num_classes=num_class).float()", "# in_channel in_channel = layer.__dict__.get('in_channels') Conv2d_params['in_channel'] = in_channel # out_channel", "kernel_size # stride stride = layer.__dict__.get('stride') if not isinstance(stride, tuple):", "not ':' in str(layer): tmp_layers.append(layer) index_tmp_layers = 0 for i", "i in range(len(parameters)): layer = parameters[i] if not isinstance(layer, list):", "padding = (0, 0) stride = tmp_layer['stride'] tmp_dLoss_dz[-1], dLoss_dW, dLoss_dB", "last_tensors[i] == 'View': last_tensors[i] = last_tensors[i+1].view(last_tensors[i+1].size(0), -1) elif isinstance(last_tensors[i], list):", "loss, dLoss_dz = cross_entropy_loss(featuremap[0], y_true) featuremap.pop(0) return_dz.append(dLoss_dz) #####################tensors ''' for", "nn.BatchNorm2d): layer_name = 'BatchNorm2d' BatchNorm2d_params = {} BatchNorm2d_params['layer_name'] = layer_name", "== 2: N, C = tensor_A.shape for n in range(N):", "'View' elif 'Mean' in name or 'Avg' in name: return", "featuremap[i] else: z = featuremap[i-1][j][k+1] weight_z = fc_conv_weights[i][j][k] try: padding", "= add_backward(dLoss_dz) return_dz[i] = dLoss_dz elif layer['layer_name'] == 'Dropout': if", "=========', i, j, k, featuremap[i][j][k].shape) ''' ##################### # 前面n层倒序遍历 for", "are Tensors 
saved for backward in torch.autograd.Function Args: var: output", "elif tmp_layer == 'View': layer_name = 'View' parameters[i][j][k] = {'layer_name':", "'View' else: last_tensors[i] = tensors[index_tensors] index_tensors += 1 else: for", "dot.edge(str(id(t)), str(id(var))) add_nodes(t) print(var) add_nodes(var.grad_fn) return dot def generate_g(model, x):", "= last_tensors[i+1].view(last_tensors[i+1].size(0), -1) elif isinstance(last_tensors[i], list): for j in range(len(last_tensors[i])):", "key, value = list(item.items())[0] key1 = exchange_name(key.split('_')[0]) + '_' +", "weight_z = fc_conv_weights[i] try: padding = layer['padding'] except: padding =", "= 'MaxPool2d' MaxPool2d_params = {} MaxPool2d_params['layer_name'] = layer_name # kernel_size", "name.endswith('.pth',): os.remove(os.path.join(root, name)) @torch.no_grad() def mul_items(tensor_size): x = list(tensor_size) mul", "'VGG' in str(model) or 'AlexNet' in str(model): print(judge_tensors_equal(dLoss_dW, model.features[0].weight.grad)) elif", "'Add': layer_name = 'Add' parameters[i] = {'layer_name': layer_name} elif layer", "pass #print('right', np_A[n,c,h,w], np_B[n,c,h,w]) #print('Error rate: ', error/(N*C*H*W)) print('4D-error-rate: ',", "= strides[0] * i + flat_idx // pooling[1] w_idx =", "dLoss_dz = dLoss_dz.reshape(lastpop.shape) elif layer['layer_name'] == 'Dropout': p = layer['p']", "padding = params[2] output_size = (int((last_z.shape[2]-pooling[0]+2*padding[0])/stride[0]+1), \\ int((last_z.shape[3]-pooling[0]+2*padding[0])/stride[0]+1)) dLoss_dz =", "# in_channel in_channel = tmp_layer.__dict__.get('in_channels') Conv2d_params['in_channel'] = in_channel # out_channel", "torch.argmax(padding_z[n, c, strides[0] * i:strides[0] * i + pooling[0], strides[1]", "layer_name # p p = tmp_layer.__dict__.get('p') Dropout_params['p'] = p #", "in name: return 'MaxPool2d' elif 'MulBackward' in name: return 'Dropout_2'", "i + pooling[0], strides[1] * j:strides[1] * j + pooling[1]])", "= {} Conv2d_params['layer_name'] = layer_name # in_channel in_channel = layer.__dict__.get('in_channels')", "in current_layer_name: return_layers[0][j].insert(0, torch.nn.ReLU(inplace=True)) elif 'Add' in current_layer_name: return_layers[0][j].insert(0, 'Add')", "C = tensor_A.shape[0] for c in range(C): if np_A[c]-np_B[c] >", "print('=========================== {0:3} Branch: '.format(str(len(parameters)-1-i))+'{0:11}'.format(tmp_layer['layer_name'])+' Backward End ======================') print(tmp_dLoss_dz[0].shape, tmp_dLoss_dz[1].shape) dLoss_dz", "'TBackward' in item_key: pop_index.append(connections[i]) for i in range(len(pop_index)-1, -1, -1):", "N, D, H1, W1 = next_dz.shape print('# next_dz.shape: ', list(next_dz.shape))", "Digraph, render from torch.autograd import Variable @torch.no_grad() def cross_entropy_loss(y_predict, y_true):", "in range(len(connections)): print('# Restore network model: processing {}/{}'.format(i, len(connections)-1)) item_key", "= list(last_connections[i].keys())[0].split('_')[0] if 'Add' in current_layer_name: last_tensors[i] = 'Add' elif", "= dLoss_dz.reshape(lastpop.shape) elif layer['layer_name'] == 'Linear': weight_z = fc_conv_weights[-1] z", "params is not None: assert isinstance(params.values()[0], Variable) param_map = {id(v):", "return_dz[i] = dLoss_dz elif layer['layer_name'] == 'View': last_z = featuremap[i+1]", "num_Throwed = 0 notchoosed = [] print('\\n=========================== Restore network model", "if 'GoogLeNet' in str(model).split('\\n')[0]: g = make_dot(y[0]) return g else:", "WITHOUT WARRANTIES 
OR CONDITIONS OF ANY KIND, either express or", "= layer.__dict__.get('padding') if not isinstance(padding, tuple): AvgPool2d_params['padding'] = (padding, padding)", "start = 0 end = len(list_dic_key_value)-1 for index in range(len(list_dic_key_value)):", "return fc_conv_weights.append(layer.weight) parameters.append(Conv2d_params) elif isinstance(layer, nn.ReLU): layer_name = 'ReLU' parameters.append({'layer_name':", "isinstance(tmp_layer, nn.BatchNorm2d): layer_name = 'BatchNorm2d' BatchNorm2d_params = {} BatchNorm2d_params['layer_name'] =", "def _remove_padding(z, padding): if padding[0] > 0 and padding[1] >", "flat_idx % pooling[1] padding_dz[n, c, h_idx, w_idx] += next_dz[n, c,", "not isinstance(stride, tuple): Conv2d_params['stride'] = (stride, stride) else: Conv2d_params['stride'] =", "AvgPool2d_params['padding'] = padding # return parameters[i] = AvgPool2d_params elif isinstance(layer,", "* pooling[1]) dz = _remove_padding(padding_dz, padding) # padding_z[:, :, padding[0]:-padding[0],", "g @torch.no_grad() def exchange_name(name): if 'Relu' in name: return 'ReLU'", "= layer.__dict__.get('num_features') BatchNorm2d_params['num_features'] = num_features # eps eps = layer.__dict__.get('eps')", "if not isinstance(padding, tuple): AvgPool2d_params['padding'] = (padding, padding) else: AvgPool2d_params['padding']", "{0:3} Branch: '.format(str(len(parameters)-1-i))+'{0:11}'.format(tmp_layer['layer_name'])+' Backward End ======================') print(tmp_dLoss_dz[0].shape, tmp_dLoss_dz[1].shape) dLoss_dz =", "list(tensor_size) mul = 1. for i in range(len(x)): mul *=", "'Avg' in name: return 'AvgPool2d' elif 'BatchNorm' in name: return", "return 'Dropout_2' elif 'DivBackward' in name: return 'Dropout_1' elif 'AddBackward'", "list(tmp_split[-1].keys())[0] if value == last_key: tmp_split.append(item) else: return_connections.append(tmp_split) tmp_split =", "Layer: '.format(str(len(parameters)-1-i))+'{0:11}'.\\ format(layer['layer_name'])+' Backward End ==========================') continue p = layer['p']", "[]) for j in range(len(last_connections[i])): return_layers[0].append([]) if len(last_connections[i][j]) == 0:", "= (padding, padding) else: MaxPool2d_params['padding'] = padding # return parameters.append(MaxPool2d_params)", "# # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law", "in files: if name.endswith('.pth',): os.remove(os.path.join(root, name)) @torch.no_grad() def mul_items(tensor_size): x", "Check exchange_name function!') exit() new_connections.append({key1: value1}) if not len(new_connections) ==", "= fc_conv_weights[-1] dLoss_dz = batchnorm2d_backward(dLoss_dz, z, eps, gamma) return_dz.append(dLoss_dz) fc_conv_weights.pop()", "value1 = exchange_name(value.split('_')[0]) + '_' + value.split('_')[1] if 'None' in", "= relu_backward(dLoss_dz, z) return_dz.append(dLoss_dz) lastpop = featuremap.pop() if not len(dLoss_dz.shape)", "= (stride, stride) else: Conv2d_params['stride'] = stride # padding padding", "for i in range(start_i, len(layers)): layer = layers[i] if name", "if 'ReLU' in current_layer_name: return_layers[0][j].insert(0, torch.nn.ReLU(inplace=True)) elif 'Add' in current_layer_name:", "layer_name} elif isinstance(layer, nn.MaxPool2d): layer_name = 'MaxPool2d' MaxPool2d_params = {}", "isinstance(return_tensors[i], list) and not isinstance(return_tensors[i], str): print('=========', i, return_tensors[i].shape) print('================')", "'ReLU': z = featuremap[i-1][j][k+1] tmp_dLoss_dz[-1] = relu_backward(tmp_dLoss_dz[-1], z) 
return_dz[i][j][k] =", "return parameters[i][j][k] = MaxPool2d_params elif isinstance(tmp_layer, nn.AvgPool2d): layer_name = 'AvgPool2d'", "fc_conv_weights[i][j][k] = tmp_layer.weight parameters[i][j][k] = BatchNorm2d_params elif isinstance(tmp_layer, nn.Linear): layer_name", "'.format(str(len(parameters)-1-i))+'{0:11}'.format(tmp_layer['layer_name'])+' Backward Start ====================') if tmp_layer['layer_name'] == 'Conv2d': if k+1", "i + pooling[0], strides[1] * j:strides[1] * j + pooling[1]]", "for w in range(0, W_last, strides[1]): pz[n,d,h,w] = dz[n,d,h//strides[0],w//strides[1]] return", "# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express", "= relu_backward(dLoss_dz, z) return_dz[i] = dLoss_dz elif layer['layer_name'] == 'MaxPool2d':", "{0:3} Layer: '.format(str(len(parameters)-1-i))+'{0:11}'.format(layer['layer_name'])+' Backward Start ========================') if layer['layer_name'] == 'Conv2d':", "range(W): if np_A[n,c,h,w]-np_B[n,c,h,w] > error_tolerance or np_B[n,c,h,w]-np_A[n,c,h,w] > error_tolerance: error", "= 'View' parameters[i] = {'layer_name': layer_name} elif layer == 'Cat':", "print('# dweight.shape: ', list(dLoss_dfcW.shape)) print('# dbias.shape: ', list(dLoss_dfcB.shape)) return dLoss_dz,", "else: Conv2d_params['stride'] = stride # padding padding = tmp_layer.__dict__.get('padding') if", "# stride stride = layer.__dict__.get('stride') if not isinstance(stride, tuple): Conv2d_params['stride']", "print('=========================== Generate Tensors End ======================================\\n') Loss = nn.CrossEntropyLoss() if 'GoogLeNet'", "= return_layers[i] if isinstance(layer, nn.Conv2d): layer_name = 'Conv2d' Conv2d_params =", "Self calculated loss: ', ypred_loss.item()) print('=========================== Layer:'+' {0:18}'.format('cross_entropy_loss')+' End =============================')", "stride = layer.__dict__.get('stride') if not isinstance(stride, tuple): AvgPool2d_params['stride'] = (stride,", "print('# z.shape: ', list(z.shape)) print('# padding: ', padding) print('# strides:", "in_features # out_features out_features = layer.__dict__.get('out_features') Linear_params['out_features'] = out_features #", "elif len(tensor_A.shape) == 1: C = tensor_A.shape[0] for c in", "return 'AvgPool2d' elif 'BatchNorm' in name: return 'BatchNorm2d' elif 'Conv'", "the Variables that require grad, orange are Tensors saved for", "isinstance(padding, tuple): MaxPool2d_params['padding'] = (padding, padding) else: MaxPool2d_params['padding'] = padding", "(kernel_size, kernel_size) else: MaxPool2d_params['kernel_size'] = kernel_size # stride stride =", "def merge_connections(connections): import copy last_connections = copy.deepcopy(connections) connections.append({'None':'None'}) num_Throwed =", "copy.deepcopy(connections) connections.append({'None':'None'}) num_Throwed = 0 notchoosed = [] print('\\n=========================== Restore", "or np_B[n,c]-np_A[n,c] > error_tolerance: #print(np_A[n,c], np_B[n,c]) error += 1 #print('Error", "return_layers[0].append([]) if len(last_connections[i][j]) == 0: continue for k in range(len(last_connections[i][j])-1,", "@torch.no_grad() def get_featuremap(featuremap_dir=None): import os featuremap = [] if featuremap_dir", "'Throwed') num_Throwed += 1 break if not notchoosed == []:", "elif layer['layer_name'] == 'BatchNorm2d': eps = layer['eps'] z = featuremap[i]", "list(dLoss_dnextz.shape)) print('# z.shape: ', list(z.shape)) print('# weight.shape: ', list(w.shape)) print('#", "else: Conv2d_params['kernel_size'] = kernel_size 
# stride stride = layer.__dict__.get('stride') if", "for j in range(i+1, len(connections)): if not list(connections[j].values())[0] == list(connections[j-1].keys())[0]:", "len(tmp_split) == 0: tmp_split.append(item) continue value = list(item.values())[0] last_key =", "= next_dz.shape padding_z = F.pad(z, pad=(padding[1],padding[1],padding[0],\\ padding[0],0,0), mode='constant', value=0) padding_dz", "featuremap.append(tensor) delete_allpths(pth_dir=None) return featuremap @torch.no_grad() def get_structure_parameters_v1(model): layers = []", "index_tmp_layers = tmp[1] + 1 else: return_layers.insert(0, []) for j", "strides[0]) x_pad = x_pad.unfold(3, j, strides[1]) out = torch.einsum( 'nchwkj,dckj->ndhw',", "http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed", "', list(dLoss_dz.shape)) return dLoss_dz @torch.no_grad() def max_pooling_backward(next_dz, z, pooling, strides,", "'MaxPool2d' MaxPool2d_params = {} MaxPool2d_params['layer_name'] = layer_name # kernel_size kernel_size", "last_connections = merge_connections(connections) return_layers = get_layers(last_connections, model) return_tensors = get_tensors(last_connections)", "import copy return_dz = copy.deepcopy(last_connections) featuremap = return_tensors featuremap.append(img) y_true", "for k in range(len(featuremap[i][j])): print(' =========', i, j, k, featuremap[i][j][k].shape)", "return_dz.append(dLoss_dz) fc_conv_weights.pop() lastpop = featuremap.pop() if not len(dLoss_dz.shape) == len(lastpop.shape):", "AdaptiveAvgPool2d_params ### else: print('The layer has not been processed in", "z, padding, stride) return_dz[i][j][k] = tmp_dLoss_dz[-1] elif tmp_layer['layer_name'] == 'ReLU':", "elif layer == 'Add': layer_name = 'Add' parameters[i] = {'layer_name':", "list) and not isinstance(return_tensors[i], str): print('=========', i, return_tensors[i].shape) print('================') '''", "not been processed in get_structure_parameters!') return parameters, fc_conv_weights def gradient_backward_v2(model,", "isinstance(layer, nn.AdaptiveAvgPool2d): layer_name = 'AdaptiveAvgPool2d' AdaptiveAvgPool2d_params = {} AdaptiveAvgPool2d_params['layer_name'] =", "layer.weight parameters[i] = Conv2d_params elif isinstance(layer, nn.ReLU): layer_name = 'ReLU'", "= featuremap[-1] dLoss_dz = relu_backward(dLoss_dz, z) return_dz.append(dLoss_dz) lastpop = featuremap.pop()", "n in range(N): for d in range(D): for h in", "print(i, return_layers[i]) print('================') print('================') for i in range(len(parameters)): print(i, parameters[i])", "last_z.shape: ', list(last_z.shape)) if params: pooling = params[0] stride =", "(padding, padding) else: MaxPool2d_params['padding'] = padding # return parameters[i] =", "list(z.shape)) print('# padding: ', padding) print('# strides: ', strides) N,", "padding[0],0,0), mode='constant', value=0) padding_dz = torch.zeros_like(padding_z) for n in torch.arange(N):", "range(len(layer)): for k in range(len(layer[j])): tmp_layer = layer[j][k] ### if", "specific language governing permissions and # limitations under the License.", "# return parameters.append(MaxPool2d_params) elif isinstance(layer, nn.AvgPool2d): layer_name = 'AvgPool2d' AvgPool2d_params", "fc_conv_weights[-1] try: padding = layer['padding'] except: padding = (0, 0)", "seen = set() def size_to_str(size): return '('+(', ').join(['%d' % v", "i in range(len(files)): if '.pth' in files[i]: file_nums.append(int(files[i].split('.pth')[0])) file_nums.sort() for", "axis=0) # 在高度、宽度上相加;批量大小上相加 print('# 
dz.shape: ', list(dz.shape)) print('# dweight.shape: ',", "-1, -1): current_layer_name = list(last_connections[i][j][k].keys())[0].split('_')[0] if 'ReLU' in current_layer_name: return_layers[0][j].insert(0,", "def view_backward(dLoss_dnextz, last_z, params): print('# next_dz.shape: ', list(dLoss_dnextz.shape)) print('# last_z.shape:", "Linear_params = {} Linear_params['layer_name'] = layer_name # in_features in_features =", "elif isinstance(layer, nn.AdaptiveAvgPool2d): layer_name = 'AdaptiveAvgPool2d' AdaptiveAvgPool2d_params = {} AdaptiveAvgPool2d_params['layer_name']", "= layer.__dict__.get('kernel_size') if not isinstance(kernel_size, tuple): Conv2d_params['kernel_size'] = (kernel_size, kernel_size)", "= featuremap[i] gamma = fc_conv_weights[i] dLoss_dz = batchnorm2d_backward(dLoss_dz, z, eps,", "Branch: '.format(str(len(parameters)-1-i))+'{0:11}'.format(tmp_layer['layer_name'])+' Backward End ======================') print(tmp_dLoss_dz[0].shape, tmp_dLoss_dz[1].shape) dLoss_dz = tmp_dLoss_dz[0]", "layer.__dict__.get('stride') if not isinstance(stride, tuple): AvgPool2d_params['stride'] = (stride, stride) else:", "def fc_backward(dLoss_dnextz, z, w): print('# next_dz.shape: ', list(dLoss_dnextz.shape)) print('# z.shape:", "dz.shape: ', list(dLoss_dz.shape)) return dLoss_dz @torch.no_grad() def relu_backward(next_dz, z): print('#", "if last_tensors[i] == 'Add': last_tensors[i] = last_tensors[i+1][0][0] + last_tensors[i+1][1][0] if", "F from graphviz import Digraph, render from torch.autograd import Variable", "= tmp_layer.__dict__.get('out_channels') Conv2d_params['out_channel'] = out_channel # kernel_size kernel_size = tmp_layer.__dict__.get('kernel_size')", "last_connections[:notchoosed[0]] else: pass for i in range(num_Throwed): last_connections.remove('Throwed') if last_connections[-1]", "# in_features in_features = layer.__dict__.get('in_features') Linear_params['in_features'] = in_features # out_features", "'Add' elif 'View' in current_layer_name: last_tensors[i][j][k] = 'View' else: last_tensors[i][j][k]", "p p = tmp_layer.__dict__.get('p') Dropout_params['p'] = p # return parameters[i][j][k]", "len(last_tensors[i][j]) == 0: last_tensors[i][j].append(last_tensors[i+1]) return last_tensors @torch.no_grad() def get_structure_parameters(return_layers): import", "z.shape _, _, out_h, out_w = next_dz.shape padding_z = F.pad(z,", "'.pth' in files[i]: file_nums.append(int(files[i].split('.pth')[0])) file_nums.sort() for file_num in file_nums: tensor", "isinstance(stride, tuple): Conv2d_params['stride'] = (stride, stride) else: Conv2d_params['stride'] = stride", "End ===========================\\n') if 'GoogLeNet' in str(model).split('\\n')[0]: g = make_dot(y[0]) return", "layer_name # in_channel in_channel = tmp_layer.__dict__.get('in_channels') Conv2d_params['in_channel'] = in_channel #", "last_tensors[i][j].append(last_tensors[i+1]) return last_tensors @torch.no_grad() def get_structure_parameters(return_layers): import copy parameters =", "Tensors End ======================================\\n') Loss = nn.CrossEntropyLoss() if 'GoogLeNet' in str(model).split('\\n')[0]:", "= 'ReLU' parameters[i] = {'layer_name': layer_name} elif layer == 'Add':", "range(len(parameters)): layer = parameters[i] if not isinstance(layer, list): print('\\n======================== {0:3}", "out_features = layer.__dict__.get('out_features') Linear_params['out_features'] = out_features # return fc_conv_weights.append(layer.weight) parameters.append(Linear_params)", "# you may not use this file except in 
compliance", "pth_dir == None: pth_dir = \"./tmp_file/\" for root, dirs, files", "in range(len(graph)): if '->' in graph[i]: connections.append({labels[graph[i].split('\\t')[1].split(' -> ')[0]]+'_'+\\ graph[i].split('\\t')[1].split('", "get_tensors(last_connections) parameters, fc_conv_weights = get_structure_parameters(return_layers) ''' print('================') for i in", "this layer because the layer has been calcualted!') print('======================== {0:3}", "stride) else: Conv2d_params['stride'] = stride # padding padding = layer.__dict__.get('padding')", "network model Results Start =========================') y = model(x) print('=========================== Store", "def get_structure_parameters(return_layers): import copy parameters = copy.deepcopy(return_layers) fc_conv_weights = copy.deepcopy(return_layers)", "1 else: for j in range(len(last_connections[i])): if len(last_connections[i][j]) == 0:", "isinstance(layer, list): for j in range(len(layer)): for k in range(len(layer[j])):", "str): print('=========', i, return_tensors[i].shape) print('================') ''' import copy return_dz =", "= padding # return fc_conv_weights.append(layer.weight) parameters.append(Conv2d_params) elif isinstance(layer, nn.ReLU): layer_name", "in var.next_functions: if u[0] is not None: dot.edge(str(id(u[0])), str(id(var))) add_nodes(u[0])", "'Linear': weight_z = fc_conv_weights[i] z = featuremap[i] dLoss_dz, dLoss_dW, dLoss_dB", "print('# bias.shape: ', '['+str(K.shape[0])+']') print('# padding: ', padding) print('# strides:", "# stride stride = tmp_layer.__dict__.get('stride') if not isinstance(stride, tuple): MaxPool2d_params['stride']", "tmp_dLoss_dz[-1] elif tmp_layer['layer_name'] == 'ReLU': z = featuremap[i-1][j][k+1] tmp_dLoss_dz[-1] =", "'View': last_z = featuremap[i+1] if 'Pool' in parameters[i+1]['layer_name']: params =", "= 'AdaptiveAvgPool2d' AdaptiveAvgPool2d_params = {} AdaptiveAvgPool2d_params['layer_name'] = layer_name # output_size", "def generate_g(model, x): delete_allpths(pth_dir=None) print('\\n=========================== Store network model Results Start", "== tensor_B.shape): print('Shape of two compard tensors is not equal.')", "{0:18}'.format('cross_entropy_loss')+' End =============================') return ypred_loss, dLoss_dypred @torch.no_grad() def fc_backward(dLoss_dnextz, z,", "padding = layer.__dict__.get('padding') if not isinstance(padding, tuple): MaxPool2d_params['padding'] = (padding,", "len(list_dic_key_value)-1 for index in range(len(list_dic_key_value)): if key1 == list(list_dic_key_value[index].keys())[0]: start", "= output_size # return parameters[i] = AdaptiveAvgPool2d_params elif isinstance(layer, list):", "keepdim=True).values) y_exp = torch.exp(y_shift) y_probability = torch.div(y_exp, torch.sum(y_exp, dim=1, keepdim=True))", "z, eps, gamma) return_dz[i][j][k] = tmp_dLoss_dz[-1] print('=========================== {0:3} Branch: '.format(str(len(parameters)-1-i))+'{0:11}'.format(tmp_layer['layer_name'])+'", "= max_pooling_backward(dLoss_dz, z, pooling, stride, padding) return_dz.append(dLoss_dz) lastpop = featuremap.pop()", "obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0", "padding padding = tmp_layer.__dict__.get('padding') if not isinstance(padding, tuple): AvgPool2d_params['padding'] =", "tuple): MaxPool2d_params['stride'] = (stride, stride) else: MaxPool2d_params['stride'] = stride #", "', list(y_true.shape)) y_shift = torch.sub(y_predict, torch.max(y_predict, dim=1, keepdim=True).values) y_exp =", 
"len(lastpop.shape): dLoss_dz = dLoss_dz.reshape(lastpop.shape) elif layer['layer_name'] == 'BatchNorm2d': eps =", "#print(np_A[c], np_B[c]) error += 1 #print('Error rate: ', error/C) print('1D-error-rate:", "layer_name = 'Cat' parameters[i][j][k] = {'layer_name': layer_name} elif isinstance(tmp_layer, nn.MaxPool2d):", "strides=(1,1)): n, c, h_in, w_in = x.shape d, c, k,", "0 and padding[1] > 0: return z[:, :, padding[0]:-padding[0], padding[1]:-padding[1]]", "End ==========================') elif isinstance(layer, list): import copy tmp_dLoss_dz = []", "last_connections[start:end+1] = [tmp] for kk in range(end-start): last_connections.insert(start, 'Throwed') num_Throwed", "end = index break return start+1, end-1 @torch.no_grad() def merge_connections(connections):", "return layer, i @torch.no_grad() def get_layers(last_connections, model): return_layers = []", "== '\"': labels[(graph[i]+graph[i+1][1:]).split('\\t')[1].split(' ')[0]]=\\ (graph[i]+graph[i+1][1:]).split('\\t')[1].split('\"')[1] if 'label' in graph[i] and", "'.format(str(len(parameters)-1-i))+'{0:11}'.format(tmp_layer['layer_name'])+' Backward End ======================') print(tmp_dLoss_dz[0].shape, tmp_dLoss_dz[1].shape) dLoss_dz = tmp_dLoss_dz[0] +", "layer_name} elif layer == 'View': layer_name = 'View' parameters[i] =", "len(dLoss_dz.shape) == len(lastpop.shape): dLoss_dz = dLoss_dz.reshape(lastpop.shape) elif layer['layer_name'] == 'BatchNorm2d':", "layer.__dict__.get('kernel_size') if not isinstance(kernel_size, tuple): Conv2d_params['kernel_size'] = (kernel_size, kernel_size) else:", "elif isinstance(layer, nn.ReLU): layer_name = 'ReLU' parameters[i] = {'layer_name': layer_name}", "list(dLoss_dfcW.shape)) print('# dbias.shape: ', list(dLoss_dfcB.shape)) return dLoss_dz, dLoss_dfcW/N, dLoss_dfcB/N @torch.no_grad()", "padding # return parameters[i] = MaxPool2d_params elif isinstance(layer, nn.AvgPool2d): layer_name", "= tmp_layer.weight parameters[i][j][k] = Linear_params elif isinstance(tmp_layer, nn.AdaptiveAvgPool2d): layer_name =", "continue p = layer['p'] mask = featuremap[i] dLoss_dz = dropback_backward(dLoss_dz,", "= m*dxhut mu = z.mean(axis=axis, keepdim=True) xmu = z -", "stride stride = layer.__dict__.get('stride') if not isinstance(stride, tuple): Conv2d_params['stride'] =", "layer['padding'] dLoss_dz = average_pooling_backward(dLoss_dz, z, pooling, stride, padding) return_dz[i] =", "return_dz[i][j][k] = tmp_dLoss_dz[-1] elif tmp_layer['layer_name'] == 'BatchNorm2d': eps = tmp_layer['eps']", "connections = generate_connections(g) last_connections = merge_connections(connections) return_layers = get_layers(last_connections, model)", "under the Apache License, Version 2.0 (the \"License\"); # you", "x_pad = x x_pad = x_pad.unfold(2, k, strides[0]) x_pad =", "{} MaxPool2d_params['layer_name'] = layer_name # kernel_size kernel_size = layer.__dict__.get('kernel_size') if", "weight_z, z, padding, stride) return_dz[i][j][k] = tmp_dLoss_dz[-1] elif tmp_layer['layer_name'] ==", "@torch.no_grad() def conv_backward(next_dz, K, z, padding=(0, 0), strides=(1, 1)): N,", "t in var.saved_tensors: dot.edge(str(id(t)), str(id(var))) add_nodes(t) print(var) add_nodes(var.grad_fn) return dot", "next_dz.shape: ', list(next_dz.shape)) print('# mask.shape: ', list(mask.shape)) zeros_tensor = torch.zeros_like(mask)", "for i in range(len(connections)): print('# Restore network model: processing {}/{}'.format(i,", "elif isinstance(layer, nn.AvgPool2d): layer_name = 'AvgPool2d' AvgPool2d_params = {} AvgPool2d_params['layer_name']", "= 
{'layer_name': layer_name} elif tmp_layer == 'View': layer_name = 'View'", "stride # padding padding = layer.__dict__.get('padding') if not isinstance(padding, tuple):", "z.shape: ', list(z.shape)) print('# weight.shape: ', list(K.shape)) print('# bias.shape: ',", "return last_connections @torch.no_grad() def find_next_layer_by_name(layers, name, start_i): for i in", "', list(next_dz.shape)) print('# mask.shape: ', list(mask.shape)) zeros_tensor = torch.zeros_like(mask) dLoss_dz", "if(not tensor_A.shape == tensor_B.shape): print('Shape of two compard tensors is", "calculated loss: ', ypred_loss.item()) print('=========================== Layer:'+' {0:18}'.format('cross_entropy_loss')+' End =============================') return", "elif 'Mean' in name or 'Avg' in name: return 'AvgPool2d'", "for index in range(len(list_dic_key_value)): if key2 == list(list_dic_key_value[index].keys())[0]: end =", "= layer.__dict__.get('in_features') Linear_params['in_features'] = in_features # out_features out_features = layer.__dict__.get('out_features')", "N, C, H, W = z.shape m = N*H*W shape", "in range(0, W_last, strides[1]): pz[n,d,h,w] = dz[n,d,h//strides[0],w//strides[1]] return pz @torch.no_grad()", "@torch.no_grad() def dropback_backward(next_dz, mask, p): print('# zeros probability: ', p)", "start+1, end-1 @torch.no_grad() def merge_connections(connections): import copy last_connections = copy.deepcopy(connections)", "= tmp_dLoss_dz[-1] print('=========================== {0:3} Branch: '.format(str(len(parameters)-1-i))+'{0:11}'.format(tmp_layer['layer_name'])+' Backward End ======================') print(tmp_dLoss_dz[0].shape,", "padding = layer.__dict__.get('padding') if not isinstance(padding, tuple): Conv2d_params['padding'] = (padding,", "elif tmp_layer['layer_name'] == 'BatchNorm2d': eps = tmp_layer['eps'] z = featuremap[i-1][j][k+1]", "'View': last_tensors[i] = last_tensors[i+1].view(last_tensors[i+1].size(0), -1) elif isinstance(last_tensors[i], list): for j", "= tmp[1] + 1 else: return_layers.insert(0, []) for j in", "range(len(graph)): if 'label' in graph[i] and graph[i][-1] == '\"': labels[(graph[i]+graph[i+1][1:]).split('\\t')[1].split('", "list(connections[i-1].keys())[0] if not connections[i][item_key] == last_item_key: for j in range(i+1,", "for i in range(len(parameters)-1, -1, -1): layer = parameters[i] print('\\n========================", "parameters[i][j][k] = {'layer_name': layer_name} elif tmp_layer == 'View': layer_name =", "if np_A[n,c,h,w]-np_B[n,c,h,w] > error_tolerance or np_B[n,c,h,w]-np_A[n,c,h,w] > error_tolerance: error +=", "layer['stride'] padding = layer['padding'] dLoss_dz = max_pooling_backward(dLoss_dz, z, pooling, stride,", "layer['layer_name'] == 'MaxPool2d': z = featuremap[-1] pooling = layer['kernel_size'] stride", "range(len(last_tensors[i])): if len(last_tensors[i][j]) == 0: last_tensors[i][j].append(last_tensors[i+1]) return last_tensors @torch.no_grad() def", "', '['+str(K.shape[0])+']') print('# padding: ', padding) print('# strides: ', strides)", "(output_size, output_size) else: AdaptiveAvgPool2d_params['output_size'] = output_size # return parameters[i][j][k] =", "range(len(last_connections)): print(i, last_connections[i]) print('================') print('================') for i in range(len(return_layers)): print(i,", "z.shape: ', list(z.shape)) zeros_tensor = torch.zeros_like(next_dz) dLoss_dz = torch.where(torch.gt(z, 0),", "'Dropout': return_dz[i] = dLoss_dz print('# Skip this layer because the", "layer.__dict__.get('kernel_size') if not 
isinstance(kernel_size, tuple): AvgPool2d_params['kernel_size'] = (kernel_size, kernel_size) else:", "layer.__dict__.get('eps') BatchNorm2d_params['eps'] = eps # return fc_conv_weights.append(layer.weight) parameters.append(BatchNorm2d_params) elif isinstance(layer,", "= dLoss_dz elif layer['layer_name'] == 'Add': dLoss_dz = add_backward(dLoss_dz) return_dz[i]", "* j + pooling[1]]) h_idx = strides[0] * i +", "kernel_size) else: AvgPool2d_params['kernel_size'] = kernel_size # stride stride = tmp_layer.__dict__.get('stride')", "eps # return fc_conv_weights[i] = layer.weight parameters[i] = BatchNorm2d_params elif", "end=' ') return error/(C*N) @torch.no_grad() def get_featuremap(featuremap_dir=None): import os featuremap", "i, j] dz = _remove_padding(padding_dz, padding) # padding_z[:, :, padding[0]:-padding[0],", "pooling[0], strides[1] * j:strides[1] * j + pooling[1]] += next_dz[n,", "in range(len(layer[j])): tmp_layer = layer[j][k] ### if isinstance(tmp_layer, nn.Conv2d): layer_name", "if isinstance(last_tensors[i], str): # Add or View if last_tensors[i] ==", "if not list(last_connections[i][j][k-1].keys())[0].split('_')[0] == 'Dropout': index_tmp_layers = tmp[1] + 1", "out_channel # kernel_size kernel_size = tmp_layer.__dict__.get('kernel_size') if not isinstance(kernel_size, tuple):", "= dLoss_dz print('======================== {0:3} Layer: '.format(str(len(parameters)-1-i))+'{0:11}'.format(layer['layer_name'])+' Backward End ==========================') elif", "1), torch.swapaxes(padding_next_dz, 0, 1)) db = torch.sum(torch.sum(torch.sum(next_dz, axis=-1), axis=-1), axis=0)", "Restore network model: processing {}/{}'.format(i, len(connections)-1)) item_key = list(connections[i].keys())[0] if", "dLoss_dypred @torch.no_grad() def fc_backward(dLoss_dnextz, z, w): print('# next_dz.shape: ', list(dLoss_dnextz.shape))", "'Conv' in name: return 'Conv2d' elif 'MaxPool' in name: return", "gamma = fc_conv_weights[i] dLoss_dz = batchnorm2d_backward(dLoss_dz, z, eps, gamma) return_dz[i]", "Backward End ======================') print(tmp_dLoss_dz[0].shape, tmp_dLoss_dz[1].shape) dLoss_dz = tmp_dLoss_dz[0] + tmp_dLoss_dz[1]", "len(connections)-1)) item_key = list(connections[i].keys())[0] if not 'None' in item_key: if", "an \"AS IS\" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF", "not isinstance(kernel_size, tuple): MaxPool2d_params['kernel_size'] = (kernel_size, kernel_size) else: MaxPool2d_params['kernel_size'] =", "return dLoss_dz @torch.no_grad() def dropback_backward(next_dz, mask, p): print('# zeros probability:", "4: z = z.view(z.size(0), -1) dLoss_dz = torch.matmul(dLoss_dnextz, w) #delta", "not list(last_connections[i][j][k-1].keys())[0].split('_')[0] == 'Dropout': index_tmp_layers = tmp[1] + 1 return", "= 0 end = len(list_dic_key_value)-1 for index in range(len(list_dic_key_value)): if", "for j in range(len(layer)): for k in range(len(layer[j])): tmp_layer =", "layer.__dict__.get('in_features') Linear_params['in_features'] = in_features # out_features out_features = layer.__dict__.get('out_features') Linear_params['out_features']", "last_connections = last_connections[:notchoosed[0]] else: pass for i in range(num_Throwed): last_connections.remove('Throwed')", "g_view: g.view() delete_allpths(pth_dir=None) print('\\n=========================== Generate Tensors Start ====================================') result =", "= layer['padding'] dLoss_dz = average_pooling_backward(dLoss_dz, z, pooling, stride, padding) return_dz.append(dLoss_dz)", "= params[1] padding = params[2] output_size = 
(int((last_z.shape[2]-pooling[0]+2*padding[0])/stride[0]+1), \\ int((last_z.shape[3]-pooling[0]+2*padding[0])/stride[0]+1))", "return z[:, :, :, padding[1]:-padding[1]] else: return z @torch.no_grad() def", "= tmp_layer.__dict__.get('stride') if not isinstance(stride, tuple): AvgPool2d_params['stride'] = (stride, stride)", "if isinstance(tmp_layer, nn.Conv2d): layer_name = 'Conv2d' Conv2d_params = {} Conv2d_params['layer_name']", "x) if g_view: g.view() delete_allpths(pth_dir=None) print('\\n=========================== Generate Tensors Start ====================================')", "list(z.shape)) zeros_tensor = torch.zeros_like(next_dz) dLoss_dz = torch.where(torch.gt(z, 0), next_dz, zeros_tensor)", "3)) swap_flip_K = torch.swapaxes(flip_K, 0, 1) ppadding_next_dz = F.pad(padding_next_dz, pad=(k2-1-padding[1],k2-1-padding[1],\\", "= [] if featuremap_dir == None: pth_dir = \"./tmp_file/\" else:", "str(layer): layers.append(layer) parameters = [] fc_conv_weights = [] for layer", "0), strides=(1, 1)): N, C, H, W = z.shape D,", "= {} Dropout_params['layer_name'] = layer_name # p p = layer.__dict__.get('p')", "== 'Linear': weight_z = fc_conv_weights[i] z = featuremap[i] dLoss_dz, dLoss_dW,", "y_true = F.one_hot(label, num_classes=num_class).float() loss, dLoss_dz = cross_entropy_loss(featuremap[0], y_true) featuremap.pop(0)", "name: return 'View' elif 'Mean' in name or 'Avg' in", "isinstance(layer, nn.Dropout): layer_name = 'Dropout' Dropout_params = {} Dropout_params['layer_name'] =", "= kernel_size # stride stride = layer.__dict__.get('stride') if not isinstance(stride,", "elif isinstance(tmp_layer, nn.AvgPool2d): layer_name = 'AvgPool2d' AvgPool2d_params = {} AvgPool2d_params['layer_name']", "pooling, stride, padding) return_dz[i] = dLoss_dz elif layer['layer_name'] == 'Linear':", "= len(list_dic_key_value)-1 for index in range(len(list_dic_key_value)): if key1 == list(list_dic_key_value[index].keys())[0]:", "fc_backward(dLoss_dz, z, weight_z) return_dz[i] = dLoss_dz elif layer['layer_name'] == 'View':", "= {} MaxPool2d_params['layer_name'] = layer_name # kernel_size kernel_size = tmp_layer.__dict__.get('kernel_size')", "C, k1, k2 = K.shape N, D, H1, W1 =", "if not isinstance(padding, tuple): Conv2d_params['padding'] = (padding, padding) else: Conv2d_params['padding']", "@torch.no_grad() def _conv_forward(x, weight, strides=(1,1)): n, c, h_in, w_in =", "fontsize='12', ranksep='0.1', height='0.2') dot = Digraph(node_attr=node_attr, graph_attr=dict(size=\"12,12\")) seen = set()", "Conv2d_params elif isinstance(layer, nn.ReLU): layer_name = 'ReLU' parameters[i] = {'layer_name':", "dLoss_dz @torch.no_grad() def relu_backward(next_dz, z): print('# next_dz.shape: ', list(next_dz.shape)) print('#", "stride = layer['stride'] padding = layer['padding'] dLoss_dz = max_pooling_backward(dLoss_dz, z,", "seen: if torch.is_tensor(var): dot.node(str(id(var)), size_to_str(var.size()), fillcolor='orange') elif hasattr(var, 'variable'): u", "(dK/N).transpose(0,1), db/N @torch.no_grad() def _conv_forward(x, weight, strides=(1,1)): n, c, h_in,", "parameters[i] = Linear_params elif isinstance(layer, nn.AdaptiveAvgPool2d): layer_name = 'AdaptiveAvgPool2d' AdaptiveAvgPool2d_params", "= var.variable name = param_map[id(u)] if params is not None", "{}/{}'.format(i, len(connections)-1)) item_key = list(connections[i].keys())[0] if not 'None' in item_key:", "= ivar/m*(dz1-dz2-dz3) print('# dz.shape: ', list(dz.shape)) return dz @torch.no_grad() def", "elif isinstance(layer, nn.MaxPool2d): layer_name = 'MaxPool2d' 
MaxPool2d_params = {} MaxPool2d_params['layer_name']", "== last_key: tmp_split.append(item) else: return_connections.append(tmp_split) tmp_split = [item] return return_connections", "return last_tensors @torch.no_grad() def get_structure_parameters(return_layers): import copy parameters = copy.deepcopy(return_layers)", "h_idx = strides[0] * i + flat_idx // pooling[1] w_idx", "padding[1] > 0: return z[:, :, :, padding[1]:-padding[1]] else: return", "C, H, W = z.shape _, _, out_h, out_w =", "-1): if not isinstance(last_connections[i], list): # 单一层,无分支 current_layer_name = list(last_connections[i].keys())[0].split('_')[0]", "print('# next_dz.shape: ', list(next_dz.shape)) print('# z.shape: ', list(z.shape)) print('# padding:", "has not been processed in get_structure_parameters_v1!') return parameters, fc_conv_weights @torch.no_grad()", "if torch.is_tensor(var): dot.node(str(id(var)), size_to_str(var.size()), fillcolor='orange') elif hasattr(var, 'variable'): u =", "list(dz.shape)) return dz @torch.no_grad() def _remove_padding(z, padding): if padding[0] >", "in gradient_backward_v1!') print('======================== {0:3} Layer: '.format(str(i))+'{0:9}'.format(layer['layer_name'])+' Backward End ==========================') delete_allpths(pth_dir=None)", "']': labels[graph[i].split('\\t')[1].split(' ')[0]]=\\ graph[i].split('\\t')[1].split('=')[1].split(']')[0] for i in range(len(graph)): if '->'", "keepdim=True) dz = ivar/m*(dz1-dz2-dz3) print('# dz.shape: ', list(dz.shape)) return dz", "= num_features # eps eps = layer.__dict__.get('eps') BatchNorm2d_params['eps'] = eps", "print('================') for i in range(len(return_layers)): print(i, return_layers[i]) print('================') print('================') for", "layer['p'] mask = featuremap[i] dLoss_dz = dropback_backward(dLoss_dz, mask, p) return_dz[i]", "Layer: '.format(str(len(parameters)-1-i))+'{0:11}'.format(layer['layer_name'])+' Backward Start ========================') if layer['layer_name'] == 'Conv2d': z", "tmp[1] + 1 elif not list(last_connections[i-1].keys())[0].split('_')[0] == 'Dropout': index_tmp_layers =", "padding = (0, 0) stride = layer['stride'] dLoss_dz, dLoss_dW, dLoss_dB", "= featuremap[i] dLoss_dz, dLoss_dW, dLoss_dB = fc_backward(dLoss_dz, z, weight_z) return_dz[i]", "= find_start_end(connections, i, j-1) tmp = [] tmp.append(connections[start:end+1]) tmp.append(connections[i:j-1]) last_connections[start:end+1]", "output_size[1]) else: dLoss_dz = dLoss_dnextz.reshape(last_z.shape) print('# dz.shape: ', list(dLoss_dz.shape)) return", "tensors is not equal.') return None error = 0 error_tolerance", "elif layer['layer_name'] == 'Dropout': if parameters[i-1]['layer_name'] == 'Dropout': return_dz[i] =", "c in range(C): if np_A[c]-np_B[c] > error_tolerance or np_B[c]-np_A[c] >", "list(connections[i].keys())[0] if not 'None' in item_key: if i == 0:", "''' print('================') for i in range(len(last_connections)): print(i, last_connections[i]) print('================') print('================')", "mode='constant', value=0) padding_dz = torch.zeros_like(padding_z) for n in torch.arange(N): for", "1: C = tensor_A.shape[0] for c in range(C): if np_A[c]-np_B[c]", "# eps eps = tmp_layer.__dict__.get('eps') BatchNorm2d_params['eps'] = eps # return", "copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # #", "for k in range(len(layer[j])): tmp_layer = layer[j][k] print('\\n=========================== {0:3} Branch:", "range(i+1, len(connections)): if not list(connections[j].values())[0] == 
list(connections[j-1].keys())[0]: notchoosed.append(i) start, end", "range(0, W_last, strides[1]): pz[n,d,h,w] = dz[n,d,h//strides[0],w//strides[1]] return pz @torch.no_grad() def", "padding = layer['padding'] dLoss_dz = average_pooling_backward(dLoss_dz, z, pooling, stride, padding)", "completed in gradient_backward!') print('# Torch calculated loss: ', loss_torch.detach().numpy()) loss_torch.backward()", "eps = layer.__dict__.get('eps') BatchNorm2d_params['eps'] = eps # return fc_conv_weights[i] =", "= [] for i in range(len(graph)): if 'label' in graph[i]", "'nchwkj,dckj->ndhw', x_pad, weight) return out @torch.no_grad() def _insert_zeros(dz, strides): N,", "z = z.view(z.size(0), -1) dLoss_dz = torch.matmul(dLoss_dnextz, w) #delta dLoss_dfcW", "in range(len(last_connections)): print(last_connections[i]) for i in range(len(featuremap)): if not isinstance(featuremap[i],", "len(tensor_A.shape) == 2: N, C = tensor_A.shape for n in", "index in range(len(list_dic_key_value)): if key1 == list(list_dic_key_value[index].keys())[0]: start = index", "View if last_tensors[i] == 'Add': last_tensors[i] = last_tensors[i+1][0][0] + last_tensors[i+1][1][0]", "i in range(len(pop_index)-1, -1, -1): connections.remove(pop_index[i]) new_connections = [] for", "z.view(z.size(0), -1) dLoss_dz = torch.matmul(dLoss_dnextz, w) #delta dLoss_dfcW = torch.matmul(dLoss_dnextz.t(),", "xmu = z - mu xmu2 = xmu**2 var =", "set() def size_to_str(size): return '('+(', ').join(['%d' % v for v", "0: tmp_split.append(item) continue value = list(item.values())[0] last_key = list(tmp_split[-1].keys())[0] if", "pz[n,d,h,w] = dz[n,d,h//strides[0],w//strides[1]] return pz @torch.no_grad() def judge_tensors_equal(tensor_A, tensor_B): if(not", "kernel_size) else: MaxPool2d_params['kernel_size'] = kernel_size # stride stride = tmp_layer.__dict__.get('stride')", "= Loss(result, label) _, connections = generate_connections(g) last_connections = merge_connections(connections)", "= featuremap[i-1][j][k+1] weight_z = fc_conv_weights[i][j][k] try: padding = tmp_layer['padding'] except:", "Apache License, Version 2.0 (the \"License\"); # you may not", "= list(item.items())[0] key1 = exchange_name(key.split('_')[0]) + '_' + key.split('_')[1] value1", "either express or implied. 
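The _conv_forward fragments build the correlation by unfolding the input into sliding windows and contracting with einsum('nchwkj,dckj->ndhw', ...). A self-contained sketch of that pattern compared against F.conv2d, assuming stride 1 and no padding; conv_forward_unfold is an illustrative name.

import torch
import torch.nn.functional as F

def conv_forward_unfold(x, weight, strides=(1, 1)):
    # x: (N, C, H, W), weight: (D, C, k1, k2)
    k1, k2 = weight.shape[2], weight.shape[3]
    # unfold H and W into sliding windows of the kernel size
    windows = x.unfold(2, k1, strides[0]).unfold(3, k2, strides[1])  # (N, C, H', W', k1, k2)
    # contract channel and kernel dims against the weight
    return torch.einsum('nchwkj,dckj->ndhw', windows, weight)

x = torch.randn(2, 3, 8, 8)
w = torch.randn(5, 3, 3, 3)
print(torch.allclose(conv_forward_unfold(x, w), F.conv2d(x, w), atol=1e-5))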
# See the License for the", "strides[0] * i + flat_idx // pooling[1] w_idx = strides[1]", "def get_structure_parameters_v1(model): layers = [] for layer in model.modules(): if", "require grad, orange are Tensors saved for backward in torch.autograd.Function", "not list(connections[j].values())[0] == list(connections[j-1].keys())[0]: notchoosed.append(i) start, end = find_start_end(connections, i,", "= layer.__dict__.get('in_channels') Conv2d_params['in_channel'] = in_channel # out_channel out_channel = layer.__dict__.get('out_channels')", "def dropback_backward(next_dz, mask, p): print('# zeros probability: ', p) print('#", "0.001 np_A = tensor_A.detach().numpy() np_B = tensor_B.detach().numpy() if len(tensor_A.shape) ==", "AvgPool2d_params['kernel_size'] = (kernel_size, kernel_size) else: AvgPool2d_params['kernel_size'] = kernel_size # stride", "current_layer_name, index_tmp_layers) return_layers[0][j].insert(0, tmp[0]) if not list(last_connections[i][j][k-1].keys())[0].split('_')[0] == 'Dropout': index_tmp_layers", "in range(len(parameters)-1, -1, -1): layer = parameters[i] print('\\n======================== {0:3} Layer:", "find_next_layer_by_name(tmp_layers, current_layer_name, index_tmp_layers) return_layers.insert(0, tmp[0]) if isinstance(last_connections[i-1], list): index_tmp_layers =", "len(lastpop.shape): dLoss_dz = dLoss_dz.reshape(lastpop.shape) elif layer['layer_name'] == 'AvgPool2d': z =", "and padding[1] > 0: return z[:, :, padding[0]:-padding[0], padding[1]:-padding[1]] elif", "layer_name # output_size output_size = tmp_layer.__dict__.get('output_size') if not isinstance(output_size, tuple):", "{} Linear_params['layer_name'] = layer_name # in_features in_features = layer.__dict__.get('in_features') Linear_params['in_features']", "loss: ', loss) featuremap.pop() return_dz.append(dLoss_dz) dW_dB_fc_conv = [] for i", "name or 'Avg' in name: return 'AvgPool2d' elif 'BatchNorm' in", "in range(len(return_layers)): layer = return_layers[i] if isinstance(layer, nn.Conv2d): layer_name =", "H W_last = (W-1)*(strides[1]-1) + W pz = torch.zeros(N, D,", "dot.node(str(id(var)), node_name, fillcolor='lightblue') else: dot.node(str(id(var)), str(type(var).__name__)) seen.add(var) if hasattr(var, 'next_functions'):", "connections[i][item_key] == last_item_key: for j in range(i+1, len(connections)): if not", "dz.shape: ', list(dz.shape)) return dz @torch.no_grad() def batchnorm2d_backward(next_dz, z, eps,", "= num_features # eps eps = tmp_layer.__dict__.get('eps') BatchNorm2d_params['eps'] = eps", "torch import torch.nn as nn import torch.nn.functional as F from", "Results Start =========================') y = model(x) print('=========================== Store network model", "k, featuremap[i][j][k].shape) ''' ##################### # 前面n层倒序遍历 for i in range(len(parameters)):", "params: pooling = params[0] stride = params[1] padding = params[2]", "k in range(len(last_connections[i][j])-1, -1, -1): current_layer_name = list(last_connections[i][j][k].keys())[0].split('_')[0] if 'Add'", "''' ##################### # 前面n层倒序遍历 for i in range(len(parameters)): layer =", "+ H W_last = (W-1)*(strides[1]-1) + W pz = torch.zeros(N,", "return 'BatchNorm2d' elif 'Conv' in name: return 'Conv2d' elif 'MaxPool'", "list(dz.shape)) return dz @torch.no_grad() def batchnorm2d_backward(next_dz, z, eps, gamma=torch.Tensor([1.,1.,1.])): print('#", "print('2D-error-rate: ', end=' ') return error/(C*N) @torch.no_grad() def get_featuremap(featuremap_dir=None): import", "get_structure_parameters(return_layers): import 
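The max-pooling backward fragments route each upstream gradient value to the argmax position of its window, recovering row/column offsets with flat_idx // pooling[1] and flat_idx % pooling[1]. A small sketch of that routing for the unpadded case, verified against autograd; the simplified signature here omits the padding argument of the original.

import torch
import torch.nn.functional as F

@torch.no_grad()
def max_pool_backward(dout, z, pooling, strides):
    # z: input of the pooling layer, dout: gradient of its output
    N, C, H, W = z.shape
    _, _, out_h, out_w = dout.shape
    dz = torch.zeros_like(z)
    for n in range(N):
        for c in range(C):
            for i in range(out_h):
                for j in range(out_w):
                    window = z[n, c,
                               strides[0]*i:strides[0]*i + pooling[0],
                               strides[1]*j:strides[1]*j + pooling[1]]
                    flat = torch.argmax(window)
                    h_idx = strides[0]*i + flat // pooling[1]
                    w_idx = strides[1]*j + flat % pooling[1]
                    dz[n, c, h_idx, w_idx] += dout[n, c, i, j]
    return dz

z = torch.randn(1, 2, 4, 4, requires_grad=True)
y = F.max_pool2d(z, kernel_size=2, stride=2)
y.backward(torch.ones_like(y))
print(torch.allclose(z.grad, max_pool_backward(torch.ones_like(y), z.detach(), (2, 2), (2, 2))))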
copy parameters = copy.deepcopy(return_layers) fc_conv_weights = copy.deepcopy(return_layers) for", "tmp_dLoss_dz[-1] = relu_backward(tmp_dLoss_dz[-1], z) return_dz[i][j][k] = tmp_dLoss_dz[-1] elif tmp_layer['layer_name'] ==", "print('# y_true.shape: ', list(y_true.shape)) y_shift = torch.sub(y_predict, torch.max(y_predict, dim=1, keepdim=True).values)", "gamma) return_dz.append(dLoss_dz) fc_conv_weights.pop() lastpop = featuremap.pop() if not len(dLoss_dz.shape) ==", "= in_features # out_features out_features = tmp_layer.__dict__.get('out_features') Linear_params['out_features'] = out_features", "layer has been calcualted!') print('======================== {0:3} Layer: '.format(str(len(parameters)-1-i))+'{0:11}'.\\ format(layer['layer_name'])+' Backward", "') return error/(N*C*H*W) elif len(tensor_A.shape) == 1: C = tensor_A.shape[0]", "')[0]]=\\ (graph[i]+graph[i+1][1:]).split('\\t')[1].split('\"')[1] if 'label' in graph[i] and graph[i][-1] == ']':", "z = featuremap[i-1][j][k+1] tmp_dLoss_dz[-1] = relu_backward(tmp_dLoss_dz[-1], z) return_dz[i][j][k] = tmp_dLoss_dz[-1]", "height='0.2') dot = Digraph(node_attr=node_attr, graph_attr=dict(size=\"12,12\")) seen = set() def size_to_str(size):", "current_layer_name: last_tensors[i][j][k] = 'View' else: last_tensors[i][j][k] = tensors[index_tensors] index_tensors +=", "= tensor_A.detach().numpy() np_B = tensor_B.detach().numpy() if len(tensor_A.shape) == 4: N,", "add_backward(dLoss_dz) return_dz[i] = dLoss_dz elif layer['layer_name'] == 'Dropout': if parameters[i-1]['layer_name']", "# return parameters[i] = AdaptiveAvgPool2d_params elif isinstance(layer, list): for j", "elif layer['layer_name'] == 'Add': dLoss_dz = add_backward(dLoss_dz) return_dz[i] = dLoss_dz", "'Conv2d' elif 'MaxPool' in name: return 'MaxPool2d' elif 'MulBackward' in", "fc_conv_weights[i][j][k] = tmp_layer.weight parameters[i][j][k] = Conv2d_params elif isinstance(tmp_layer, nn.ReLU): layer_name", "name in str(layer): return layer, i @torch.no_grad() def get_layers(last_connections, model):", "g = make_dot(y) return g @torch.no_grad() def exchange_name(name): if 'Relu'", "+ W pz = torch.zeros(N, D, H_last, W_last) for n", "print('# Self calculated loss: ', ypred_loss.item()) print('=========================== Layer:'+' {0:18}'.format('cross_entropy_loss')+' End", "isinstance(stride, tuple): MaxPool2d_params['stride'] = (stride, stride) else: MaxPool2d_params['stride'] = stride", "out_features = layer.__dict__.get('out_features') Linear_params['out_features'] = out_features # return fc_conv_weights[i] =", "pth_dir = \"./tmp_file/\" else: pth_dir = featuremap_dir files = os.listdir(pth_dir)", "return_dz[i][j][k] = tmp_dLoss_dz[-1] elif tmp_layer['layer_name'] == 'ReLU': z = featuremap[i-1][j][k+1]", "= dLoss_dz.reshape(lastpop.shape) elif layer['layer_name'] == 'ReLU': z = featuremap[-1] dLoss_dz", "elif tmp_layer == 'Cat': layer_name = 'Cat' parameters[i][j][k] = {'layer_name':", "parameters.append(Dropout_params) elif isinstance(layer, nn.BatchNorm2d): layer_name = 'BatchNorm2d' BatchNorm2d_params = {}", "', strides) padding_next_dz = _insert_zeros(next_dz, strides) flip_K = torch.flip(K, (2,", "in_features = tmp_layer.__dict__.get('in_features') Linear_params['in_features'] = in_features # out_features out_features =", "Start ========================') if layer['layer_name'] == 'Conv2d': z = featuremap[-1] weight_z", "item = connections[i] if len(tmp_split) == 0: tmp_split.append(item) continue value", "last_connections.insert(start, 'Throwed') num_Throwed += 1 break if not 
notchoosed ==", "isinstance(last_connections[i], list): # 单一层,无分支 current_layer_name = list(last_connections[i].keys())[0].split('_')[0] if 'ReLU' in", "fillcolor='lightblue') else: dot.node(str(id(var)), str(type(var).__name__)) seen.add(var) if hasattr(var, 'next_functions'): for u", "distributed on an \"AS IS\" BASIS, # WITHOUT WARRANTIES OR", "1. for i in range(len(x)): mul *= x[i] return mul", "if 'VGG' in str(model) or 'AlexNet' in str(model): print(judge_tensors_equal(dLoss_dW, model.features[0].weight.grad))", "z, padding, stride) return_dz[i] = dLoss_dz elif layer['layer_name'] == 'ReLU':", "== 0: pass else: last_item_key = list(connections[i-1].keys())[0] if not connections[i][item_key]", "= {} AdaptiveAvgPool2d_params['layer_name'] = layer_name # output_size output_size = layer.__dict__.get('output_size')", "return_dz[i] = dLoss_dz elif layer['layer_name'] == 'Add': dLoss_dz = add_backward(dLoss_dz)", "dLoss_dz = dropback_backward(dLoss_dz, mask, p) return_dz[i] = dLoss_dz elif layer['layer_name']", "= (0, 0) stride = tmp_layer['stride'] tmp_dLoss_dz[-1], dLoss_dW, dLoss_dB =", "pooling[1]) dz = _remove_padding(padding_dz, padding) # padding_z[:, :, padding[0]:-padding[0], padding[1]:-padding[1]]", "import copy last_tensors = copy.deepcopy(last_connections) for i in range(len(last_connections)-1, -1,", "not isinstance(padding, tuple): AvgPool2d_params['padding'] = (padding, padding) else: AvgPool2d_params['padding'] =", "mul *= x[i] return mul @torch.no_grad() def gradient_backward_v1(model, img, label,", "file_num in file_nums: tensor = torch.load(pth_dir+str(file_num)+'.pth') featuremap.append(tensor) delete_allpths(pth_dir=None) return featuremap", "weight.shape: ', list(w.shape)) print('# bias.shape: ', '['+str(dLoss_dnextz.shape[1])+']') N = z.shape[0]", "isinstance(kernel_size, tuple): MaxPool2d_params['kernel_size'] = (kernel_size, kernel_size) else: MaxPool2d_params['kernel_size'] = kernel_size", "= out_channel # kernel_size kernel_size = layer.__dict__.get('kernel_size') if not isinstance(kernel_size,", "model: processing {}/{}'.format(i, len(connections)-1)) item_key = list(connections[i].keys())[0] if not 'None'", "= layer_name # output_size output_size = layer.__dict__.get('output_size') if not isinstance(output_size,", "tmp_layer.weight parameters[i][j][k] = Linear_params elif isinstance(tmp_layer, nn.AdaptiveAvgPool2d): layer_name = 'AdaptiveAvgPool2d'", "i in range(len(x)): mul *= x[i] return mul @torch.no_grad() def", "tmp_layer['layer_name'] == 'Conv2d': if k+1 >= len(featuremap[i-1][j]): z = featuremap[i]", "print('# padding: ', padding) print('# strides: ', strides) N, C,", "-1, -1): if not isinstance(last_connections[i], list): current_layer_name = list(last_connections[i].keys())[0].split('_')[0] if", "label, num_class=1000): return_dz = [] parameters, fc_conv_weights = get_structure_parameters_v1(model) featuremap", "0: pass else: last_item_key = list(connections[i-1].keys())[0] if not connections[i][item_key] ==", "= stride # padding padding = layer.__dict__.get('padding') if not isinstance(padding,", "ax = list(np.arange(len(shape))) shape.pop(1) ax.pop(1) axis = tuple(ax) dxhut =", "else: AvgPool2d_params['padding'] = padding # return parameters[i] = AvgPool2d_params elif", "return mul @torch.no_grad() def gradient_backward_v1(model, img, label, num_class=1000): return_dz =", "== 'AvgPool2d': z = featuremap[-1] pooling = layer['kernel_size'] stride =", "= {'layer_name': layer_name} elif isinstance(tmp_layer, nn.MaxPool2d): layer_name = 'MaxPool2d' 
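Average-pooling backward in the fragments spreads each upstream value uniformly over its window, dividing by pooling[0]*pooling[1], and finally strips the padding. A compact sketch under the assumption of no padding, checked against autograd; avg_pool_backward is an illustrative name.

import torch
import torch.nn.functional as F

@torch.no_grad()
def avg_pool_backward(dout, z, pooling, strides):
    dz = torch.zeros_like(z)
    _, _, out_h, out_w = dout.shape
    area = pooling[0] * pooling[1]
    for i in range(out_h):
        for j in range(out_w):
            # every position in the window receives an equal share of the gradient
            dz[:, :,
               strides[0]*i:strides[0]*i + pooling[0],
               strides[1]*j:strides[1]*j + pooling[1]] += dout[:, :, i:i+1, j:j+1] / area
    return dz

z = torch.randn(2, 3, 6, 6, requires_grad=True)
y = F.avg_pool2d(z, kernel_size=2, stride=2)
y.backward(torch.ones_like(y))
print(torch.allclose(z.grad, avg_pool_backward(torch.ones_like(y), z.detach(), (2, 2), (2, 2))))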
MaxPool2d_params", "return dot def generate_g(model, x): delete_allpths(pth_dir=None) print('\\n=========================== Store network model", "generate_g(model, x): delete_allpths(pth_dir=None) print('\\n=========================== Store network model Results Start =========================')", "dLoss_dz = dLoss_dz.reshape(lastpop.shape) elif layer['layer_name'] == 'AvgPool2d': z = featuremap[-1]", "# padding padding = layer.__dict__.get('padding') if not isinstance(padding, tuple): MaxPool2d_params['padding']", "z = featuremap[-1] gamma = fc_conv_weights[-1] dLoss_dz = batchnorm2d_backward(dLoss_dz, z,", "= conv_backward(dLoss_dz, weight_z, z, padding, stride) return_dz.append(dLoss_dz) fc_conv_weights.pop() if not", "in range(len(layer)): tmp_dLoss_dz.append(copy.deepcopy(dLoss_dz)) for k in range(len(layer[j])): tmp_layer = layer[j][k]", "layer_name # num_features num_features = layer.__dict__.get('num_features') BatchNorm2d_params['num_features'] = num_features #", "= layer.weight parameters[i] = Linear_params elif isinstance(layer, nn.AdaptiveAvgPool2d): layer_name =", ":, :, padding[1]:-padding[1]] else: return z @torch.no_grad() def conv_backward(next_dz, K,", "pad=(k2-1-padding[1],k2-1-padding[1],\\ k1-1-padding[0],k1-1-padding[0],0,0), mode='constant', value=0) dz = _conv_forward(ppadding_next_dz, swap_flip_K) swap_z =", "in name: return 'AvgPool2d' elif 'BatchNorm' in name: return 'BatchNorm2d'", "import torch.nn.functional as F from graphviz import Digraph, render from", "= tmp_layer.weight parameters[i][j][k] = Conv2d_params elif isinstance(tmp_layer, nn.ReLU): layer_name =", "list(list_dic_key_value[i].values())[0] key2 = list(list_dic_key_value[j].keys())[0] start = 0 end = len(list_dic_key_value)-1", "not equal.') return None error = 0 error_tolerance = 0.001", "= F.one_hot(label, num_classes=num_class).float() loss, dLoss_dz = cross_entropy_loss(featuremap[-1], y_true) print('Self calculated", "copy.deepcopy(return_layers) fc_conv_weights = copy.deepcopy(return_layers) for i in range(len(return_layers)): layer =", "parameters[i][j][k] = Conv2d_params elif isinstance(tmp_layer, nn.ReLU): layer_name = 'ReLU' parameters[i][j][k]", "= fc_conv_weights[i][j][k] try: padding = tmp_layer['padding'] except: padding = (0,", "tensor_B.detach().numpy() if len(tensor_A.shape) == 4: N, C, H, W =", "[] print('\\n=========================== Restore network model Start ===============================') for i in", "# stride stride = tmp_layer.__dict__.get('stride') if not isinstance(stride, tuple): AvgPool2d_params['stride']", "= layer[j][k] print('\\n=========================== {0:3} Branch: '.format(str(len(parameters)-1-i))+'{0:11}'.format(tmp_layer['layer_name'])+' Backward Start ====================') if", "= layer.__dict__.get('p') Dropout_params['p'] = p # return parameters.append(Dropout_params) elif isinstance(layer,", "= [] for i in range(len(connections)): item_key = list(connections[i].keys())[0] if", "os if pth_dir == None: pth_dir = \"./tmp_file/\" for root,", "z, eps, gamma) return_dz.append(dLoss_dz) fc_conv_weights.pop() lastpop = featuremap.pop() if not", "dLoss_dz = dLoss_dz.reshape(lastpop.shape) else: print('Not completed in gradient_backward_v1!') print('======================== {0:3}", "ax.pop(1) axis = tuple(ax) dxhut = torch.zeros_like(next_dz) for c in", "index_tmp_layers = tmp[1] + 1 elif not list(last_connections[i-1].keys())[0].split('_')[0] == 'Dropout':", "in range(len(last_connections)): print(i, last_connections[i]) print('================') 
print('================') for i in range(len(return_layers)):", "mu = z.mean(axis=axis, keepdim=True) xmu = z - mu xmu2", "if not isinstance(stride, tuple): Conv2d_params['stride'] = (stride, stride) else: Conv2d_params['stride']", "print('================') print('================') for i in range(len(return_tensors)): if not isinstance(return_tensors[i], list)", "continue for k in range(len(last_connections[i][j])-1, -1, -1): current_layer_name = list(last_connections[i][j][k].keys())[0].split('_')[0]", "print(i, parameters[i]) print('================') print('================') for i in range(len(return_tensors)): if not", "', list(next_dz.shape)) print('# z.shape: ', list(z.shape)) print('# padding: ', padding)", "batchnorm2d_backward(next_dz, z, eps, gamma=torch.Tensor([1.,1.,1.])): print('# next_dz.shape: ', list(next_dz.shape)) print('# z.shape:", "tuple): Conv2d_params['stride'] = (stride, stride) else: Conv2d_params['stride'] = stride #", "loss: ', ypred_loss.item()) print('=========================== Layer:'+' {0:18}'.format('cross_entropy_loss')+' End =============================') return ypred_loss,", "dLoss_dB = conv_backward(dLoss_dz, weight_z, z, padding, stride) return_dz.append(dLoss_dz) fc_conv_weights.pop() if", "eps = tmp_layer.__dict__.get('eps') BatchNorm2d_params['eps'] = eps # return fc_conv_weights[i][j][k] =", "render from torch.autograd import Variable @torch.no_grad() def cross_entropy_loss(y_predict, y_true): print('\\n===========================", "else: Conv2d_params['stride'] = stride # padding padding = layer.__dict__.get('padding') if", "else: tmp = find_next_layer_by_name(tmp_layers, current_layer_name, index_tmp_layers) return_layers.insert(0, tmp[0]) if isinstance(last_connections[i-1],", "H_last = (H-1)*(strides[0]-1) + H W_last = (W-1)*(strides[1]-1) + W", "last_tensors[i][j][k] = tensors[index_tensors] index_tensors += 1 for i in range(len(last_tensors)-1,", "return_dz[i] = dLoss_dz elif layer['layer_name'] == 'Linear': weight_z = fc_conv_weights[i]", "dropback_backward(dLoss_dz, mask, p) return_dz[i] = dLoss_dz elif layer['layer_name'] == 'BatchNorm2d':", "in torch.autograd.Function Args: var: output Variable params: dict of (name,", "if not isinstance(layer, list): print('\\n======================== {0:3} Layer: '.format(str(len(parameters)-1-i))+'{0:11}'.format(layer['layer_name'])+' Backward Start", "conv_backward(dLoss_dz, weight_z, z, padding, stride) return_dz[i] = dLoss_dz elif layer['layer_name']", "node_attr = dict(style='filled', shape='box', align='left', fontsize='12', ranksep='0.1', height='0.2') dot =", "torch.sum(dLoss_dnextz, dim=0) print('# dz.shape: ', list(dLoss_dz.shape)) print('# dweight.shape: ', list(dLoss_dfcW.shape))", "list(z.shape)) print('# weight.shape: ', list(K.shape)) print('# bias.shape: ', '['+str(K.shape[0])+']') print('#", "np_B[c]) error += 1 #print('Error rate: ', error/C) print('1D-error-rate: ',", "model(x) print('=========================== Store network model Results End ===========================\\n') if 'GoogLeNet'", "in_channel = layer.__dict__.get('in_channels') Conv2d_params['in_channel'] = in_channel # out_channel out_channel =", "return connections, new_connections @torch.no_grad() def get_split_connections(connections): return_connections = [] tmp_split", "':' in str(layer): tmp_layers.append(layer) index_tmp_layers = 0 for i in", "list): current_layer_name = list(last_connections[i].keys())[0].split('_')[0] if 'Add' in current_layer_name: last_tensors[i] =", "= tmp_layer['padding'] except: padding 
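The BatchNorm2d backward fragments compute per-channel statistics over N, H, W and use dz = ivar/m * (dz1 - dz2 - dz3) with dz1 = m*dxhut, dz2 = ivar**2 * sum(dxhut*xmu) * xmu and dz3 = sum(dxhut). A standalone sketch of that formula, checked against a training-mode nn.BatchNorm2d; passing gamma explicitly mirrors how the fragments carry the affine weight, and the function name is illustrative.

import torch
import torch.nn as nn

@torch.no_grad()
def batchnorm2d_backward(dout, z, eps, gamma):
    # per-channel statistics over N, H, W (m elements per channel)
    N, C, H, W = z.shape
    m = N * H * W
    axis = (0, 2, 3)
    mu = z.mean(dim=axis, keepdim=True)
    xmu = z - mu
    var = (xmu ** 2).sum(dim=axis, keepdim=True) / m          # biased variance
    ivar = 1.0 / torch.sqrt(var + eps)
    dxhat = dout * gamma.view(1, C, 1, 1)
    dz1 = m * dxhat
    dz2 = (ivar ** 2) * (dxhat * xmu).sum(dim=axis, keepdim=True) * xmu
    dz3 = dxhat.sum(dim=axis, keepdim=True)
    return ivar / m * (dz1 - dz2 - dz3)

bn = nn.BatchNorm2d(3)
bn.weight.data.uniform_(0.5, 1.5)
z = torch.randn(4, 3, 5, 5, requires_grad=True)
y = bn(z)
y.backward(torch.ones_like(y))
manual = batchnorm2d_backward(torch.ones_like(y), z.detach(), bn.eps, bn.weight.detach())
print(torch.allclose(z.grad, manual, atol=1e-5))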
= (0, 0) stride = tmp_layer['stride']", "padding # return fc_conv_weights[i][j][k] = tmp_layer.weight parameters[i][j][k] = Conv2d_params elif", "None: pth_dir = \"./tmp_file/\" else: pth_dir = featuremap_dir files =", "for i in range(len(last_tensors)-1, -1, -1): if isinstance(last_tensors[i], str): #", "Conv2d_params['out_channel'] = out_channel # kernel_size kernel_size = tmp_layer.__dict__.get('kernel_size') if not", "copy return_dz = copy.deepcopy(last_connections) featuremap = return_tensors featuremap.append(img) y_true =", "c in range(C): for h in range(H): for w in", "tuple): Conv2d_params['padding'] = (padding, padding) else: Conv2d_params['padding'] = padding #", "index_tensors += 1 for i in range(len(last_tensors)-1, -1, -1): if", "z = featuremap[i] gamma = fc_conv_weights[i] dLoss_dz = batchnorm2d_backward(dLoss_dz, z,", "# out_channel out_channel = layer.__dict__.get('out_channels') Conv2d_params['out_channel'] = out_channel # kernel_size", "= xmu2.sum(axis=axis, keepdim=True)/m ivar = 1./torch.pow(var+eps, 0.5) dz2 = (ivar**2)*((dxhut*xmu).sum(axis=axis,", "N = z.shape[0] if len(z.shape) == 4: z = z.view(z.size(0),", "fc_backward(dLoss_dz, z, weight_z) return_dz.append(dLoss_dz) fc_conv_weights.pop() lastpop = featuremap.pop() if not", "=============================') return ypred_loss, dLoss_dypred @torch.no_grad() def fc_backward(dLoss_dnextz, z, w): print('#", "name)) @torch.no_grad() def mul_items(tensor_size): x = list(tensor_size) mul = 1.", "'Mean' in name or 'Avg' in name: return 'AvgPool2d' elif", "End =============================') return ypred_loss, dLoss_dypred @torch.no_grad() def fc_backward(dLoss_dnextz, z, w):", "function!') exit() new_connections.append({key1: value1}) if not len(new_connections) == len(connections): print('Generate", "{} Conv2d_params['layer_name'] = layer_name # in_channel in_channel = tmp_layer.__dict__.get('in_channels') Conv2d_params['in_channel']", "return_dz[i] = dLoss_dz print('======================== {0:3} Layer: '.format(str(len(parameters)-1-i))+'{0:11}'.format(layer['layer_name'])+' Backward End ==========================')", "z) dLoss_dfcB = torch.sum(dLoss_dnextz, dim=0) print('# dz.shape: ', list(dLoss_dz.shape)) print('#", "@torch.no_grad() def find_start_end(list_dic_key_value, i, j): key1 = list(list_dic_key_value[i].values())[0] key2 =", "use this file except in compliance with the License. 
#", "0, 1), torch.swapaxes(padding_next_dz, 0, 1)) db = torch.sum(torch.sum(torch.sum(next_dz, axis=-1), axis=-1),", "return_dz[i][j][k] = tmp_dLoss_dz[-1] print('=========================== {0:3} Branch: '.format(str(len(parameters)-1-i))+'{0:11}'.format(tmp_layer['layer_name'])+' Backward End ======================')", "str(model) or 'AlexNet' in str(model): print(judge_tensors_equal(dLoss_dW, model.features[0].weight.grad)) elif 'ResNet' in", "= eps # return fc_conv_weights[i] = layer.weight parameters[i] = BatchNorm2d_params", "delete_allpths(pth_dir=None) return return_dz, dLoss_dW, dLoss_dB @torch.no_grad() def make_dot(var, params=None): \"\"\"", "'Linear': weight_z = fc_conv_weights[-1] z = featuremap[-1] dLoss_dz, dLoss_dW, dLoss_dB", "layer['layer_name'] == 'BatchNorm2d': eps = layer['eps'] z = featuremap[-1] gamma", "Results End ===========================\\n') if 'GoogLeNet' in str(model).split('\\n')[0]: g = make_dot(y[0])", "= tmp_layer.__dict__.get('stride') if not isinstance(stride, tuple): Conv2d_params['stride'] = (stride, stride)", "MaxPool2d_params elif isinstance(tmp_layer, nn.AvgPool2d): layer_name = 'AvgPool2d' AvgPool2d_params = {}", "not len(new_connections) == len(connections): print('Generate connections not done! Check generate_connections", "'Dropout': p = layer['p'] mask = featuremap[-1] dLoss_dz = dropback_backward(dLoss_dz,", "stride stride = layer.__dict__.get('stride') if not isinstance(stride, tuple): MaxPool2d_params['stride'] =", "error += 1 #print('Error rate: ', error/(C*N)) print('2D-error-rate: ', end='", "pass for i in range(num_Throwed): last_connections.remove('Throwed') if last_connections[-1] == {'None':", "= tmp_layer.__dict__.get('num_features') BatchNorm2d_params['num_features'] = num_features # eps eps = tmp_layer.__dict__.get('eps')", "return parameters.append(MaxPool2d_params) elif isinstance(layer, nn.AvgPool2d): layer_name = 'AvgPool2d' AvgPool2d_params =", "nn import torch.nn.functional as F from graphviz import Digraph, render", "for j in range(len(last_connections[i])): return_layers[0].append([]) if len(last_connections[i][j]) == 0: continue", "elif isinstance(layer, nn.BatchNorm2d): layer_name = 'BatchNorm2d' BatchNorm2d_params = {} BatchNorm2d_params['layer_name']", "+= 1 for i in range(len(last_tensors)-1, -1, -1): if isinstance(last_tensors[i],", "tensor_A.detach().numpy() np_B = tensor_B.detach().numpy() if len(tensor_A.shape) == 4: N, C,", "'GoogLeNet' in str(model).split('\\n')[0]: loss_torch = Loss(result[0], label) else: loss_torch =", "== list(list_dic_key_value[index].keys())[0]: end = index break return start+1, end-1 @torch.no_grad()", "parameters = copy.deepcopy(return_layers) fc_conv_weights = copy.deepcopy(return_layers) for i in range(len(return_layers)):", "i @torch.no_grad() def get_layers(last_connections, model): return_layers = [] tmp_layers =", "swap_flip_K = torch.swapaxes(flip_K, 0, 1) ppadding_next_dz = F.pad(padding_next_dz, pad=(k2-1-padding[1],k2-1-padding[1],\\ k1-1-padding[0],k1-1-padding[0],0,0),", "dLoss_dz = torch.mul(torch.where(torch.eq(mask, 1.), next_dz, zeros_tensor), 1./(1.-p)) print('# dz.shape: ',", "not isinstance(return_tensors[i], str): print('=========', i, return_tensors[i].shape) print('================') ''' import copy", "else: return z @torch.no_grad() def conv_backward(next_dz, K, z, padding=(0, 0),", "stride) else: AvgPool2d_params['stride'] = stride # padding padding = tmp_layer.__dict__.get('padding')", "else: print('Not completed in gradient_backward_v1!') 
print('======================== {0:3} Layer: '.format(str(i))+'{0:9}'.format(layer['layer_name'])+' Backward", "conv_backward(tmp_dLoss_dz[-1], weight_z, z, padding, stride) return_dz[i][j][k] = tmp_dLoss_dz[-1] elif tmp_layer['layer_name']", "print('# z.shape: ', list(z.shape)) zeros_tensor = torch.zeros_like(next_dz) dLoss_dz = torch.where(torch.gt(z,", "dLoss_dz.reshape(lastpop.shape) elif layer['layer_name'] == 'BatchNorm2d': eps = layer['eps'] z =", "p # return parameters.append(Dropout_params) elif isinstance(layer, nn.BatchNorm2d): layer_name = 'BatchNorm2d'", "start = index break for index in range(len(list_dic_key_value)): if key2", "'View' parameters[i][j][k] = {'layer_name': layer_name} elif tmp_layer == 'Cat': layer_name", "def mul_items(tensor_size): x = list(tensor_size) mul = 1. for i", "= max_pooling_backward(dLoss_dz, z, pooling, stride, padding) return_dz[i] = dLoss_dz elif", "print('# Restore network model: processing {}/{}'.format(i, len(connections)-1)) item_key = list(connections[i].keys())[0]", "dbias.shape: ', list(db.shape)) return dz, (dK/N).transpose(0,1), db/N @torch.no_grad() def _conv_forward(x,", "2: N, C = tensor_A.shape for n in range(N): for", "+ tmp_dLoss_dz[1] else: print('Not completed in gradient_backward!') print('# Torch calculated", "'ReLU': z = featuremap[-1] dLoss_dz = relu_backward(dLoss_dz, z) return_dz.append(dLoss_dz) lastpop", "if not isinstance(padding, tuple): MaxPool2d_params['padding'] = (padding, padding) else: MaxPool2d_params['padding']", "elif layer['layer_name'] == 'AvgPool2d': z = featuremap[i] pooling = layer['kernel_size']", "add_backward(dLoss_dnextz): print('# next_dz.shape: ', list(dLoss_dnextz.shape)) dLoss_dz = dLoss_dnextz print('# dz.shape:", "x_pad = x_pad.unfold(2, k, strides[0]) x_pad = x_pad.unfold(3, j, strides[1])", "layer_name # in_features in_features = tmp_layer.__dict__.get('in_features') Linear_params['in_features'] = in_features #", "W1 = next_dz.shape print('# next_dz.shape: ', list(next_dz.shape)) print('# z.shape: ',", "print('Self calculated loss: ', loss) featuremap.pop() return_dz.append(dLoss_dz) dW_dB_fc_conv = []", "elif layer['layer_name'] == 'Linear': weight_z = fc_conv_weights[-1] z = featuremap[-1]", "for j in torch.arange(out_w): flat_idx = torch.argmax(padding_z[n, c, strides[0] *", "labels[graph[i].split('\\t')[1].split(' ')[0]]=\\ graph[i].split('\\t')[1].split('=')[1].split(']')[0] for i in range(len(graph)): if '->' in", "continue value = list(item.values())[0] last_key = list(tmp_split[-1].keys())[0] if value ==", "nodes are the Variables that require grad, orange are Tensors", "def get_featuremap(featuremap_dir=None): import os featuremap = [] if featuremap_dir ==", "torch.log(y_probability)), dim=1, keepdim=True)) dLoss_dypred = y_probability - y_true print('# dLoss_dypred.shape:", "i in range(len(graph)): if 'label' in graph[i] and graph[i][-1] ==", "in compliance with the License. # You may obtain a", "x.shape d, c, k, j = weight.shape x_pad = x", "strides[1]) out = torch.einsum( 'nchwkj,dckj->ndhw', x_pad, weight) return out @torch.no_grad()", "'+key+' or '+value+'! 
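The ReLU and dropout fragments share one masking idea: the gradient passes only where the forward pass let values through, and inverted dropout rescales the surviving gradient by 1/(1-p). A short sketch of both rules; dropout_backward and its mask argument are illustrative stand-ins for the original dropback_backward.

import torch

@torch.no_grad()
def relu_backward(dout, z):
    # gradient flows only where the pre-activation was positive
    return torch.where(z > 0, dout, torch.zeros_like(dout))

@torch.no_grad()
def dropout_backward(dout, mask, p):
    # inverted dropout: surviving positions were scaled by 1/(1-p) in the forward pass,
    # so the same mask and scaling apply to the gradient
    return torch.where(mask == 1.0, dout, torch.zeros_like(dout)) / (1.0 - p)

z = torch.randn(2, 5, requires_grad=True)
y = torch.relu(z)
y.backward(torch.ones_like(y))
print(torch.allclose(z.grad, relu_backward(torch.ones_like(y), z.detach())))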
Check exchange_name function!') exit() new_connections.append({key1: value1}) if", "in torch.arange(out_h): for j in torch.arange(out_w): flat_idx = torch.argmax(padding_z[n, c,", "software # distributed under the License is distributed on an", "nn.AdaptiveAvgPool2d): layer_name = 'AdaptiveAvgPool2d' AdaptiveAvgPool2d_params = {} AdaptiveAvgPool2d_params['layer_name'] = layer_name", "if hasattr(var, 'saved_tensors'): for t in var.saved_tensors: dot.edge(str(id(t)), str(id(var))) add_nodes(t)", "p = tmp_layer.__dict__.get('p') Dropout_params['p'] = p # return parameters[i][j][k] =", "'.format(str(len(parameters)-1-i))+'{0:11}'.format(layer['layer_name'])+' Backward Start ========================') if layer['layer_name'] == 'Conv2d': z =", "p = layer['p'] mask = featuremap[-1] dLoss_dz = dropback_backward(dLoss_dz, mask,", "k, strides[0]) x_pad = x_pad.unfold(3, j, strides[1]) out = torch.einsum(", "BatchNorm2d_params['eps'] = eps # return fc_conv_weights.append(layer.weight) parameters.append(BatchNorm2d_params) elif isinstance(layer, nn.Linear):", "0)): print('# next_dz.shape: ', list(next_dz.shape)) print('# z.shape: ', list(z.shape)) print('#", "_remove_padding(padding_dz, padding) # padding_z[:, :, padding[0]:-padding[0], padding[1]:-padding[1]] print('# dz.shape: ',", "h_in, w_in = x.shape d, c, k, j = weight.shape", "(pooling[0] * pooling[1]) dz = _remove_padding(padding_dz, padding) # padding_z[:, :,", "1 #print('Error rate: ', error/(C*N)) print('2D-error-rate: ', end=' ') return", "padding) print('# strides: ', strides) padding_next_dz = _insert_zeros(next_dz, strides) flip_K", "', list(dLoss_dfcW.shape)) print('# dbias.shape: ', list(dLoss_dfcB.shape)) return dLoss_dz, dLoss_dfcW/N, dLoss_dfcB/N", "label) else: loss_torch = Loss(result, label) _, connections = generate_connections(g)", "tmp[0]) if not list(last_connections[i][j][k-1].keys())[0].split('_')[0] == 'Dropout': index_tmp_layers = tmp[1] +", "dot.edge(str(id(u[0])), str(id(var))) add_nodes(u[0]) if hasattr(var, 'saved_tensors'): for t in var.saved_tensors:", "print('======================== {0:3} Layer: '.format(str(len(parameters)-1-i))+'{0:11}'.format(layer['layer_name'])+' Backward End ==========================') elif isinstance(layer, list):", "if np_A[n,c]-np_B[n,c] > error_tolerance or np_B[n,c]-np_A[n,c] > error_tolerance: #print(np_A[n,c], np_B[n,c])", "merge_connections(connections): import copy last_connections = copy.deepcopy(connections) connections.append({'None':'None'}) num_Throwed = 0", "i in range(len(return_layers)): print(i, return_layers[i]) print('================') print('================') for i in", "layer.__dict__.get('out_channels') Conv2d_params['out_channel'] = out_channel # kernel_size kernel_size = layer.__dict__.get('kernel_size') if", "# return fc_conv_weights[i][j][k] = tmp_layer.weight parameters[i][j][k] = BatchNorm2d_params elif isinstance(tmp_layer,", "(padding, padding) else: Conv2d_params['padding'] = padding # return fc_conv_weights[i] =", "return_dz.append(dLoss_dz) fc_conv_weights.pop() if not len(featuremap) == 1: lastpop = featuremap.pop()", "connections: key, value = list(item.items())[0] key1 = exchange_name(key.split('_')[0]) + '_'", "been processed in get_structure_parameters_v1!') return parameters, fc_conv_weights @torch.no_grad() def delete_allpths(pth_dir=None):", "return parameters[i] = MaxPool2d_params elif isinstance(layer, nn.AvgPool2d): layer_name = 'AvgPool2d'", "Variable @torch.no_grad() def cross_entropy_loss(y_predict, y_true): 
print('\\n=========================== Layer:'+' {0:18}'.format('cross_entropy_loss')+' Start ===========================')", "list(z.shape)) print('# weight.shape: ', list(w.shape)) print('# bias.shape: ', '['+str(dLoss_dnextz.shape[1])+']') N", "tuple(ax) dxhut = torch.zeros_like(next_dz) for c in range(C): dxhut[:,c] =", "if error%20 == 0: pass print('error', np_A[n,c,h,w], np_B[n,c,h,w]) else: if", "dLoss_dz, dLoss_dfcW/N, dLoss_dfcB/N @torch.no_grad() def view_backward(dLoss_dnextz, last_z, params): print('# next_dz.shape:", "dirs, files in os.walk(pth_dir, topdown=False): for name in files: if", "= layer['stride'] padding = layer['padding'] dLoss_dz = max_pooling_backward(dLoss_dz, z, pooling,", "padding_dz = torch.zeros_like(padding_z) for n in torch.arange(N): for c in", "(padding, padding) else: AvgPool2d_params['padding'] = padding # return parameters[i] =", "elif tmp_layer == 'Add': layer_name = 'Add' parameters[i][j][k] = {'layer_name':", "'Add' parameters[i][j][k] = {'layer_name': layer_name} elif tmp_layer == 'View': layer_name", "list(connections[j-1].keys())[0]: notchoosed.append(i) start, end = find_start_end(connections, i, j-1) tmp =", "fc_conv_weights def gradient_backward_v2(model, img, label, num_class=1000, g_view=False): x = Variable(img)", "range(len(return_tensors)): if not isinstance(return_tensors[i], list) and not isinstance(return_tensors[i], str): print('=========',", "len(featuremap[i-1][j]): z = featuremap[i] else: z = featuremap[i-1][j][k+1] weight_z =", "connections, new_connections @torch.no_grad() def get_split_connections(connections): return_connections = [] tmp_split =", "not in seen: if torch.is_tensor(var): dot.node(str(id(var)), size_to_str(var.size()), fillcolor='orange') elif hasattr(var,", "size_to_str(var.size()), fillcolor='orange') elif hasattr(var, 'variable'): u = var.variable name =", "isinstance(tmp_layer, nn.AdaptiveAvgPool2d): layer_name = 'AdaptiveAvgPool2d' AdaptiveAvgPool2d_params = {} AdaptiveAvgPool2d_params['layer_name'] =", "'Conv2d': if k+1 >= len(featuremap[i-1][j]): z = featuremap[i] else: z", "list(dLoss_dz.shape)) return dLoss_dz @torch.no_grad() def relu_backward(next_dz, z): print('# next_dz.shape: ',", "i + flat_idx // pooling[1] w_idx = strides[1] * j", "Layer: '.format(str(len(parameters)-1-i))+'{0:11}'.format(layer['layer_name'])+' Backward End ==========================') elif isinstance(layer, list): import copy", "for '+key+' or '+value+'! 
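The cross_entropy_loss fragments shift the logits by their row maximum, take the softmax, and return prob - y_true as the gradient, deferring the 1/N factor to the later weight updates. A minimal sketch of that identity, compared against F.cross_entropy (which already averages over the batch); softmax_ce_backward is an illustrative name.

import torch
import torch.nn.functional as F

@torch.no_grad()
def softmax_ce_backward(logits, y_onehot):
    # subtract the row max before exponentiating, for numerical stability
    shifted = logits - logits.max(dim=1, keepdim=True).values
    prob = torch.exp(shifted) / torch.exp(shifted).sum(dim=1, keepdim=True)
    loss = torch.mean(-torch.sum(y_onehot * torch.log(prob), dim=1))
    # per-sample gradient of the cross-entropy w.r.t. the logits
    return loss, prob - y_onehot

logits = torch.randn(4, 10, requires_grad=True)
target = torch.tensor([1, 3, 0, 7])
loss, dz = softmax_ce_backward(logits.detach(), F.one_hot(target, 10).float())
F.cross_entropy(logits, target).backward()
# F.cross_entropy averages over the batch, so its gradient carries an extra 1/N
print(torch.allclose(logits.grad, dz / logits.shape[0], atol=1e-6))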
Check exchange_name function!') exit() new_connections.append({key1: value1})", "error/(C*N)) print('2D-error-rate: ', end=' ') return error/(C*N) @torch.no_grad() def get_featuremap(featuremap_dir=None):", "Start ===========================') print('# y_predict.shape: ', list(y_predict.shape)) print('# y_true.shape: ', list(y_true.shape))", "z = featuremap[i-1][j][k+1] weight_z = fc_conv_weights[i][j][k] try: padding = tmp_layer['padding']", "next_dz.shape: ', list(next_dz.shape)) print('# z.shape: ', list(z.shape)) zeros_tensor = torch.zeros_like(next_dz)", "kernel_size = layer.__dict__.get('kernel_size') if not isinstance(kernel_size, tuple): Conv2d_params['kernel_size'] = (kernel_size,", "output_size) else: AdaptiveAvgPool2d_params['output_size'] = output_size # return parameters[i] = AdaptiveAvgPool2d_params", "= featuremap_dir files = os.listdir(pth_dir) file_nums = [] for i", "Produces Graphviz representation of PyTorch autograd graph Blue nodes are", "for i in range(len(connections)): item = connections[i] if len(tmp_split) ==", "= view_backward(dLoss_dz, last_z, params) return_dz[i] = dLoss_dz elif layer['layer_name'] ==", "limitations under the License. #!/usr/bin/python3 import torch import torch.nn as", "not isinstance(output_size, tuple): AdaptiveAvgPool2d_params['output_size'] = (output_size, output_size) else: AdaptiveAvgPool2d_params['output_size'] =", "= featuremap[-1] weight_z = fc_conv_weights[-1] try: padding = layer['padding'] except:", "elif not list(last_connections[i-1].keys())[0].split('_')[0] == 'Dropout': index_tmp_layers = tmp[1] + 1", "# kernel_size kernel_size = tmp_layer.__dict__.get('kernel_size') if not isinstance(kernel_size, tuple): MaxPool2d_params['kernel_size']", "fc_conv_weights[i] dLoss_dz = batchnorm2d_backward(dLoss_dz, z, eps, gamma) return_dz[i] = dLoss_dz", "list(dz.shape)) return dz @torch.no_grad() def average_pooling_backward(next_dz, z, pooling, strides, padding=(0,", "output_size) else: AdaptiveAvgPool2d_params['output_size'] = output_size # return parameters[i][j][k] = AdaptiveAvgPool2d_params", "dLoss_dfcW = torch.matmul(dLoss_dnextz.t(), z) dLoss_dfcB = torch.sum(dLoss_dnextz, dim=0) print('# dz.shape:", "c in range(C): dxhut[:,c] = next_dz[:,c]*gamma[c] dz1 = m*dxhut mu", "get_featuremap(featuremap_dir=None): import os featuremap = [] if featuremap_dir == None:", "index in range(len(list_dic_key_value)): if key2 == list(list_dic_key_value[index].keys())[0]: end = index", "= batchnorm2d_backward(dLoss_dz, z, eps, gamma) return_dz[i] = dLoss_dz print('======================== {0:3}", "= (stride, stride) else: MaxPool2d_params['stride'] = stride # padding padding", "of two compard tensors is not equal.') return None error", "# return fc_conv_weights[i] = layer.weight parameters[i] = Conv2d_params elif isinstance(layer,", "= z.shape[0] if len(z.shape) == 4: z = z.view(z.size(0), -1)", "dot.node(str(id(var)), str(type(var).__name__)) seen.add(var) if hasattr(var, 'next_functions'): for u in var.next_functions:", "not notchoosed == []: last_connections = last_connections[:notchoosed[0]] else: pass for", "layer['padding'] dLoss_dz = average_pooling_backward(dLoss_dz, z, pooling, stride, padding) return_dz.append(dLoss_dz) lastpop", "stride) else: AvgPool2d_params['stride'] = stride # padding padding = layer.__dict__.get('padding')", "with the License. 
# You may obtain a copy of", "= torch.matmul(dLoss_dnextz, w) #delta dLoss_dfcW = torch.matmul(dLoss_dnextz.t(), z) dLoss_dfcB =", "i in range(len(featuremap)): if not isinstance(featuremap[i], list): print('=========', i, featuremap[i].shape)", "Backward End ==========================') elif isinstance(layer, list): import copy tmp_dLoss_dz =", "fc_conv_weights = get_structure_parameters_v1(model) featuremap = get_featuremap(featuremap_dir=None) featuremap.insert(0, img) ### y_true", "== 0: tmp_split.append(item) continue value = list(item.values())[0] last_key = list(tmp_split[-1].keys())[0]", "== 'Conv2d': if k+1 >= len(featuremap[i-1][j]): z = featuremap[i] else:", "max_pooling_backward(next_dz, z, pooling, strides, padding=(0, 0)): print('# next_dz.shape: ', list(next_dz.shape))", "# 在高度、宽度上相加;批量大小上相加 print('# dz.shape: ', list(dz.shape)) print('# dweight.shape: ', list(dK.transpose(0,1).shape))", "network model: processing {}/{}'.format(i, len(connections)-1)) item_key = list(connections[i].keys())[0] if not", "== len(lastpop.shape): dLoss_dz = dLoss_dz.reshape(lastpop.shape) elif layer['layer_name'] == 'ReLU': z", "j): key1 = list(list_dic_key_value[i].values())[0] key2 = list(list_dic_key_value[j].keys())[0] start = 0", "featuremap = [] if featuremap_dir == None: pth_dir = \"./tmp_file/\"", "len(layers)): layer = layers[i] if name in str(layer): return layer,", "= [] print('\\n=========================== Restore network model Start ===============================') for i", "out_features out_features = tmp_layer.__dict__.get('out_features') Linear_params['out_features'] = out_features # return fc_conv_weights[i][j][k]", "last_z, params): print('# next_dz.shape: ', list(dLoss_dnextz.shape)) print('# last_z.shape: ', list(last_z.shape))", "def delete_allpths(pth_dir=None): import os if pth_dir == None: pth_dir =", "print('# next_dz.shape: ', list(dLoss_dnextz.shape)) print('# last_z.shape: ', list(last_z.shape)) if params:", "dim=1, keepdim=True)) dLoss_dypred = y_probability - y_true print('# dLoss_dypred.shape: ',", "featuremap_dir files = os.listdir(pth_dir) file_nums = [] for i in", "index_tmp_layers) return_layers[0][j].insert(0, tmp[0]) if not list(last_connections[i][j][k-1].keys())[0].split('_')[0] == 'Dropout': index_tmp_layers =", "dLoss_dz = batchnorm2d_backward(dLoss_dz, z, eps, gamma) return_dz[i] = dLoss_dz print('========================", "range(len(layer)): tmp_dLoss_dz.append(copy.deepcopy(dLoss_dz)) for k in range(len(layer[j])): tmp_layer = layer[j][k] print('\\n===========================", "def get_layers(last_connections, model): return_layers = [] tmp_layers = [] for", "= copy.deepcopy(last_connections) featuremap = return_tensors featuremap.append(img) y_true = F.one_hot(label, num_classes=num_class).float()", "H_last, strides[0]): for w in range(0, W_last, strides[1]): pz[n,d,h,w] =", "dLoss_dz.reshape(lastpop.shape) elif layer['layer_name'] == 'MaxPool2d': z = featuremap[-1] pooling =", "(graph[i]+graph[i+1][1:]).split('\\t')[1].split('\"')[1] if 'label' in graph[i] and graph[i][-1] == ']': labels[graph[i].split('\\t')[1].split('", "connections.append({'None':'None'}) num_Throwed = 0 notchoosed = [] print('\\n=========================== Restore network", "j in torch.arange(out_w): flat_idx = torch.argmax(padding_z[n, c, strides[0] * i:strides[0]", "= layer.__dict__.get('p') Dropout_params['p'] = p # return parameters[i] = Dropout_params", "out_features # return fc_conv_weights[i] = layer.weight parameters[i] = Linear_params elif", "tensor_A.shape == 
tensor_B.shape): print('Shape of two compard tensors is not", "fc_conv_weights.append(layer.weight) parameters.append(Linear_params) elif isinstance(layer, nn.AdaptiveAvgPool2d): layer_name = 'AdaptiveAvgPool2d' AdaptiveAvgPool2d_params =", "else: AvgPool2d_params['stride'] = stride # padding padding = layer.__dict__.get('padding') if", "tensor_A.shape for n in range(N): for c in range(C): for", "(padding, padding) else: AvgPool2d_params['padding'] = padding # return parameters[i][j][k] =", "= generate_connections(g) last_connections = merge_connections(connections) return_layers = get_layers(last_connections, model) return_tensors", "= torch.sub(y_predict, torch.max(y_predict, dim=1, keepdim=True).values) y_exp = torch.exp(y_shift) y_probability =", "in range(len(layer)): for k in range(len(layer[j])): tmp_layer = layer[j][k] ###", "'AddmmBackward' in name: return 'Linear' elif 'ViewBackward' in name: return", "dLoss_dz = dLoss_dnextz.reshape(last_z.shape) print('# dz.shape: ', list(dLoss_dz.shape)) return dLoss_dz def", "in current_layer_name: last_tensors[i] = 'View' else: last_tensors[i] = tensors[index_tensors] index_tensors", "= {'layer_name': layer_name} elif layer == 'Add': layer_name = 'Add'", "get_structure_parameters_v1(model): layers = [] for layer in model.modules(): if not", "', list(z.shape)) print('# padding: ', padding) print('# strides: ', strides)", "print('# next_dz.shape: ', list(next_dz.shape)) print('# z.shape: ', list(z.shape)) zeros_tensor =", "error/C) print('1D-error-rate: ', end=' ') return error/C elif len(tensor_A.shape) ==", "isinstance(padding, tuple): Conv2d_params['padding'] = (padding, padding) else: Conv2d_params['padding'] = padding", "{0:3} Layer: '.format(str(i))+'{0:9}'.format(layer['layer_name'])+' Backward Start ========================') if layer['layer_name'] == 'Conv2d':", "# return parameters[i][j][k] = AdaptiveAvgPool2d_params ### else: print('The layer has", "elif layer['layer_name'] == 'MaxPool2d': z = featuremap[i] pooling = layer['kernel_size']", "express or implied. # See the License for the specific", "(W-1)*(strides[1]-1) + W pz = torch.zeros(N, D, H_last, W_last) for", "except in compliance with the License. 
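The judge_tensors_equal fragments count elementwise disagreements above a fixed 0.001 tolerance, with one branch per tensor rank. The same check can be written shape-agnostically; a small sketch follows, with torch.allclose as the built-in alternative (error_rate is an illustrative name).

import torch

@torch.no_grad()
def error_rate(tensor_a, tensor_b, tol=1e-3):
    if tensor_a.shape != tensor_b.shape:
        raise ValueError('Shapes of the two compared tensors are not equal.')
    # fraction of elements whose absolute difference exceeds the tolerance
    mismatched = (tensor_a - tensor_b).abs() > tol
    return mismatched.float().mean().item()

a = torch.randn(2, 3, 4, 4)
b = a + 1e-5 * torch.randn_like(a)
print(error_rate(a, b))            # expected: 0.0
print(torch.allclose(a, b, atol=1e-3))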
# You may obtain", "tmp_layers = [] for layer in model.modules(): if not ':'", "output_size # return parameters.append(AdaptiveAvgPool2d_params) else: print('The layer has not been", "z.shape: ', list(z.shape)) print('# weight.shape: ', list(w.shape)) print('# bias.shape: ',", "print('# dz.shape: ', list(dLoss_dz.shape)) return dLoss_dz @torch.no_grad() def max_pooling_backward(next_dz, z,", "if key1 == list(list_dic_key_value[index].keys())[0]: start = index break for index", "return parameters[i] = Dropout_params elif isinstance(layer, nn.BatchNorm2d): layer_name = 'BatchNorm2d'", "return parameters[i][j][k] = AdaptiveAvgPool2d_params ### else: print('The layer has not", "= 'View' else: last_tensors[i] = tensors[index_tensors] index_tensors += 1 else:", "= {id(v): k for k, v in params.items()} node_attr =", "in range(N): for d in range(D): for h in range(0,", "tuple): MaxPool2d_params['padding'] = (padding, padding) else: MaxPool2d_params['padding'] = padding #", "isinstance(tmp_layer, nn.Linear): layer_name = 'Linear' Linear_params = {} Linear_params['layer_name'] =", "graph[i].split('\\t')[1].split(' -> ')[1]}) pop_index = [] for i in range(len(connections)):", "in current_layer_name: last_tensors[i][j][k] = 'View' else: last_tensors[i][j][k] = tensors[index_tensors] index_tensors", "else: return_connections.append(tmp_split) tmp_split = [item] return return_connections @torch.no_grad() def find_start_end(list_dic_key_value,", "elif 'BatchNorm' in name: return 'BatchNorm2d' elif 'Conv' in name:", "zeros_tensor) print('# dz.shape: ', list(dLoss_dz.shape)) return dLoss_dz @torch.no_grad() def dropback_backward(next_dz,", "parameters, fc_conv_weights = get_structure_parameters_v1(model) featuremap = get_featuremap(featuremap_dir=None) featuremap.insert(0, img) ###", "elif len(tensor_A.shape) == 2: N, C = tensor_A.shape for n", "# Licensed under the Apache License, Version 2.0 (the \"License\");", "p = layer.__dict__.get('p') Dropout_params['p'] = p # return parameters[i] =", "{} Dropout_params['layer_name'] = layer_name # p p = tmp_layer.__dict__.get('p') Dropout_params['p']", "== 'Conv2d': z = featuremap[-1] weight_z = fc_conv_weights[-1] try: padding", "elif isinstance(tmp_layer, nn.Linear): layer_name = 'Linear' Linear_params = {} Linear_params['layer_name']", "get_featuremap(featuremap_dir=None) index_tensors = 0 import copy last_tensors = copy.deepcopy(last_connections) for", "'.format(str(len(parameters)-1-i))+'{0:11}'.\\ format(layer['layer_name'])+' Backward End ==========================') continue p = layer['p'] mask", "= dLoss_dz elif layer['layer_name'] == 'View': last_z = featuremap[i+1] if", "'AvgPool2d' AvgPool2d_params = {} AvgPool2d_params['layer_name'] = layer_name # kernel_size kernel_size", "graph[i] and graph[i][-1] == '\"': labels[(graph[i]+graph[i+1][1:]).split('\\t')[1].split(' ')[0]]=\\ (graph[i]+graph[i+1][1:]).split('\\t')[1].split('\"')[1] if 'label'", "padding[0]:-padding[0], padding[1]:-padding[1]] print('# dz.shape: ', list(dz.shape)) return dz @torch.no_grad() def", "model Start ===============================') for i in range(len(connections)): print('# Restore network", "list): print('\\n======================== {0:3} Layer: '.format(str(len(parameters)-1-i))+'{0:11}'.format(layer['layer_name'])+' Backward Start ========================') if layer['layer_name']", "current_layer_name = list(last_connections[i][j][k].keys())[0].split('_')[0] if 'Add' in current_layer_name: last_tensors[i][j][k] = 'Add'", "num_classes=num_class).float() loss, dLoss_dz = 
cross_entropy_loss(featuremap[-1], y_true) print('Self calculated loss: ',", "padding: ', padding) print('# strides: ', strides) N, C, H,", "dz.shape H_last = (H-1)*(strides[0]-1) + H W_last = (W-1)*(strides[1]-1) +", "= get_structure_parameters_v1(model) featuremap = get_featuremap(featuremap_dir=None) featuremap.insert(0, img) ### y_true =", "last_item_key = list(connections[i-1].keys())[0] if not connections[i][item_key] == last_item_key: for j", "CONDITIONS OF ANY KIND, either express or implied. # See", "0: return z[:, :, :, padding[1]:-padding[1]] else: return z @torch.no_grad()", "1 if error%20 == 0: pass print('error', np_A[n,c,h,w], np_B[n,c,h,w]) else:", "1./torch.pow(var+eps, 0.5) dz2 = (ivar**2)*((dxhut*xmu).sum(axis=axis, keepdim=True))*xmu dz3 = dxhut.sum(axis=axis, keepdim=True)", "if 'Add' in current_layer_name: last_tensors[i] = 'Add' elif 'View' in", "return fc_conv_weights.append(layer.weight) parameters.append(Linear_params) elif isinstance(layer, nn.AdaptiveAvgPool2d): layer_name = 'AdaptiveAvgPool2d' AdaptiveAvgPool2d_params", "x): delete_allpths(pth_dir=None) print('\\n=========================== Store network model Results Start =========================') y", "':' in str(layer): layers.append(layer) parameters = [] fc_conv_weights = []", "return_dz.append(dLoss_dz) dW_dB_fc_conv = [] for i in range(len(parameters)-1, -1, -1):", "Check generate_connections function!') exit() new_connections.insert(0, {list(new_connections[0].values())[0]: None}) new_connections.append({'None': 'None'}) return", "H, W = z.shape D, C, k1, k2 = K.shape", "str(layer): tmp_layers.append(layer) index_tmp_layers = 0 for i in range(len(last_connections)-1, -1,", "or 'None' in value1: print('Not completed for '+key+' or '+value+'!", "= list(last_connections[i][j][k].keys())[0].split('_')[0] if 'ReLU' in current_layer_name: return_layers[0][j].insert(0, torch.nn.ReLU(inplace=True)) elif 'Add'", "ypred_loss, dLoss_dypred @torch.no_grad() def fc_backward(dLoss_dnextz, z, w): print('# next_dz.shape: ',", "hasattr(var, 'saved_tensors'): for t in var.saved_tensors: dot.edge(str(id(t)), str(id(var))) add_nodes(t) print(var)", "np_B[n,c,h,w]) #print('Error rate: ', error/(N*C*H*W)) print('4D-error-rate: ', end=' ') return", "None else '' node_name = '%s\\n %s' % (name, size_to_str(u.size()))", "make_dot(y) return g @torch.no_grad() def exchange_name(name): if 'Relu' in name:", "layer.__dict__.get('p') Dropout_params['p'] = p # return parameters.append(Dropout_params) elif isinstance(layer, nn.BatchNorm2d):", "= torch.mean(-torch.sum(torch.mul(y_true, torch.log(y_probability)), dim=1, keepdim=True)) dLoss_dypred = y_probability - y_true", "= batchnorm2d_backward(tmp_dLoss_dz[-1], z, eps, gamma) return_dz[i][j][k] = tmp_dLoss_dz[-1] print('=========================== {0:3}", "'Add' in current_layer_name: last_tensors[i][j][k] = 'Add' elif 'View' in current_layer_name:", "range(end-start): last_connections.insert(start, 'Throwed') num_Throwed += 1 break if not notchoosed", "in_features in_features = tmp_layer.__dict__.get('in_features') Linear_params['in_features'] = in_features # out_features out_features", "for kk in range(end-start): last_connections.insert(start, 'Throwed') num_Throwed += 1 break", "torch.zeros_like(next_dz) for c in range(C): dxhut[:,c] = next_dz[:,c]*gamma[c] dz1 =", "weight, strides=(1,1)): n, c, h_in, w_in = x.shape d, c,", "+ 1 return return_layers @torch.no_grad() def get_tensors(last_connections): tensors = get_featuremap(featuremap_dir=None)", "@torch.no_grad() def 
view_backward(dLoss_dnextz, last_z, params): print('# next_dz.shape: ', list(dLoss_dnextz.shape)) print('#", "padding # return parameters[i] = AvgPool2d_params elif isinstance(layer, nn.Dropout): layer_name", "D, C, k1, k2 = K.shape N, D, H1, W1", "-1): if not isinstance(last_connections[i], list): current_layer_name = list(last_connections[i].keys())[0].split('_')[0] if 'Add'", "dLoss_dz @torch.no_grad() def max_pooling_backward(next_dz, z, pooling, strides, padding=(0, 0)): print('#", "'('+(', ').join(['%d' % v for v in size])+')' def add_nodes(var):", "@torch.no_grad() def cross_entropy_loss(y_predict, y_true): print('\\n=========================== Layer:'+' {0:18}'.format('cross_entropy_loss')+' Start ===========================') print('#", "mask, p): print('# zeros probability: ', p) print('# next_dz.shape: ',", "isinstance(kernel_size, tuple): AvgPool2d_params['kernel_size'] = (kernel_size, kernel_size) else: AvgPool2d_params['kernel_size'] = kernel_size", "len(new_connections) == len(connections): print('Generate connections not done! Check generate_connections function!')", "= 'View' parameters[i][j][k] = {'layer_name': layer_name} elif tmp_layer == 'Cat':", "= 'Cat' parameters[i][j][k] = {'layer_name': layer_name} elif isinstance(tmp_layer, nn.MaxPool2d): layer_name", "range(len(list_dic_key_value)): if key1 == list(list_dic_key_value[index].keys())[0]: start = index break for", "torch.swapaxes(padding_next_dz, 0, 1)) db = torch.sum(torch.sum(torch.sum(next_dz, axis=-1), axis=-1), axis=0) #", "= tensor_A.shape[0] for c in range(C): if np_A[c]-np_B[c] > error_tolerance", "= fc_conv_weights[-1] try: padding = layer['padding'] except: padding = (0,", "str): # Add or View if last_tensors[i] == 'Add': last_tensors[i]", "featuremap[i] pooling = layer['kernel_size'] stride = layer['stride'] padding = layer['padding']", "dLoss_dB = fc_backward(dLoss_dz, z, weight_z) return_dz[i] = dLoss_dz elif layer['layer_name']", "if 'label' in graph[i] and graph[i][-1] == '\"': labels[(graph[i]+graph[i+1][1:]).split('\\t')[1].split(' ')[0]]=\\", "label, num_class=1000, g_view=False): x = Variable(img) g = generate_g(model, x)", "featuremap[i] dLoss_dz, dLoss_dW, dLoss_dB = fc_backward(dLoss_dz, z, weight_z) return_dz[i] =", "[] for i in range(len(files)): if '.pth' in files[i]: file_nums.append(int(files[i].split('.pth')[0]))", "tmp_layer.__dict__.get('stride') if not isinstance(stride, tuple): Conv2d_params['stride'] = (stride, stride) else:", "= layer.__dict__.get('stride') if not isinstance(stride, tuple): MaxPool2d_params['stride'] = (stride, stride)", "dz1 = m*dxhut mu = z.mean(axis=axis, keepdim=True) xmu = z", "zeros_tensor = torch.zeros_like(mask) dLoss_dz = torch.mul(torch.where(torch.eq(mask, 1.), next_dz, zeros_tensor), 1./(1.-p))", "+ last_tensors[i+1][1][0] if last_tensors[i] == 'View': last_tensors[i] = last_tensors[i+1].view(last_tensors[i+1].size(0), -1)", "(output_size, output_size) else: AdaptiveAvgPool2d_params['output_size'] = output_size # return parameters.append(AdaptiveAvgPool2d_params) else:", "[] tmp_split = [] for i in range(len(connections)): item =", "Linear_params['layer_name'] = layer_name # in_features in_features = tmp_layer.__dict__.get('in_features') Linear_params['in_features'] =", "max_pooling_backward(dLoss_dz, z, pooling, stride, padding) return_dz.append(dLoss_dz) lastpop = featuremap.pop() if", "= 'View' else: last_tensors[i][j][k] = tensors[index_tensors] index_tensors += 1 for", "error_tolerance: #print(np_A[n,c], np_B[n,c]) error += 1 #print('Error rate: ', 
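The fully-connected backward fragments apply the textbook rules for a layer whose weight has shape (out_features, in_features): dz = dnext @ W, dW = dnext.T @ z, db = column sums of dnext, with the 1/N normalization applied afterwards. A standalone sketch checked against autograd; the simplified fc_backward here takes the weight directly rather than the fragments' fc_conv_weights bookkeeping.

import torch
import torch.nn as nn

@torch.no_grad()
def fc_backward(dnext, z, w):
    # forward was next_z = z @ w.T + b, with w of shape (out_features, in_features)
    dz = dnext.matmul(w)            # gradient w.r.t. the layer input
    dw = dnext.t().matmul(z)        # gradient w.r.t. the weight
    db = dnext.sum(dim=0)           # gradient w.r.t. the bias
    return dz, dw, db

fc = nn.Linear(6, 4)
z = torch.randn(3, 6, requires_grad=True)
y = fc(z)
y.backward(torch.ones_like(y))
dz, dw, db = fc_backward(torch.ones_like(y), z.detach(), fc.weight.detach())
print(torch.allclose(z.grad, dz), torch.allclose(fc.weight.grad, dw), torch.allclose(fc.bias.grad, db))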
error/(C*N))", "return 'Add' elif 'Cat' in name: return 'Cat' elif 'Hardtanh'", "elif isinstance(tmp_layer, nn.Dropout): layer_name = 'Dropout' Dropout_params = {} Dropout_params['layer_name']", "', list(dLoss_dz.shape)) return dLoss_dz @torch.no_grad() def dropback_backward(next_dz, mask, p): print('#", "')[0]]=\\ graph[i].split('\\t')[1].split('=')[1].split(']')[0] for i in range(len(graph)): if '->' in graph[i]:", "layer_name # kernel_size kernel_size = layer.__dict__.get('kernel_size') if not isinstance(kernel_size, tuple):", "Conv2d_params['padding'] = (padding, padding) else: Conv2d_params['padding'] = padding # return", "print('\\n======================== {0:3} Layer: '.format(str(i))+'{0:9}'.format(layer['layer_name'])+' Backward Start ========================') if layer['layer_name'] ==", "+= 1 break if not notchoosed == []: last_connections =", "in range(i+1, len(connections)): if not list(connections[j].values())[0] == list(connections[j-1].keys())[0]: notchoosed.append(i) start,", "= 'Add' parameters[i][j][k] = {'layer_name': layer_name} elif tmp_layer == 'View':", "or '+value+'! Check exchange_name function!') exit() new_connections.append({key1: value1}) if not", "if not isinstance(kernel_size, tuple): Conv2d_params['kernel_size'] = (kernel_size, kernel_size) else: Conv2d_params['kernel_size']", "eps = tmp_layer['eps'] z = featuremap[i-1][j][k+1] gamma = fc_conv_weights[i][j][k] tmp_dLoss_dz[-1]", "Args: var: output Variable params: dict of (name, Variable) to", "print('# eps: ', eps) print('# gamma.shape: ', list(gamma.shape)) N, C,", "Loss(result[0], label) else: loss_torch = Loss(result, label) _, connections =", "kernel_size kernel_size = layer.__dict__.get('kernel_size') if not isinstance(kernel_size, tuple): AvgPool2d_params['kernel_size'] =", "{0:3} Layer: '.format(str(len(parameters)-1-i))+'{0:11}'.format(layer['layer_name'])+' Backward End ==========================') elif isinstance(layer, list): import", "====================') if tmp_layer['layer_name'] == 'Conv2d': if k+1 >= len(featuremap[i-1][j]): z", "= output_size # return parameters.append(AdaptiveAvgPool2d_params) else: print('The layer has not", "= out_features # return fc_conv_weights[i] = layer.weight parameters[i] = Linear_params", "j + flat_idx % pooling[1] padding_dz[n, c, h_idx, w_idx] +=", "range(C): if np_A[n,c]-np_B[n,c] > error_tolerance or np_B[n,c]-np_A[n,c] > error_tolerance: #print(np_A[n,c],", "Start ===============================') for i in range(len(connections)): print('# Restore network model:", "F.one_hot(label, num_classes=num_class).float() loss, dLoss_dz = cross_entropy_loss(featuremap[-1], y_true) print('Self calculated loss:", "= layer['padding'] except: padding = (0, 0) stride = layer['stride']", "parameters[i+1]['padding']) else: params = None dLoss_dz = view_backward(dLoss_dz, last_z, params)", "list(dK.transpose(0,1).shape)) print('# dbias.shape: ', list(db.shape)) return dz, (dK/N).transpose(0,1), db/N @torch.no_grad()", "= {} BatchNorm2d_params['layer_name'] = layer_name # num_features num_features = layer.__dict__.get('num_features')", "layer_name = 'Cat' parameters[i] = {'layer_name': layer_name} elif isinstance(layer, nn.MaxPool2d):", "C, H, W = z.shape m = N*H*W shape =", "print('======================== {0:3} Layer: '.format(str(i))+'{0:9}'.format(layer['layer_name'])+' Backward End ==========================') delete_allpths(pth_dir=None) return return_dz,", "language governing permissions and # limitations under the License. 
#!/usr/bin/python3", "', strides) N, C, H, W = z.shape _, _,", "var: output Variable params: dict of (name, Variable) to add", "(parameters[i+1]['kernel_size'], parameters[i+1]['stride'], parameters[i+1]['padding']) else: params = None dLoss_dz = view_backward(dLoss_dz,", "* j + pooling[1]] += next_dz[n, c, i, j] /", "p # return parameters[i][j][k] = Dropout_params elif isinstance(tmp_layer, nn.BatchNorm2d): layer_name", "if '->' in graph[i]: connections.append({labels[graph[i].split('\\t')[1].split(' -> ')[0]]+'_'+\\ graph[i].split('\\t')[1].split(' -> ')[0]:\\", "i in range(len(connections)): print('# Restore network model: processing {}/{}'.format(i, len(connections)-1))", "padding, stride) return_dz[i] = dLoss_dz elif layer['layer_name'] == 'ReLU': z", "= torch.matmul(dLoss_dnextz.t(), z) dLoss_dfcB = torch.sum(dLoss_dnextz, dim=0) print('# dz.shape: ',", "if not isinstance(last_connections[i], list): # 单一层,无分支 current_layer_name = list(last_connections[i].keys())[0].split('_')[0] if", "'Add' elif 'Cat' in name: return 'Cat' elif 'Hardtanh' in", "governing permissions and # limitations under the License. #!/usr/bin/python3 import", "get_structure_parameters!') return parameters, fc_conv_weights def gradient_backward_v2(model, img, label, num_class=1000, g_view=False):", "isinstance(layer, nn.Linear): layer_name = 'Linear' Linear_params = {} Linear_params['layer_name'] =", "isinstance(last_connections[i-1], list): index_tmp_layers = tmp[1] + 1 elif not list(last_connections[i-1].keys())[0].split('_')[0]", "exit() new_connections.insert(0, {list(new_connections[0].values())[0]: None}) new_connections.append({'None': 'None'}) return connections, new_connections @torch.no_grad()", "num_features # eps eps = tmp_layer.__dict__.get('eps') BatchNorm2d_params['eps'] = eps #", "= 0.001 np_A = tensor_A.detach().numpy() np_B = tensor_B.detach().numpy() if len(tensor_A.shape)", "== 'Add': dLoss_dz = add_backward(dLoss_dz) return_dz[i] = dLoss_dz elif layer['layer_name']", "def _insert_zeros(dz, strides): N, D, H, W = dz.shape H_last", "[] for j in range(len(layer)): tmp_dLoss_dz.append(copy.deepcopy(dLoss_dz)) for k in range(len(layer[j])):", "> error_tolerance or np_B[n,c]-np_A[n,c] > error_tolerance: #print(np_A[n,c], np_B[n,c]) error +=", "else: Conv2d_params['padding'] = padding # return fc_conv_weights[i] = layer.weight parameters[i]", "'Cat' parameters[i][j][k] = {'layer_name': layer_name} elif isinstance(tmp_layer, nn.MaxPool2d): layer_name =", "layer.__dict__.get('padding') if not isinstance(padding, tuple): AvgPool2d_params['padding'] = (padding, padding) else:", "= padding # return parameters[i] = MaxPool2d_params elif isinstance(layer, nn.AvgPool2d):", "probability: ', p) print('# next_dz.shape: ', list(next_dz.shape)) print('# mask.shape: ',", "tmp.append(connections[i:j-1]) last_connections[start:end+1] = [tmp] for kk in range(end-start): last_connections.insert(start, 'Throwed')", "not isinstance(padding, tuple): Conv2d_params['padding'] = (padding, padding) else: Conv2d_params['padding'] =", "dLoss_dz = torch.where(torch.gt(z, 0), next_dz, zeros_tensor) print('# dz.shape: ', list(dLoss_dz.shape))", "', list(dz.shape)) print('# dweight.shape: ', list(dK.transpose(0,1).shape)) print('# dbias.shape: ', list(db.shape))", "not len(dLoss_dz.shape) == len(lastpop.shape): dLoss_dz = dLoss_dz.reshape(lastpop.shape) elif layer['layer_name'] ==", "0: continue for k in range(len(last_connections[i][j])-1, -1, -1): current_layer_name =", "layer['kernel_size'] stride = layer['stride'] padding = 
layer['padding'] dLoss_dz = max_pooling_backward(dLoss_dz,", "= layer_name # in_channel in_channel = layer.__dict__.get('in_channels') Conv2d_params['in_channel'] = in_channel", "in range(D): for h in range(0, H_last, strides[0]): for w", "else: MaxPool2d_params['padding'] = padding # return parameters.append(MaxPool2d_params) elif isinstance(layer, nn.AvgPool2d):", "layer['layer_name'] == 'Dropout': p = layer['p'] mask = featuremap[-1] dLoss_dz", "if 'Relu' in name: return 'ReLU' elif 'AddmmBackward' in name:", "k in range(len(featuremap[i][j])): print(' =========', i, j, k, featuremap[i][j][k].shape) '''", "loss_torch = Loss(result, label) _, connections = generate_connections(g) last_connections =", "max_pooling_backward(dLoss_dz, z, pooling, stride, padding) return_dz[i] = dLoss_dz elif layer['layer_name']", "# return fc_conv_weights[i] = layer.weight parameters[i] = Linear_params elif isinstance(layer,", "layer['stride'] dLoss_dz, dLoss_dW, dLoss_dB = conv_backward(dLoss_dz, weight_z, z, padding, stride)", "kernel_size) else: AvgPool2d_params['kernel_size'] = kernel_size # stride stride = layer.__dict__.get('stride')", "tmp_dLoss_dz[-1], dLoss_dW, dLoss_dB = conv_backward(tmp_dLoss_dz[-1], weight_z, z, padding, stride) return_dz[i][j][k]", "return error/C elif len(tensor_A.shape) == 2: N, C = tensor_A.shape", "key1 = exchange_name(key.split('_')[0]) + '_' + key.split('_')[1] value1 = exchange_name(value.split('_')[0])", "return_layers[0][j].insert(0, tmp[0]) if not list(last_connections[i][j][k-1].keys())[0].split('_')[0] == 'Dropout': index_tmp_layers = tmp[1]", "', error/C) print('1D-error-rate: ', end=' ') return error/C elif len(tensor_A.shape)", "j in range(len(last_connections[i])): return_layers[0].append([]) if len(last_connections[i][j]) == 0: continue for", "+= next_dz[n, c, i, j] / (pooling[0] * pooling[1]) dz", "parameters[i][j][k] = {'layer_name': layer_name} elif tmp_layer == 'Cat': layer_name =", "dz = _conv_forward(ppadding_next_dz, swap_flip_K) swap_z = torch.swapaxes(z, 0, 1) dK", "in range(N): for c in range(C): if np_A[n,c]-np_B[n,c] > error_tolerance", "= (ivar**2)*((dxhut*xmu).sum(axis=axis, keepdim=True))*xmu dz3 = dxhut.sum(axis=axis, keepdim=True) dz = ivar/m*(dz1-dz2-dz3)", "in range(len(files)): if '.pth' in files[i]: file_nums.append(int(files[i].split('.pth')[0])) file_nums.sort() for file_num", "== 'Dropout': p = layer['p'] mask = featuremap[-1] dLoss_dz =", "= _insert_zeros(next_dz, strides) flip_K = torch.flip(K, (2, 3)) swap_flip_K =", "print('=========================== Restore network model End =================================\\n') return last_connections @torch.no_grad() def", "dz @torch.no_grad() def _remove_padding(z, padding): if padding[0] > 0 and", "find_next_layer_by_name(layers, name, start_i): for i in range(start_i, len(layers)): layer =", "list(dLoss_dz.shape)) return dLoss_dz @torch.no_grad() def max_pooling_backward(next_dz, z, pooling, strides, padding=(0,", "(TODO: make optional) \"\"\" if params is not None: assert", "featuremap.insert(0, img) ### y_true = F.one_hot(label, num_classes=num_class).float() loss, dLoss_dz =", "in model.modules(): if not ':' in str(layer): tmp_layers.append(layer) index_tmp_layers =", "list(dLoss_dnextz.shape)) dLoss_dz = dLoss_dnextz print('# dz.shape: ', list(dLoss_dz.shape)) return dLoss_dz", "= average_pooling_backward(dLoss_dz, z, pooling, stride, padding) return_dz[i] = dLoss_dz elif", "file_nums.sort() for file_num in file_nums: tensor = torch.load(pth_dir+str(file_num)+'.pth') 
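# --- Usage sketch (added for illustration; not part of the recovered module) ---------
# Cross-checks fc_backward against autograd on a tiny linear layer. The helper name
# `_sketch_check_fc_backward` and the toy shapes are hypothetical; fc_backward averages
# its weight/bias gradients over the batch, so they are rescaled by N before comparing.
# Calling it should print three `True` values.
def _sketch_check_fc_backward():
    torch.manual_seed(0)
    fc = nn.Linear(4, 3)
    x = torch.randn(2, 4, requires_grad=True)
    y = fc(x)
    next_dz = torch.ones_like(y)          # stand-in for the upstream gradient
    y.backward(next_dz)
    dz, dW, dB = fc_backward(next_dz, x, fc.weight)
    N = x.shape[0]
    print(torch.allclose(dz, x.grad),
          torch.allclose(dW * N, fc.weight.grad),
          torch.allclose(dB * N, fc.bias.grad))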
@torch.no_grad()
def max_pooling_backward(next_dz, z, pooling, strides, padding=(0, 0)):
    print('# next_dz.shape: ', list(next_dz.shape))
    print('# z.shape: ', list(z.shape))
    print('# padding: ', padding)
    print('# strides: ', strides)
    N, C, H, W = z.shape
    _, _, out_h, out_w = next_dz.shape
    padding_z = F.pad(z, pad=(padding[1],padding[1],padding[0],padding[0],0,0), mode='constant', value=0)
    padding_dz = torch.zeros_like(padding_z)
    for n in torch.arange(N):
        for c in torch.arange(C):
            for i in torch.arange(out_h):
                for j in torch.arange(out_w):
                    # route the gradient to the arg-max position of each pooling window
                    flat_idx = torch.argmax(padding_z[n, c,
                                                      strides[0]*i:strides[0]*i+pooling[0],
                                                      strides[1]*j:strides[1]*j+pooling[1]])
                    h_idx = strides[0] * i + flat_idx // pooling[1]
                    w_idx = strides[1] * j + flat_idx % pooling[1]
                    padding_dz[n, c, h_idx, w_idx] += next_dz[n, c, i, j]
    dz = _remove_padding(padding_dz, padding)
    print('# dz.shape: ', list(dz.shape))
    return dz


@torch.no_grad()
def average_pooling_backward(next_dz, z, pooling, strides, padding=(0, 0)):
    print('# next_dz.shape: ', list(next_dz.shape))
    print('# z.shape: ', list(z.shape))
    print('# padding: ', padding)
    print('# strides: ', strides)
    N, C, H, W = z.shape
    _, _, out_h, out_w = next_dz.shape
    padding_z = F.pad(z, pad=(padding[1],padding[1],padding[0],padding[0],0,0), mode='constant', value=0)
    padding_dz = torch.zeros_like(padding_z)
    for n in torch.arange(N):
        for c in torch.arange(C):
            for i in torch.arange(out_h):
                for j in torch.arange(out_w):
                    # spread the gradient uniformly over each pooling window
                    padding_dz[n, c,
                               strides[0]*i:strides[0]*i+pooling[0],
                               strides[1]*j:strides[1]*j+pooling[1]] += next_dz[n, c, i, j] / (pooling[0] * pooling[1])
    dz = _remove_padding(padding_dz, padding)
    print('# dz.shape: ', list(dz.shape))
    return dz
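# --- Usage sketch (added for illustration; not part of the recovered module) ---------
# Cross-checks average_pooling_backward against autograd on a tiny 2x2, stride-2 pooling.
# The helper name `_sketch_check_avg_pool_backward` and the toy shapes are hypothetical.
def _sketch_check_avg_pool_backward():
    torch.manual_seed(0)
    z = torch.randn(1, 2, 4, 4, requires_grad=True)
    out = nn.AvgPool2d(kernel_size=2, stride=2)(z)
    next_dz = torch.ones_like(out)
    out.backward(next_dz)
    dz = average_pooling_backward(next_dz, z, pooling=(2, 2), strides=(2, 2), padding=(0, 0))
    print(torch.allclose(dz, z.grad))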
@torch.no_grad()
def _conv_forward(x, weight, strides=(1, 1)):
    # plain cross-correlation via unfold + einsum; used by conv_backward below
    # (the exact original signature was not recoverable, this one matches its call sites)
    d, c, k, j = weight.shape
    x_pad = x.unfold(2, k, strides[0])
    x_pad = x_pad.unfold(3, j, strides[1])
    out = torch.einsum(
        'nchwkj,dckj->ndhw', x_pad, weight)
    return out


@torch.no_grad()
def _insert_zeros(dz, strides):
    # dilate the upstream gradient so that strided convolutions reduce to stride 1
    N, D, H, W = dz.shape
    H_last = (H-1)*(strides[0]-1) + H
    W_last = (W-1)*(strides[1]-1) + W
    pz = torch.zeros(N, D, H_last, W_last, dtype=dz.dtype)
    for n in range(N):
        for d in range(D):
            for h in range(0, H_last, strides[0]):
                for w in range(0, W_last, strides[1]):
                    pz[n,d,h,w] = dz[n,d,h//strides[0],w//strides[1]]
    return pz


@torch.no_grad()
def _remove_padding(z, padding):
    if padding[0] > 0 and padding[1] > 0:
        return z[:, :, padding[0]:-padding[0], padding[1]:-padding[1]]
    elif padding[0] > 0:
        return z[:, :, padding[0]:-padding[0], :]
    elif padding[1] > 0:
        return z[:, :, :, padding[1]:-padding[1]]
    else:
        return z


@torch.no_grad()
def conv_backward(next_dz, K, z, padding=(0, 0), strides=(1, 1)):
    N, C, H, W = z.shape
    D, C, k1, k2 = K.shape
    N, D, H1, W1 = next_dz.shape
    print('# next_dz.shape: ', list(next_dz.shape))
    print('# z.shape: ', list(z.shape))
    print('# weight.shape: ', list(K.shape))
    print('# bias.shape: ', '['+str(K.shape[0])+']')
    padding_next_dz = _insert_zeros(next_dz, strides)
    flip_K = torch.flip(K, (2, 3))
    swap_flip_K = torch.swapaxes(flip_K, 0, 1)
    ppadding_next_dz = F.pad(padding_next_dz, pad=(k2-1-padding[1],k2-1-padding[1],
                             k1-1-padding[0],k1-1-padding[0],0,0), mode='constant', value=0)
    dz = _conv_forward(ppadding_next_dz, swap_flip_K)
    dK = _conv_forward(torch.swapaxes(F.pad(z, pad=(padding[1],padding[1],
                       padding[0],padding[0],0,0), mode='constant', value=0), 0, 1),
                       torch.swapaxes(padding_next_dz, 0, 1))
    db = torch.sum(torch.sum(torch.sum(next_dz, axis=-1), axis=-1), axis=0)  # sum over height, width and batch
    print('# dz.shape: ', list(dz.shape))
    print('# dweight.shape: ', list(dK.transpose(0,1).shape))
    print('# dbias.shape: ', list(db.shape))
    return dz, (dK/N).transpose(0,1), db/N
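# --- Usage sketch (added for illustration; not part of the recovered module) ---------
# Cross-checks conv_backward against autograd for a tiny stride-1, unpadded convolution.
# The helper name `_sketch_check_conv_backward` and the toy shapes are hypothetical;
# conv_backward averages dweight/dbias over the batch, hence the rescaling by N.
def _sketch_check_conv_backward():
    torch.manual_seed(0)
    conv = nn.Conv2d(2, 3, kernel_size=3, stride=1, padding=0, bias=True)
    z = torch.randn(1, 2, 6, 6, requires_grad=True)
    out = conv(z)
    next_dz = torch.ones_like(out)
    out.backward(next_dz)
    dz, dK, db = conv_backward(next_dz, conv.weight, z, padding=(0, 0), strides=(1, 1))
    N = z.shape[0]
    print(torch.allclose(dz, z.grad, atol=1e-5),
          torch.allclose(dK * N, conv.weight.grad, atol=1e-5),
          torch.allclose(db * N, conv.bias.grad, atol=1e-5))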
@torch.no_grad()
def batchnorm2d_backward(next_dz, z, eps, gamma=torch.Tensor([1.,1.,1.])):
    print('# next_dz.shape: ', list(next_dz.shape))
    print('# z.shape: ', list(z.shape))
    print('# eps: ', eps)
    print('# gamma.shape: ', list(gamma.shape))
    N, C, H, W = z.shape
    m = N*H*W
    shape = [N,C,H,W]
    import numpy as np
    ax = list(np.arange(len(shape)))
    ax.pop(1)                      # reduce over every axis except the channel axis
    axis = tuple(ax)
    dxhut = torch.zeros_like(next_dz)
    for c in range(C):
        dxhut[:,c] = next_dz[:,c]*gamma[c]
    dz1 = m*dxhut
    mu = z.mean(axis=axis, keepdim=True)
    xmu = z - mu
    xmu2 = xmu**2
    var = xmu2.sum(axis=axis, keepdim=True)/m
    ivar = 1./torch.pow(var+eps, 0.5)
    dz2 = (ivar**2)*((dxhut*xmu).sum(axis=axis, keepdim=True))*xmu
    dz3 = dxhut.sum(axis=axis, keepdim=True)
    dz = ivar/m*(dz1-dz2-dz3)
    print('# dz.shape: ', list(dz.shape))
    return dz


@torch.no_grad()
def judge_tensors_equal(tensor_A, tensor_B):
    if not tensor_A.shape == tensor_B.shape:
        print('Shape of two compared tensors is not equal.')
        return None
    error = 0
    error_tolerance = 0.001
    np_A = tensor_A.detach().numpy()
    np_B = tensor_B.detach().numpy()
    if len(tensor_A.shape) == 4:
        N, C, H, W = tensor_A.shape
        for n in range(N):
            for c in range(C):
                for h in range(H):
                    for w in range(W):
                        if np_A[n,c,h,w]-np_B[n,c,h,w] > error_tolerance or np_B[n,c,h,w]-np_A[n,c,h,w] > error_tolerance:
                            error += 1
        print('4D-error-rate: ', end=' ')
        return error/(N*C*H*W)
    elif len(tensor_A.shape) == 2:
        N, C = tensor_A.shape
        for n in range(N):
            for c in range(C):
                if np_A[n,c]-np_B[n,c] > error_tolerance or np_B[n,c]-np_A[n,c] > error_tolerance:
                    error += 1
        print('2D-error-rate: ', end=' ')
        return error/(C*N)
    elif len(tensor_A.shape) == 1:
        C = tensor_A.shape[0]
        for c in range(C):
            if np_A[c]-np_B[c] > error_tolerance or np_B[c]-np_A[c] > error_tolerance:
                error += 1
        print('1D-error-rate: ', end=' ')
        return error/C


@torch.no_grad()
def get_featuremap(featuremap_dir=None):
    import os
    featuremap = []
    if featuremap_dir == None:
        pth_dir = "./tmp_file/"
    else:
        pth_dir = featuremap_dir
    files = os.listdir(pth_dir)
    file_nums = []
    for i in range(len(files)):
        if '.pth' in files[i]:
            file_nums.append(int(files[i].split('.pth')[0]))
    file_nums.sort()
    for file_num in file_nums:
        tensor = torch.load(pth_dir+str(file_num)+'.pth')
        featuremap.append(tensor)
    delete_allpths(pth_dir=None)
    return featuremap


@torch.no_grad()
def delete_allpths(pth_dir=None):
    import os
    if pth_dir == None:
        pth_dir = "./tmp_file/"
    for root, dirs, files in os.walk(pth_dir, topdown=False):
        for name in files:
            if name.endswith('.pth',):
                os.remove(os.path.join(root, name))


@torch.no_grad()
def mul_items(tensor_size):
    x = list(tensor_size)
    mul = 1.
    for i in range(len(x)):
        mul *= x[i]
    return mul


# The rest of the module builds the graph-tracing driver around the backward functions
# above:
#   * make_dot(var, params=None) / generate_g(model, x): trace the autograd graph with
#     graphviz (Digraph); GoogLeNet's tuple output is special-cased.
#   * exchange_name(name) / generate_connections(g): map autograd node labels
#     ('AddmmBackward' -> 'Linear', 'ViewBackward' -> 'View', 'AddBackward' -> 'Add',
#     'Conv' -> 'Conv2d', 'MaxPool' -> 'MaxPool2d', ...) and parse the dot source into
#     an ordered list of layer-to-layer connections.
#   * get_split_connections / find_start_end / merge_connections: fold branched
#     (residual) paths back into a single sequence.
#   * get_layers / get_tensors / get_structure_parameters(_v1): recover the nn layers,
#     the feature maps stored as ./tmp_file/<n>.pth, the per-layer hyper-parameters
#     (kernel_size, stride, padding, eps, p, in/out features) and the weights needed by
#     conv_backward and fc_backward.
#   * gradient_backward_v1(model, img, label, num_class=1000) and
#     gradient_backward_v2(model, img, label, num_class=1000, g_view=False): run the
#     manual backward pass layer by layer and compare the resulting weight gradients
#     against autograd (model.features[0].weight.grad for VGG/AlexNet,
#     model.conv1.weight.grad for ResNet) via judge_tensors_equal.
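# --- Usage sketch (added for illustration; not part of the recovered module) ---------
# Cross-checks batchnorm2d_backward against autograd for a BatchNorm2d layer in training
# mode (batch statistics). The helper name `_sketch_check_batchnorm_backward` and the
# toy shapes are hypothetical.
def _sketch_check_batchnorm_backward():
    torch.manual_seed(0)
    bn = nn.BatchNorm2d(3)
    z = torch.randn(4, 3, 5, 5, requires_grad=True)
    out = bn(z)
    next_dz = torch.randn_like(out)
    out.backward(next_dz)
    dz = batchnorm2d_backward(next_dz, z, bn.eps, gamma=bn.weight)
    print(torch.allclose(dz, z.grad, atol=1e-5))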
import torch


class FGM(object):
    """
    Attack mechanism based on the FGM algorithm.

    Args:
        module (:obj:`torch.nn.Module`): the model

    Examples::

        >>> # initialisation
        >>> fgm = FGM(module)
        >>> for batch_input, batch_label in data:
        >>>     # normal training step
        >>>     loss = module(batch_input, batch_label)
        >>>     loss.backward()  # backward pass, producing the clean gradients
        >>>     # adversarial training step
        >>>     fgm.attack()  # add the adversarial perturbation to the embeddings
        >>>     loss_adv = module(batch_input, batch_label)
        >>>     loss_adv.backward()  # accumulate the adversarial gradients on top of the clean ones
        >>>     fgm.restore()  # restore the embedding parameters
        >>>     # gradient descent, update the parameters
        >>>     optimizer.step()
        >>>     optimizer.zero_grad()

    Reference:
        [1] https://zhuanlan.zhihu.com/p/91269728
    """
    def __init__(self, module):
        self.module = module
        self.backup = {}

    def attack(
        self,
        epsilon=1.,
        emb_name='word_embeddings'
    ):
        for name, param in self.module.named_parameters():
            if param.requires_grad and emb_name in name:
                self.backup[name] = param.data.clone()
                norm = torch.norm(param.grad)
                if norm != 0 and not torch.isnan(norm):
                    r_at = epsilon * param.grad / norm
                    param.data.add_(r_at)

    def restore(
        self,
        emb_name='word_embeddings'
    ):
        for name, param in self.module.named_parameters():
            if param.requires_grad and emb_name in name:
                assert name in self.backup
                param.data = self.backup[name]
        self.backup = {}


class PGD(object):
    """
    Attack mechanism based on the PGD algorithm.

    Args:
        module (:obj:`torch.nn.Module`): the model

    Examples::

        >>> pgd = PGD(module)
        >>> K = 3
        >>> for batch_input, batch_label in data:
        >>>     # normal training step
        >>>     loss = module(batch_input, batch_label)
        >>>     loss.backward()  # backward pass, producing the clean gradients
        >>>     pgd.backup_grad()
        >>>     # adversarial training
        >>>     for t in range(K):
        >>>         pgd.attack(is_first_attack=(t==0))  # perturb the embeddings; back up param.data on the first attack
        >>>         if t != K-1:
        >>>             optimizer.zero_grad()
        >>>         else:
        >>>             pgd.restore_grad()
        >>>         loss_adv = module(batch_input, batch_label)
        >>>         loss_adv.backward()  # accumulate the adversarial gradients on top of the clean ones
        >>>     pgd.restore()  # restore the embedding parameters
        >>>     # gradient descent, update the parameters
        >>>     optimizer.step()
        >>>     optimizer.zero_grad()

    Reference:
        [1] https://zhuanlan.zhihu.com/p/91269728
    """
    def __init__(self, module):
        self.module = module
        self.emb_backup = {}
        self.grad_backup = {}

    def attack(
        self,
        epsilon=1.,
        alpha=0.3,
        emb_name='emb.',
        is_first_attack=False
    ):
        # change emb_name to the name of the embedding parameter used in your model
        for name, param in self.module.named_parameters():
            if param.requires_grad and emb_name in name:
                if is_first_attack:
                    self.emb_backup[name] = param.data.clone()
                norm = torch.norm(param.grad)
                if norm != 0 and not torch.isnan(norm):
                    r_at = alpha * param.grad / norm
                    param.data.add_(r_at)
                    param.data = self.project(name, param.data, epsilon)

    def restore(self, emb_name='emb.'):
        # change emb_name to the name of the embedding parameter used in your model
        for name, param in self.module.named_parameters():
            if param.requires_grad and emb_name in name:
                assert name in self.emb_backup
                param.data = self.emb_backup[name]
        self.emb_backup = {}

    def project(self, param_name, param_data, epsilon):
        r = param_data - self.emb_backup[param_name]
        if torch.norm(r) > epsilon:
            r = epsilon * r / torch.norm(r)
        return self.emb_backup[param_name] + r

    def backup_grad(self):
        for name, param in self.module.named_parameters():
            if param.requires_grad:
                self.grad_backup[name] = param.grad.clone()

    def restore_grad(self):
        for name, param in self.module.named_parameters():
            if param.requires_grad:
                param.grad = self.grad_backup[name]
self.emb_backup = {} self.grad_backup = {} def attack( self,", "self.backup = {} def attack( self, epsilon=1., emb_name='word_embeddings' ): for", "self.emb_backup[param_name] if torch.norm(r) > epsilon: r = epsilon * r", "alpha * param.grad / norm param.data.add_(r_at) param.data = self.project(name, param.data,", "= param.data.clone() norm = torch.norm(param.grad) if norm != 0 and", "norm != 0 and not torch.isnan(norm): r_at = epsilon *", "__init__(self, module): self.module = module self.emb_backup = {} self.grad_backup =", "attack( self, epsilon=1., emb_name='word_embeddings' ): for name, param in self.module.named_parameters():", "module(batch_input, batch_label) >>> loss.backward() # 反向传播,得到正常的grad >>> # 对抗训练 >>>", "loss_adv.backward() # 反向传播,并在正常的grad基础上,累加对抗训练的梯度 >>> pgd.restore() # 恢复embedding参数 >>> # 梯度下降,更新参数", "): for name, param in self.module.named_parameters(): if param.requires_grad and emb_name", "self.backup[name] self.backup = {} class PGD(object): \"\"\" 基于PGD算法的攻击机制 Args: module", "def __init__(self, module): self.module = module self.emb_backup = {} self.grad_backup", ">>> loss.backward() # 反向传播,得到正常的grad >>> # 对抗训练 >>> fgm.attack() #", "def attack( self, epsilon=1., alpha=0.3, emb_name='emb.', is_first_attack=False ): # emb_name这个参数要换成你模型中embedding的参数名", ">>> for batch_input, batch_label in data: >>> # 正常训练 >>>", "name, param in self.module.named_parameters(): if param.requires_grad: self.grad_backup[name] = param.grad.clone() def", "param.data.add_(r_at) def restore( self, emb_name='word_embeddings' ): for name, param in", "def backup_grad(self): for name, param in self.module.named_parameters(): if param.requires_grad: self.grad_backup[name]", "对抗训练 >>> for t in range(K): >>> pgd.attack(is_first_attack=(t==0)) # 在embedding上添加对抗扰动,", "= self.emb_backup[name] self.emb_backup = {} def project(self, param_name, param_data, epsilon):", "param.requires_grad: self.grad_backup[name] = param.grad.clone() def restore_grad(self): for name, param in", ">>> # 初始化 >>> fgm = FGM(module) >>> for batch_input,", ">>> # 对抗训练 >>> fgm.attack() # 在embedding上添加对抗扰动 >>> loss_adv =", "param_data, epsilon): r = param_data - self.emb_backup[param_name] if torch.norm(r) >", "param.requires_grad and emb_name in name: self.backup[name] = param.data.clone() norm =", "if param.requires_grad and emb_name in name: self.backup[name] = param.data.clone() norm", "初始化 >>> fgm = FGM(module) >>> for batch_input, batch_label in", "PGD(object): \"\"\" 基于PGD算法的攻击机制 Args: module (:obj:`torch.nn.Module`): 模型 Examples:: >>> pgd", "in self.emb_backup param.data = self.emb_backup[name] self.emb_backup = {} def project(self,", "r def backup_grad(self): for name, param in self.module.named_parameters(): if param.requires_grad:", "= epsilon * param.grad / norm param.data.add_(r_at) def restore( self,", "emb_name='emb.', is_first_attack=False ): # emb_name这个参数要换成你模型中embedding的参数名 for name, param in self.module.named_parameters():", "optimizer.zero_grad() Reference: [1] https://zhuanlan.zhihu.com/p/91269728 \"\"\" def __init__(self, module): self.module =", ">>> pgd.restore() # 恢复embedding参数 >>> # 梯度下降,更新参数 >>> optimizer.step() >>>", "梯度下降,更新参数 >>> optimizer.step() >>> optimizer.zero_grad() Reference: [1] https://zhuanlan.zhihu.com/p/91269728 \"\"\" def", "[1] https://zhuanlan.zhihu.com/p/91269728 \"\"\" def __init__(self, module): self.module = module self.emb_backup", "restore(self, emb_name='emb.'): # emb_name这个参数要换成你模型中embedding的参数名 for name, param in self.module.named_parameters(): if", "模型 Examples:: >>> # 初始化 
>>> fgm = FGM(module) >>>", "if is_first_attack: self.emb_backup[name] = param.data.clone() norm = torch.norm(param.grad) if norm", "# emb_name这个参数要换成你模型中embedding的参数名 for name, param in self.module.named_parameters(): if param.requires_grad and", "data: >>> # 正常训练 >>> loss = module(batch_input, batch_label) >>>", "assert name in self.backup param.data = self.backup[name] self.backup = {}", "!= 0 and not torch.isnan(norm): r_at = epsilon * param.grad", ">>> loss.backward() # 反向传播,得到正常的grad >>> pgd.backup_grad() >>> # 对抗训练 >>>", "for name, param in self.module.named_parameters(): if param.requires_grad: param.grad = self.grad_backup[name]", "loss = module(batch_input, batch_label) >>> loss.backward() # 反向传播,得到正常的grad >>> pgd.backup_grad()", "self.grad_backup[name] = param.grad.clone() def restore_grad(self): for name, param in self.module.named_parameters():", "{} class PGD(object): \"\"\" 基于PGD算法的攻击机制 Args: module (:obj:`torch.nn.Module`): 模型 Examples::", "param_name, param_data, epsilon): r = param_data - self.emb_backup[param_name] if torch.norm(r)", "emb_name in name: assert name in self.backup param.data = self.backup[name]", "pgd.attack(is_first_attack=(t==0)) # 在embedding上添加对抗扰动, first attack时备份param.data >>> if t != K-1:", "= PGD(module) >>> K = 3 >>> for batch_input, batch_label", "batch_label in data: >>> # 正常训练 >>> loss = module(batch_input,", "if torch.norm(r) > epsilon: r = epsilon * r /", "emb_name='emb.'): # emb_name这个参数要换成你模型中embedding的参数名 for name, param in self.module.named_parameters(): if param.requires_grad", "> epsilon: r = epsilon * r / torch.norm(r) return", "self.module.named_parameters(): if param.requires_grad and emb_name in name: self.backup[name] = param.data.clone()", "in self.module.named_parameters(): if param.requires_grad: self.grad_backup[name] = param.grad.clone() def restore_grad(self): for", "in name: if is_first_attack: self.emb_backup[name] = param.data.clone() norm = torch.norm(param.grad)", "is_first_attack=False ): # emb_name这个参数要换成你模型中embedding的参数名 for name, param in self.module.named_parameters(): if", "= self.backup[name] self.backup = {} class PGD(object): \"\"\" 基于PGD算法的攻击机制 Args:", "loss_adv.backward() # 反向传播,并在正常的grad基础上,累加对抗训练的梯度 >>> fgm.restore() # 恢复embedding参数 >>> # 梯度下降,更新参数", "# 恢复embedding参数 >>> # 梯度下降,更新参数 >>> optimizer.step() >>> optimizer.zero_grad() Reference:", ">>> fgm.restore() # 恢复embedding参数 >>> # 梯度下降,更新参数 >>> optimizer.step() >>>", ">>> loss_adv = module(batch_input, batch_label) >>> loss_adv.backward() # 反向传播,并在正常的grad基础上,累加对抗训练的梯度 >>>", "emb_name='word_embeddings' ): for name, param in self.module.named_parameters(): if param.requires_grad and", "/ torch.norm(r) return self.emb_backup[param_name] + r def backup_grad(self): for name,", "self.emb_backup param.data = self.emb_backup[name] self.emb_backup = {} def project(self, param_name,", "and emb_name in name: self.backup[name] = param.data.clone() norm = torch.norm(param.grad)", "in data: >>> # 正常训练 >>> loss = module(batch_input, batch_label)", "emb_name in name: assert name in self.emb_backup param.data = self.emb_backup[name]", "在embedding上添加对抗扰动 >>> loss_adv = module(batch_input, batch_label) >>> loss_adv.backward() # 反向传播,并在正常的grad基础上,累加对抗训练的梯度", "self, emb_name='word_embeddings' ): for name, param in self.module.named_parameters(): if param.requires_grad", ">>> optimizer.zero_grad() Reference: [1] https://zhuanlan.zhihu.com/p/91269728 \"\"\" def __init__(self, module): self.module", "# 反向传播,并在正常的grad基础上,累加对抗训练的梯度 >>> pgd.restore() # 恢复embedding参数 >>> # 梯度下降,更新参数 
>>>", "def restore_grad(self): for name, param in self.module.named_parameters(): if param.requires_grad: param.grad", "+ r def backup_grad(self): for name, param in self.module.named_parameters(): if", "= param_data - self.emb_backup[param_name] if torch.norm(r) > epsilon: r =", "restore_grad(self): for name, param in self.module.named_parameters(): if param.requires_grad: param.grad =", "# 对抗训练 >>> for t in range(K): >>> pgd.attack(is_first_attack=(t==0)) #", "fgm.restore() # 恢复embedding参数 >>> # 梯度下降,更新参数 >>> optimizer.step() >>> optimizer.zero_grad()", "project(self, param_name, param_data, epsilon): r = param_data - self.emb_backup[param_name] if", "= {} def attack( self, epsilon=1., emb_name='word_embeddings' ): for name,", "(:obj:`torch.nn.Module`): 模型 Examples:: >>> pgd = PGD(module) >>> K =", "torch.norm(param.grad) if norm != 0 and not torch.isnan(norm): r_at =", "param.data.clone() norm = torch.norm(param.grad) if norm != 0 and not", "\"\"\" def __init__(self, module): self.module = module self.emb_backup = {}", "and emb_name in name: assert name in self.emb_backup param.data =", "模型 Examples:: >>> pgd = PGD(module) >>> K = 3", "* r / torch.norm(r) return self.emb_backup[param_name] + r def backup_grad(self):", "batch_label) >>> loss.backward() # 反向传播,得到正常的grad >>> pgd.backup_grad() >>> # 对抗训练", "- self.emb_backup[param_name] if torch.norm(r) > epsilon: r = epsilon *", "module): self.module = module self.emb_backup = {} self.grad_backup = {}", "FGM(object): \"\"\" 基于FGM算法的攻击机制 Args: module (:obj:`torch.nn.Module`): 模型 Examples:: >>> #", "def restore( self, emb_name='word_embeddings' ): for name, param in self.module.named_parameters():", "恢复embedding参数 >>> # 梯度下降,更新参数 >>> optimizer.step() >>> optimizer.zero_grad() Reference: [1]", "module(batch_input, batch_label) >>> loss_adv.backward() # 反向传播,并在正常的grad基础上,累加对抗训练的梯度 >>> fgm.restore() # 恢复embedding参数", "if param.requires_grad and emb_name in name: assert name in self.backup", "param.data = self.emb_backup[name] self.emb_backup = {} def project(self, param_name, param_data,", "基于FGM算法的攻击机制 Args: module (:obj:`torch.nn.Module`): 模型 Examples:: >>> # 初始化 >>>", ">>> pgd.attack(is_first_attack=(t==0)) # 在embedding上添加对抗扰动, first attack时备份param.data >>> if t !=", "= module(batch_input, batch_label) >>> loss.backward() # 反向传播,得到正常的grad >>> # 对抗训练", "epsilon=1., alpha=0.3, emb_name='emb.', is_first_attack=False ): # emb_name这个参数要换成你模型中embedding的参数名 for name, param", "module(batch_input, batch_label) >>> loss_adv.backward() # 反向传播,并在正常的grad基础上,累加对抗训练的梯度 >>> pgd.restore() # 恢复embedding参数", "/ norm param.data.add_(r_at) param.data = self.project(name, param.data, epsilon) def restore(self,", ">>> fgm = FGM(module) >>> for batch_input, batch_label in data:", "Examples:: >>> pgd = PGD(module) >>> K = 3 >>>", "torch.isnan(norm): r_at = alpha * param.grad / norm param.data.add_(r_at) param.data", "epsilon): r = param_data - self.emb_backup[param_name] if torch.norm(r) > epsilon:", "= alpha * param.grad / norm param.data.add_(r_at) param.data = self.project(name,", "self.module.named_parameters(): if param.requires_grad and emb_name in name: if is_first_attack: self.emb_backup[name]", "param in self.module.named_parameters(): if param.requires_grad: self.grad_backup[name] = param.grad.clone() def restore_grad(self):", "param.data.add_(r_at) param.data = self.project(name, param.data, epsilon) def restore(self, emb_name='emb.'): #", ">>> optimizer.step() >>> optimizer.zero_grad() Reference: [1] https://zhuanlan.zhihu.com/p/91269728 \"\"\" def 
__init__(self,", "param.grad / norm param.data.add_(r_at) def restore( self, emb_name='word_embeddings' ): for", "https://zhuanlan.zhihu.com/p/91269728 \"\"\" def __init__(self, module): self.module = module self.emb_backup =", "epsilon * param.grad / norm param.data.add_(r_at) def restore( self, emb_name='word_embeddings'", "[1] https://zhuanlan.zhihu.com/p/91269728 \"\"\" def __init__(self, module): self.module = module self.backup", "norm param.data.add_(r_at) param.data = self.project(name, param.data, epsilon) def restore(self, emb_name='emb.'):", "= {} def project(self, param_name, param_data, epsilon): r = param_data", "for name, param in self.module.named_parameters(): if param.requires_grad: self.grad_backup[name] = param.grad.clone()", "t in range(K): >>> pgd.attack(is_first_attack=(t==0)) # 在embedding上添加对抗扰动, first attack时备份param.data >>>", "fgm = FGM(module) >>> for batch_input, batch_label in data: >>>", "loss.backward() # 反向传播,得到正常的grad >>> # 对抗训练 >>> fgm.attack() # 在embedding上添加对抗扰动", "def __init__(self, module): self.module = module self.backup = {} def", "for batch_input, batch_label in data: >>> # 正常训练 >>> loss", "= module self.emb_backup = {} self.grad_backup = {} def attack(", "module (:obj:`torch.nn.Module`): 模型 Examples:: >>> # 初始化 >>> fgm =", ">>> optimizer.zero_grad() >>> else: >>> pgd.restore_grad() >>> loss_adv = module(batch_input,", "param.data = self.backup[name] self.backup = {} class PGD(object): \"\"\" 基于PGD算法的攻击机制", "first attack时备份param.data >>> if t != K-1: >>> optimizer.zero_grad() >>>", ">>> fgm.attack() # 在embedding上添加对抗扰动 >>> loss_adv = module(batch_input, batch_label) >>>", "batch_label) >>> loss_adv.backward() # 反向传播,并在正常的grad基础上,累加对抗训练的梯度 >>> pgd.restore() # 恢复embedding参数 >>>", "restore( self, emb_name='word_embeddings' ): for name, param in self.module.named_parameters(): if", "name, param in self.module.named_parameters(): if param.requires_grad and emb_name in name:", "range(K): >>> pgd.attack(is_first_attack=(t==0)) # 在embedding上添加对抗扰动, first attack时备份param.data >>> if t", "def attack( self, epsilon=1., emb_name='word_embeddings' ): for name, param in", "emb_name in name: self.backup[name] = param.data.clone() norm = torch.norm(param.grad) if", "= module(batch_input, batch_label) >>> loss.backward() # 反向传播,得到正常的grad >>> pgd.backup_grad() >>>", "* param.grad / norm param.data.add_(r_at) param.data = self.project(name, param.data, epsilon)", ">>> loss = module(batch_input, batch_label) >>> loss.backward() # 反向传播,得到正常的grad >>>", "def project(self, param_name, param_data, epsilon): r = param_data - self.emb_backup[param_name]", "torch.norm(r) > epsilon: r = epsilon * r / torch.norm(r)", "self.emb_backup[name] self.emb_backup = {} def project(self, param_name, param_data, epsilon): r", "# 反向传播,并在正常的grad基础上,累加对抗训练的梯度 >>> fgm.restore() # 恢复embedding参数 >>> # 梯度下降,更新参数 >>>", "pgd.restore_grad() >>> loss_adv = module(batch_input, batch_label) >>> loss_adv.backward() # 反向传播,并在正常的grad基础上,累加对抗训练的梯度", "self.project(name, param.data, epsilon) def restore(self, emb_name='emb.'): # emb_name这个参数要换成你模型中embedding的参数名 for name,", "# 反向传播,得到正常的grad >>> pgd.backup_grad() >>> # 对抗训练 >>> for t", "https://zhuanlan.zhihu.com/p/91269728 \"\"\" def __init__(self, module): self.module = module self.backup =", "self.module = module self.backup = {} def attack( self, epsilon=1.,", "if param.requires_grad and emb_name in name: if is_first_attack: self.emb_backup[name] =", "{} self.grad_backup = {} def attack( self, epsilon=1., alpha=0.3, emb_name='emb.',", "def restore(self, 
emb_name='emb.'): # emb_name这个参数要换成你模型中embedding的参数名 for name, param in self.module.named_parameters():" ]
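To make the loop sketched in the FGM docstring concrete, here is a minimal runnable example. The tiny embedding classifier, the random token/label batches, and the SGD settings are illustrative assumptions introduced only for this sketch; the FGM class itself is the one defined above.

import torch
import torch.nn as nn


class TinyClassifier(nn.Module):
    # A toy model whose embedding parameter name matches FGM's default emb_name.
    def __init__(self, vocab_size=100, dim=16, num_classes=2):
        super().__init__()
        self.word_embeddings = nn.Embedding(vocab_size, dim)
        self.fc = nn.Linear(dim, num_classes)

    def forward(self, token_ids, labels):
        emb = self.word_embeddings(token_ids).mean(dim=1)  # mean-pool token embeddings
        logits = self.fc(emb)
        return nn.functional.cross_entropy(logits, labels)


module = TinyClassifier()
optimizer = torch.optim.SGD(module.parameters(), lr=0.1)
fgm = FGM(module)

for _ in range(3):  # a few illustrative steps on random data
    batch_input = torch.randint(0, 100, (8, 5))  # fake token ids
    batch_label = torch.randint(0, 2, (8,))      # fake labels

    loss = module(batch_input, batch_label)
    loss.backward()      # clean gradients

    fgm.attack()         # perturb the embedding weights along the gradient direction
    loss_adv = module(batch_input, batch_label)
    loss_adv.backward()  # accumulate adversarial gradients on top of the clean ones
    fgm.restore()        # restore the original embedding weights

    optimizer.step()
    optimizer.zero_grad()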
[ "forward( source, driving_resized, kp_source, kp_driving_initial, generator, kp_detector, relative=opt.relative, adapt_scale=opt.adapt_scale, cpu=opt.cpu", "OcclusionAwareGenerator from modules.keypoint_detector import KPDetector from sync_batchnorm import DataParallelWithCallback #from", ":3] source = torch.tensor(source_image[np.newaxis].astype(np.float32)).permute(0, 3, 1, 2) if not opt.cpu:", "# '-pix_fmt', 'yuv420p', # '-preset', 'ultrafast', # '-f', 'flv', #", "= cv2.VideoCapture(0) ret, frame = camera.read() while True: ret, frame", "'-i', '-', # '-c:v', 'libx264', # '-pix_fmt', 'yuv420p', # '-preset',", "adapt_movement_scale = np.sqrt(source_area) / np.sqrt(driving_area) else: adapt_movement_scale = 1 kp_new", "== \"__main__\": parser = ArgumentParser() parser.add_argument(\"--config\", required=True, help=\"path to config\")", "True: ret, frame = camera.read() resized = resize(frame, (256, 256))[...,", "if not cpu: kp_detector.cuda() if cpu: checkpoint = torch.load(checkpoint_path, map_location=torch.device('cpu'))", "torch.matmul(jacobian_diff, kp_source['jacobian']) return kp_new def load_checkpoints(config_path, checkpoint_path, cpu=False): with open(config_path)", "generator = OcclusionAwareGenerator(**config['model_params']['generator_params'], **config['model_params']['common_params']) if not cpu: generator.cuda() kp_detector =", "= kp_detector(driving_frame) kp_norm = normalize_kp( kp_source=kp_source, kp_driving=kp_driving, kp_driving_initial=kp_driving_initial, use_relative_movement=relative, use_relative_jacobian=relative,", "mode\") parser.set_defaults(relative=False) parser.set_defaults(adapt_scale=False) opt = parser.parse_args() generator, kp_detector = load_checkpoints(config_path=opt.config,", "+ kp_source['value'] if use_relative_jacobian: jacobian_diff = torch.matmul(kp_driving['jacobian'], torch.inverse(kp_driving_initial['jacobian'])) kp_new['jacobian'] =", "cpu: kp_detector.cuda() if cpu: checkpoint = torch.load(checkpoint_path, map_location=torch.device('cpu')) else: checkpoint", "kp_detector(source) #out = cv2.VideoWriter('outpy.avi', cv2.VideoWriter_fourcc('M','J','P','G'), 30, (256, 256)) kp_driving_initial =", "= ConvexHull(kp_driving_initial['value'][0].data.cpu().numpy()).volume adapt_movement_scale = np.sqrt(source_area) / np.sqrt(driving_area) else: adapt_movement_scale =", "load_checkpoints(config_path, checkpoint_path, cpu=False): with open(config_path) as f: config = yaml.load(f)", "[0, 2, 3, 1])[0] if __name__ == \"__main__\": parser =", "if not opt.cpu: resized = resized.cuda() # y = torch.tensor(np.array(resized))", "OcclusionAwareGenerator(**config['model_params']['generator_params'], **config['model_params']['common_params']) if not cpu: generator.cuda() kp_detector = KPDetector(**config['model_params']['kp_detector_params'], **config['model_params']['common_params'])", "if not cpu: generator.cuda() kp_detector = KPDetector(**config['model_params']['kp_detector_params'], **config['model_params']['common_params']) if not", "np.transpose(out[\"prediction\"].data.cpu().numpy(), [0, 2, 3, 1])[0] if __name__ == \"__main__\": parser", "out = generator(source_image, kp_source=kp_source, kp_driving=kp_norm) return np.transpose(out[\"prediction\"].data.cpu().numpy(), [0, 2, 3,", "parser.add_argument(\"--relative\", dest=\"relative\", action=\"store_true\", help=\"use relative or absolute keypoint coordinates\") parser.add_argument(\"--adapt_scale\",", "imageio.imread(opt.source_image) source_image = resize(source_image, (256, 256))[..., :3] source = 
torch.tensor(source_image[np.newaxis].astype(np.float32)).permute(0,", "cpu: generator = DataParallelWithCallback(generator) kp_detector = DataParallelWithCallback(kp_detector) generator.eval() kp_detector.eval() return", "help=\"path to config\") parser.add_argument(\"--source_image\", required=True, help=\"path to source image\") parser.add_argument(\"--checkpoint\",", "dest=\"cpu\", action=\"store_true\", help=\"CPU mode\") parser.set_defaults(relative=False) parser.set_defaults(adapt_scale=False) opt = parser.parse_args() generator,", "driving_resized = torch.tensor(np.array(resized)[np.newaxis].astype(np.float32)).permute(0, 3, 1, 2) if not kp_driving_initial: kp_driving_initial", "import torch import yaml import imageio import throttle import numpy", "camera.read() while True: ret, frame = camera.read() resized = resize(frame,", "DataParallelWithCallback #from animate import normalize_kp # command = [ffmpeg, #", "kp_detector(driving_frame) kp_norm = normalize_kp( kp_source=kp_source, kp_driving=kp_driving, kp_driving_initial=kp_driving_initial, use_relative_movement=relative, use_relative_jacobian=relative, adapt_movement_scale=adapt_scale", "= torch.tensor(source_image[np.newaxis].astype(np.float32)).permute(0, 3, 1, 2) if not opt.cpu: source =", "= (kp_driving['value'] - kp_driving_initial['value']) kp_value_diff *= adapt_movement_scale kp_new['value'] = kp_value_diff", "cpu=True): kp_driving = kp_detector(driving_frame) kp_norm = normalize_kp( kp_source=kp_source, kp_driving=kp_driving, kp_driving_initial=kp_driving_initial,", "kp_source, kp_driving_initial, generator, kp_detector, relative=opt.relative, adapt_scale=opt.adapt_scale, cpu=opt.cpu ) cv2.imshow(\"frame\", fake_frame)", "np import matplotlib.pyplot as plt from argparse import ArgumentParser from", "= kp_detector(driving_resized) fake_frame = forward( source, driving_resized, kp_source, kp_driving_initial, generator,", "3, 1, 2) if not kp_driving_initial: kp_driving_initial = kp_detector(driving_resized) fake_frame", "use_relative_jacobian=False): if adapt_movement_scale: source_area = ConvexHull(kp_source['value'][0].data.cpu().numpy()).volume driving_area = ConvexHull(kp_driving_initial['value'][0].data.cpu().numpy()).volume adapt_movement_scale", "kp_detector.cuda() if cpu: checkpoint = torch.load(checkpoint_path, map_location=torch.device('cpu')) else: checkpoint =", "= resized.cuda() # y = torch.tensor(np.array(resized)) # x = y.cpu().numpy()", "if not opt.cpu: source = source.cuda() kp_source = kp_detector(source) #out", "kp_driving = kp_detector(driving_frame) kp_norm = normalize_kp( kp_source=kp_source, kp_driving=kp_driving, kp_driving_initial=kp_driving_initial, use_relative_movement=relative,", "kp_driving=kp_driving, kp_driving_initial=kp_driving_initial, use_relative_movement=relative, use_relative_jacobian=relative, adapt_movement_scale=adapt_scale ) out = generator(source_image, kp_source=kp_source,", "1, 2) if not opt.cpu: source = source.cuda() kp_source =", "= parser.parse_args() generator, kp_detector = load_checkpoints(config_path=opt.config, checkpoint_path=opt.checkpoint, cpu=opt.cpu) source_image =", "forward(source_image, driving_frame, kp_source, kp_driving_initial, generator, kp_detector, relative=True, adapt_scale=True, cpu=True): kp_driving", "on convex hull of keypoints\") parser.add_argument(\"--cpu\", dest=\"cpu\", action=\"store_true\", help=\"CPU mode\")", "cpu=opt.cpu) source_image = imageio.imread(opt.source_image) source_image = resize(source_image, (256, 256))[..., :3]", "imageio 
import throttle import numpy as np import matplotlib.pyplot as", "action=\"store_true\", help=\"adapt movement scale based on convex hull of keypoints\")", "kp_value_diff *= adapt_movement_scale kp_new['value'] = kp_value_diff + kp_source['value'] if use_relative_jacobian:", "generator, kp_detector @throttle.wrap(1, 2) def forward(source_image, driving_frame, kp_source, kp_driving_initial, generator,", "# '-pix_fmt', 'bgr24', # '-s', dimension, # '-i', '-', #", "# '-f', 'flv', # 'rtmp://10.10.10.80/live/mystream'] def normalize_kp(kp_source, kp_driving, kp_driving_initial, adapt_movement_scale=False,", "action=\"store_true\", help=\"CPU mode\") parser.set_defaults(relative=False) parser.set_defaults(adapt_scale=False) opt = parser.parse_args() generator, kp_detector", "use_relative_movement=False, use_relative_jacobian=False): if adapt_movement_scale: source_area = ConvexHull(kp_source['value'][0].data.cpu().numpy()).volume driving_area = ConvexHull(kp_driving_initial['value'][0].data.cpu().numpy()).volume", "if not kp_driving_initial: kp_driving_initial = kp_detector(driving_resized) fake_frame = forward( source,", "else: adapt_movement_scale = 1 kp_new = {k: v for k,", "import imageio import throttle import numpy as np import matplotlib.pyplot", "'-s', dimension, # '-i', '-', # '-c:v', 'libx264', # '-pix_fmt',", "= ConvexHull(kp_source['value'][0].data.cpu().numpy()).volume driving_area = ConvexHull(kp_driving_initial['value'][0].data.cpu().numpy()).volume adapt_movement_scale = np.sqrt(source_area) / np.sqrt(driving_area)", "yaml.load(f) generator = OcclusionAwareGenerator(**config['model_params']['generator_params'], **config['model_params']['common_params']) if not cpu: generator.cuda() kp_detector", "not cpu: generator.cuda() kp_detector = KPDetector(**config['model_params']['kp_detector_params'], **config['model_params']['common_params']) if not cpu:", "parser.add_argument(\"--checkpoint\", default=\"vox-cpk.pth.tar\", help=\"path to checkpoint\") parser.add_argument(\"--relative\", dest=\"relative\", action=\"store_true\", help=\"use relative", "checkpoint_path, cpu=False): with open(config_path) as f: config = yaml.load(f) generator", "x = y.permute(1, 2, 0) # plt.imshow(np.array(image)) # plt.show() driving_resized", "2) def forward(source_image, driving_frame, kp_source, kp_driving_initial, generator, kp_detector, relative=True, adapt_scale=True,", "parser.add_argument(\"--source_image\", required=True, help=\"path to source image\") parser.add_argument(\"--checkpoint\", default=\"vox-cpk.pth.tar\", help=\"path to", "# image = cv2.cvtColor(x, cv2.COLOR_BGR2RGB) # # x = y.permute(1,", "command = [ffmpeg, # '-y', # '-f', 'rawvideo', # '-vcodec','rawvideo',", "[ffmpeg, # '-y', # '-f', 'rawvideo', # '-vcodec','rawvideo', # '-pix_fmt',", "sync_batchnorm import DataParallelWithCallback #from animate import normalize_kp # command =", "source, driving_resized, kp_source, kp_driving_initial, generator, kp_detector, relative=opt.relative, adapt_scale=opt.adapt_scale, cpu=opt.cpu )", "parser.set_defaults(relative=False) parser.set_defaults(adapt_scale=False) opt = parser.parse_args() generator, kp_detector = load_checkpoints(config_path=opt.config, checkpoint_path=opt.checkpoint,", "= None camera = cv2.VideoCapture(0) ret, frame = camera.read() while", "# y = torch.tensor(np.array(resized)) # x = y.cpu().numpy() # image", "3, 1) #print(plt_driving.shape) #plt.imshow(x) #plt.show() if cv2.waitKey(1) & 0xFF ==", "# 'rtmp://10.10.10.80/live/mystream'] def normalize_kp(kp_source, kp_driving, 
kp_driving_initial, adapt_movement_scale=False, use_relative_movement=False, use_relative_jacobian=False): if", "cpu=False): with open(config_path) as f: config = yaml.load(f) generator =", "1, 2) if not kp_driving_initial: kp_driving_initial = kp_detector(driving_resized) fake_frame =", "kp_driving_initial['value']) kp_value_diff *= adapt_movement_scale kp_new['value'] = kp_value_diff + kp_source['value'] if", "generator, kp_detector, relative=opt.relative, adapt_scale=opt.adapt_scale, cpu=opt.cpu ) cv2.imshow(\"frame\", fake_frame) #x =", "kp_detector = load_checkpoints(config_path=opt.config, checkpoint_path=opt.checkpoint, cpu=opt.cpu) source_image = imageio.imread(opt.source_image) source_image =", "kp_detector @throttle.wrap(1, 2) def forward(source_image, driving_frame, kp_source, kp_driving_initial, generator, kp_detector,", "import normalize_kp # command = [ffmpeg, # '-y', # '-f',", "scale based on convex hull of keypoints\") parser.add_argument(\"--cpu\", dest=\"cpu\", action=\"store_true\",", "axis=(0,)) #x = driving_resized[0].permute(1, 2, 0) # plt_driving = driving_resized", "from scipy.spatial import ConvexHull from modules.generator import OcclusionAwareGenerator from modules.keypoint_detector", "not opt.cpu: resized = resized.cuda() # y = torch.tensor(np.array(resized)) #", "= resize(source_image, (256, 256))[..., :3] source = torch.tensor(source_image[np.newaxis].astype(np.float32)).permute(0, 3, 1,", "kp_new def load_checkpoints(config_path, checkpoint_path, cpu=False): with open(config_path) as f: config", "# x = y.cpu().numpy() # image = cv2.cvtColor(x, cv2.COLOR_BGR2RGB) #", "else: checkpoint = torch.load(checkpoint_path) generator.load_state_dict(checkpoint['generator']) kp_detector.load_state_dict(checkpoint['kp_detector']) if not cpu: generator", "not cpu: generator = DataParallelWithCallback(generator) kp_detector = DataParallelWithCallback(kp_detector) generator.eval() kp_detector.eval()", "keypoint coordinates\") parser.add_argument(\"--adapt_scale\", dest=\"adapt_scale\", action=\"store_true\", help=\"adapt movement scale based on", "use_relative_movement=relative, use_relative_jacobian=relative, adapt_movement_scale=adapt_scale ) out = generator(source_image, kp_source=kp_source, kp_driving=kp_norm) return", "kp_source['value'] if use_relative_jacobian: jacobian_diff = torch.matmul(kp_driving['jacobian'], torch.inverse(kp_driving_initial['jacobian'])) kp_new['jacobian'] = torch.matmul(jacobian_diff,", "ArgumentParser from skimage.transform import resize from scipy.spatial import ConvexHull from", "adapt_movement_scale=False, use_relative_movement=False, use_relative_jacobian=False): if adapt_movement_scale: source_area = ConvexHull(kp_source['value'][0].data.cpu().numpy()).volume driving_area =", "DataParallelWithCallback(generator) kp_detector = DataParallelWithCallback(kp_detector) generator.eval() kp_detector.eval() return generator, kp_detector @throttle.wrap(1,", "torch import yaml import imageio import throttle import numpy as", "config = yaml.load(f) generator = OcclusionAwareGenerator(**config['model_params']['generator_params'], **config['model_params']['common_params']) if not cpu:", "default=\"vox-cpk.pth.tar\", help=\"path to checkpoint\") parser.add_argument(\"--relative\", dest=\"relative\", action=\"store_true\", help=\"use relative or", "kp_driving_initial = kp_detector(driving_resized) fake_frame = forward( source, driving_resized, kp_source, kp_driving_initial,", "import resize from scipy.spatial import ConvexHull from modules.generator 
import OcclusionAwareGenerator", "'-pix_fmt', 'yuv420p', # '-preset', 'ultrafast', # '-f', 'flv', # 'rtmp://10.10.10.80/live/mystream']", "# command = [ffmpeg, # '-y', # '-f', 'rawvideo', #", "v for k, v in kp_driving.items()} if use_relative_movement: kp_value_diff =", "kp_driving.items()} if use_relative_movement: kp_value_diff = (kp_driving['value'] - kp_driving_initial['value']) kp_value_diff *=", "in kp_driving.items()} if use_relative_movement: kp_value_diff = (kp_driving['value'] - kp_driving_initial['value']) kp_value_diff", "= load_checkpoints(config_path=opt.config, checkpoint_path=opt.checkpoint, cpu=opt.cpu) source_image = imageio.imread(opt.source_image) source_image = resize(source_image,", "cpu: checkpoint = torch.load(checkpoint_path, map_location=torch.device('cpu')) else: checkpoint = torch.load(checkpoint_path) generator.load_state_dict(checkpoint['generator'])", "= resize(frame, (256, 256))[..., :3] if not opt.cpu: resized =", "kp_new['value'] = kp_value_diff + kp_source['value'] if use_relative_jacobian: jacobian_diff = torch.matmul(kp_driving['jacobian'],", "DataParallelWithCallback(kp_detector) generator.eval() kp_detector.eval() return generator, kp_detector @throttle.wrap(1, 2) def forward(source_image,", "1])[0] if __name__ == \"__main__\": parser = ArgumentParser() parser.add_argument(\"--config\", required=True,", "kp_driving_initial = None camera = cv2.VideoCapture(0) ret, frame = camera.read()", "= driving_resized #permute(2, 3, 1) #print(plt_driving.shape) #plt.imshow(x) #plt.show() if cv2.waitKey(1)", "driving_resized #permute(2, 3, 1) #print(plt_driving.shape) #plt.imshow(x) #plt.show() if cv2.waitKey(1) &", "= forward( source, driving_resized, kp_source, kp_driving_initial, generator, kp_detector, relative=opt.relative, adapt_scale=opt.adapt_scale,", "'-c:v', 'libx264', # '-pix_fmt', 'yuv420p', # '-preset', 'ultrafast', # '-f',", "kp_detector.load_state_dict(checkpoint['kp_detector']) if not cpu: generator = DataParallelWithCallback(generator) kp_detector = DataParallelWithCallback(kp_detector)", "help=\"path to checkpoint\") parser.add_argument(\"--relative\", dest=\"relative\", action=\"store_true\", help=\"use relative or absolute", "'rtmp://10.10.10.80/live/mystream'] def normalize_kp(kp_source, kp_driving, kp_driving_initial, adapt_movement_scale=False, use_relative_movement=False, use_relative_jacobian=False): if adapt_movement_scale:", "y.cpu().numpy() # image = cv2.cvtColor(x, cv2.COLOR_BGR2RGB) # # x =", "parser.add_argument(\"--cpu\", dest=\"cpu\", action=\"store_true\", help=\"CPU mode\") parser.set_defaults(relative=False) parser.set_defaults(adapt_scale=False) opt = parser.parse_args()", "- kp_driving_initial['value']) kp_value_diff *= adapt_movement_scale kp_new['value'] = kp_value_diff + kp_source['value']", "(256, 256)) kp_driving_initial = None camera = cv2.VideoCapture(0) ret, frame", "frame = camera.read() resized = resize(frame, (256, 256))[..., :3] if", ":3] if not opt.cpu: resized = resized.cuda() # y =", "help=\"CPU mode\") parser.set_defaults(relative=False) parser.set_defaults(adapt_scale=False) opt = parser.parse_args() generator, kp_detector =", "= KPDetector(**config['model_params']['kp_detector_params'], **config['model_params']['common_params']) if not cpu: kp_detector.cuda() if cpu: checkpoint", "plt.show() driving_resized = torch.tensor(np.array(resized)[np.newaxis].astype(np.float32)).permute(0, 3, 1, 2) if not kp_driving_initial:", "opt = parser.parse_args() generator, kp_detector = load_checkpoints(config_path=opt.config, 
checkpoint_path=opt.checkpoint, cpu=opt.cpu) source_image", "256))[..., :3] if not opt.cpu: resized = resized.cuda() # y", ") out = generator(source_image, kp_source=kp_source, kp_driving=kp_norm) return np.transpose(out[\"prediction\"].data.cpu().numpy(), [0, 2,", "to config\") parser.add_argument(\"--source_image\", required=True, help=\"path to source image\") parser.add_argument(\"--checkpoint\", default=\"vox-cpk.pth.tar\",", "hull of keypoints\") parser.add_argument(\"--cpu\", dest=\"cpu\", action=\"store_true\", help=\"CPU mode\") parser.set_defaults(relative=False) parser.set_defaults(adapt_scale=False)", "#plt.imshow(x) #plt.show() if cv2.waitKey(1) & 0xFF == ord('q'): break camera.release()", "adapt_scale=opt.adapt_scale, cpu=opt.cpu ) cv2.imshow(\"frame\", fake_frame) #x = np.squeeze(driving_resized, axis=(0,)) #x", "= y.cpu().numpy() # image = cv2.cvtColor(x, cv2.COLOR_BGR2RGB) # # x", "= OcclusionAwareGenerator(**config['model_params']['generator_params'], **config['model_params']['common_params']) if not cpu: generator.cuda() kp_detector = KPDetector(**config['model_params']['kp_detector_params'],", "import OcclusionAwareGenerator from modules.keypoint_detector import KPDetector from sync_batchnorm import DataParallelWithCallback", "kp_norm = normalize_kp( kp_source=kp_source, kp_driving=kp_driving, kp_driving_initial=kp_driving_initial, use_relative_movement=relative, use_relative_jacobian=relative, adapt_movement_scale=adapt_scale )", "adapt_scale=True, cpu=True): kp_driving = kp_detector(driving_frame) kp_norm = normalize_kp( kp_source=kp_source, kp_driving=kp_driving,", "= np.sqrt(source_area) / np.sqrt(driving_area) else: adapt_movement_scale = 1 kp_new =", "resize(source_image, (256, 256))[..., :3] source = torch.tensor(source_image[np.newaxis].astype(np.float32)).permute(0, 3, 1, 2)", "import ConvexHull from modules.generator import OcclusionAwareGenerator from modules.keypoint_detector import KPDetector", "# plt_driving = driving_resized #permute(2, 3, 1) #print(plt_driving.shape) #plt.imshow(x) #plt.show()", "x = y.cpu().numpy() # image = cv2.cvtColor(x, cv2.COLOR_BGR2RGB) # #", "use_relative_jacobian: jacobian_diff = torch.matmul(kp_driving['jacobian'], torch.inverse(kp_driving_initial['jacobian'])) kp_new['jacobian'] = torch.matmul(jacobian_diff, kp_source['jacobian']) return", "'bgr24', # '-s', dimension, # '-i', '-', # '-c:v', 'libx264',", "help=\"use relative or absolute keypoint coordinates\") parser.add_argument(\"--adapt_scale\", dest=\"adapt_scale\", action=\"store_true\", help=\"adapt", "torch.tensor(source_image[np.newaxis].astype(np.float32)).permute(0, 3, 1, 2) if not opt.cpu: source = source.cuda()", "plt from argparse import ArgumentParser from skimage.transform import resize from", "f: config = yaml.load(f) generator = OcclusionAwareGenerator(**config['model_params']['generator_params'], **config['model_params']['common_params']) if not", "normalize_kp(kp_source, kp_driving, kp_driving_initial, adapt_movement_scale=False, use_relative_movement=False, use_relative_jacobian=False): if adapt_movement_scale: source_area =", "= imageio.imread(opt.source_image) source_image = resize(source_image, (256, 256))[..., :3] source =", ") cv2.imshow(\"frame\", fake_frame) #x = np.squeeze(driving_resized, axis=(0,)) #x = driving_resized[0].permute(1,", "= y.permute(1, 2, 0) # plt.imshow(np.array(image)) # plt.show() driving_resized =", "KPDetector from sync_batchnorm import DataParallelWithCallback #from animate import normalize_kp #", "resized = resize(frame, (256, 
256))[..., :3] if not opt.cpu: resized", "\"__main__\": parser = ArgumentParser() parser.add_argument(\"--config\", required=True, help=\"path to config\") parser.add_argument(\"--source_image\",", "adapt_movement_scale: source_area = ConvexHull(kp_source['value'][0].data.cpu().numpy()).volume driving_area = ConvexHull(kp_driving_initial['value'][0].data.cpu().numpy()).volume adapt_movement_scale = np.sqrt(source_area)", "if use_relative_movement: kp_value_diff = (kp_driving['value'] - kp_driving_initial['value']) kp_value_diff *= adapt_movement_scale", "resized = resized.cuda() # y = torch.tensor(np.array(resized)) # x =", "(kp_driving['value'] - kp_driving_initial['value']) kp_value_diff *= adapt_movement_scale kp_new['value'] = kp_value_diff +", "= [ffmpeg, # '-y', # '-f', 'rawvideo', # '-vcodec','rawvideo', #", "checkpoint = torch.load(checkpoint_path) generator.load_state_dict(checkpoint['generator']) kp_detector.load_state_dict(checkpoint['kp_detector']) if not cpu: generator =", "generator = DataParallelWithCallback(generator) kp_detector = DataParallelWithCallback(kp_detector) generator.eval() kp_detector.eval() return generator,", "image\") parser.add_argument(\"--checkpoint\", default=\"vox-cpk.pth.tar\", help=\"path to checkpoint\") parser.add_argument(\"--relative\", dest=\"relative\", action=\"store_true\", help=\"use", "modules.keypoint_detector import KPDetector from sync_batchnorm import DataParallelWithCallback #from animate import", "camera = cv2.VideoCapture(0) ret, frame = camera.read() while True: ret,", "def normalize_kp(kp_source, kp_driving, kp_driving_initial, adapt_movement_scale=False, use_relative_movement=False, use_relative_jacobian=False): if adapt_movement_scale: source_area", "cv2.VideoWriter_fourcc('M','J','P','G'), 30, (256, 256)) kp_driving_initial = None camera = cv2.VideoCapture(0)", "parser.parse_args() generator, kp_detector = load_checkpoints(config_path=opt.config, checkpoint_path=opt.checkpoint, cpu=opt.cpu) source_image = imageio.imread(opt.source_image)", "y = torch.tensor(np.array(resized)) # x = y.cpu().numpy() # image =", "kp_detector = DataParallelWithCallback(kp_detector) generator.eval() kp_detector.eval() return generator, kp_detector @throttle.wrap(1, 2)", "plt.imshow(np.array(image)) # plt.show() driving_resized = torch.tensor(np.array(resized)[np.newaxis].astype(np.float32)).permute(0, 3, 1, 2) if", "#plt.show() if cv2.waitKey(1) & 0xFF == ord('q'): break camera.release() cv2.destroyAllWindows()", "kp_driving_initial, adapt_movement_scale=False, use_relative_movement=False, use_relative_jacobian=False): if adapt_movement_scale: source_area = ConvexHull(kp_source['value'][0].data.cpu().numpy()).volume driving_area", "ArgumentParser() parser.add_argument(\"--config\", required=True, help=\"path to config\") parser.add_argument(\"--source_image\", required=True, help=\"path to", "normalize_kp( kp_source=kp_source, kp_driving=kp_driving, kp_driving_initial=kp_driving_initial, use_relative_movement=relative, use_relative_jacobian=relative, adapt_movement_scale=adapt_scale ) out =", "skimage.transform import resize from scipy.spatial import ConvexHull from modules.generator import", "kp_new = {k: v for k, v in kp_driving.items()} if", "'-vcodec','rawvideo', # '-pix_fmt', 'bgr24', # '-s', dimension, # '-i', '-',", "2) if not opt.cpu: source = source.cuda() kp_source = kp_detector(source)", "convex hull of keypoints\") parser.add_argument(\"--cpu\", dest=\"cpu\", action=\"store_true\", help=\"CPU mode\") parser.set_defaults(relative=False)", 
"resize(frame, (256, 256))[..., :3] if not opt.cpu: resized = resized.cuda()", "if use_relative_jacobian: jacobian_diff = torch.matmul(kp_driving['jacobian'], torch.inverse(kp_driving_initial['jacobian'])) kp_new['jacobian'] = torch.matmul(jacobian_diff, kp_source['jacobian'])", "**config['model_params']['common_params']) if not cpu: generator.cuda() kp_detector = KPDetector(**config['model_params']['kp_detector_params'], **config['model_params']['common_params']) if", "from sync_batchnorm import DataParallelWithCallback #from animate import normalize_kp # command", "0) # plt.imshow(np.array(image)) # plt.show() driving_resized = torch.tensor(np.array(resized)[np.newaxis].astype(np.float32)).permute(0, 3, 1,", "or absolute keypoint coordinates\") parser.add_argument(\"--adapt_scale\", dest=\"adapt_scale\", action=\"store_true\", help=\"adapt movement scale", "parser.set_defaults(adapt_scale=False) opt = parser.parse_args() generator, kp_detector = load_checkpoints(config_path=opt.config, checkpoint_path=opt.checkpoint, cpu=opt.cpu)", "cv2.VideoWriter('outpy.avi', cv2.VideoWriter_fourcc('M','J','P','G'), 30, (256, 256)) kp_driving_initial = None camera =", "for k, v in kp_driving.items()} if use_relative_movement: kp_value_diff = (kp_driving['value']", "source image\") parser.add_argument(\"--checkpoint\", default=\"vox-cpk.pth.tar\", help=\"path to checkpoint\") parser.add_argument(\"--relative\", dest=\"relative\", action=\"store_true\",", "if not cpu: generator = DataParallelWithCallback(generator) kp_detector = DataParallelWithCallback(kp_detector) generator.eval()", "= source.cuda() kp_source = kp_detector(source) #out = cv2.VideoWriter('outpy.avi', cv2.VideoWriter_fourcc('M','J','P','G'), 30,", "# '-vcodec','rawvideo', # '-pix_fmt', 'bgr24', # '-s', dimension, # '-i',", "from modules.generator import OcclusionAwareGenerator from modules.keypoint_detector import KPDetector from sync_batchnorm", "kp_driving_initial, generator, kp_detector, relative=opt.relative, adapt_scale=opt.adapt_scale, cpu=opt.cpu ) cv2.imshow(\"frame\", fake_frame) #x", "**config['model_params']['common_params']) if not cpu: kp_detector.cuda() if cpu: checkpoint = torch.load(checkpoint_path,", "'libx264', # '-pix_fmt', 'yuv420p', # '-preset', 'ultrafast', # '-f', 'flv',", "#permute(2, 3, 1) #print(plt_driving.shape) #plt.imshow(x) #plt.show() if cv2.waitKey(1) & 0xFF", "ConvexHull from modules.generator import OcclusionAwareGenerator from modules.keypoint_detector import KPDetector from", "# plt.imshow(np.array(image)) # plt.show() driving_resized = torch.tensor(np.array(resized)[np.newaxis].astype(np.float32)).permute(0, 3, 1, 2)", "'-pix_fmt', 'bgr24', # '-s', dimension, # '-i', '-', # '-c:v',", "@throttle.wrap(1, 2) def forward(source_image, driving_frame, kp_source, kp_driving_initial, generator, kp_detector, relative=True,", "'-', # '-c:v', 'libx264', # '-pix_fmt', 'yuv420p', # '-preset', 'ultrafast',", "np.sqrt(driving_area) else: adapt_movement_scale = 1 kp_new = {k: v for", "= torch.load(checkpoint_path) generator.load_state_dict(checkpoint['generator']) kp_detector.load_state_dict(checkpoint['kp_detector']) if not cpu: generator = DataParallelWithCallback(generator)", "'rawvideo', # '-vcodec','rawvideo', # '-pix_fmt', 'bgr24', # '-s', dimension, #", "load_checkpoints(config_path=opt.config, checkpoint_path=opt.checkpoint, cpu=opt.cpu) source_image = imageio.imread(opt.source_image) source_image = resize(source_image, (256,", "1) #print(plt_driving.shape) #plt.imshow(x) #plt.show() if cv2.waitKey(1) & 0xFF 
== ord('q'):", "0) # plt_driving = driving_resized #permute(2, 3, 1) #print(plt_driving.shape) #plt.imshow(x)", "'ultrafast', # '-f', 'flv', # 'rtmp://10.10.10.80/live/mystream'] def normalize_kp(kp_source, kp_driving, kp_driving_initial,", "checkpoint\") parser.add_argument(\"--relative\", dest=\"relative\", action=\"store_true\", help=\"use relative or absolute keypoint coordinates\")", "kp_source = kp_detector(source) #out = cv2.VideoWriter('outpy.avi', cv2.VideoWriter_fourcc('M','J','P','G'), 30, (256, 256))", "= driving_resized[0].permute(1, 2, 0) # plt_driving = driving_resized #permute(2, 3,", "source.cuda() kp_source = kp_detector(source) #out = cv2.VideoWriter('outpy.avi', cv2.VideoWriter_fourcc('M','J','P','G'), 30, (256,", "'-f', 'rawvideo', # '-vcodec','rawvideo', # '-pix_fmt', 'bgr24', # '-s', dimension,", "cv2.COLOR_BGR2RGB) # # x = y.permute(1, 2, 0) # plt.imshow(np.array(image))", "= DataParallelWithCallback(kp_detector) generator.eval() kp_detector.eval() return generator, kp_detector @throttle.wrap(1, 2) def", "= ArgumentParser() parser.add_argument(\"--config\", required=True, help=\"path to config\") parser.add_argument(\"--source_image\", required=True, help=\"path", "cv2.VideoCapture(0) ret, frame = camera.read() while True: ret, frame =", "adapt_movement_scale kp_new['value'] = kp_value_diff + kp_source['value'] if use_relative_jacobian: jacobian_diff =", "plt_driving = driving_resized #permute(2, 3, 1) #print(plt_driving.shape) #plt.imshow(x) #plt.show() if", "v in kp_driving.items()} if use_relative_movement: kp_value_diff = (kp_driving['value'] - kp_driving_initial['value'])", "np.squeeze(driving_resized, axis=(0,)) #x = driving_resized[0].permute(1, 2, 0) # plt_driving =", "action=\"store_true\", help=\"use relative or absolute keypoint coordinates\") parser.add_argument(\"--adapt_scale\", dest=\"adapt_scale\", action=\"store_true\",", "movement scale based on convex hull of keypoints\") parser.add_argument(\"--cpu\", dest=\"cpu\",", "None camera = cv2.VideoCapture(0) ret, frame = camera.read() while True:", "kp_driving_initial=kp_driving_initial, use_relative_movement=relative, use_relative_jacobian=relative, adapt_movement_scale=adapt_scale ) out = generator(source_image, kp_source=kp_source, kp_driving=kp_norm)", "= {k: v for k, v in kp_driving.items()} if use_relative_movement:", "matplotlib.pyplot as plt from argparse import ArgumentParser from skimage.transform import", "opt.cpu: source = source.cuda() kp_source = kp_detector(source) #out = cv2.VideoWriter('outpy.avi',", "def forward(source_image, driving_frame, kp_source, kp_driving_initial, generator, kp_detector, relative=True, adapt_scale=True, cpu=True):", "2) if not kp_driving_initial: kp_driving_initial = kp_detector(driving_resized) fake_frame = forward(", "cpu=opt.cpu ) cv2.imshow(\"frame\", fake_frame) #x = np.squeeze(driving_resized, axis=(0,)) #x =", "np.sqrt(source_area) / np.sqrt(driving_area) else: adapt_movement_scale = 1 kp_new = {k:", "#x = driving_resized[0].permute(1, 2, 0) # plt_driving = driving_resized #permute(2,", "256))[..., :3] source = torch.tensor(source_image[np.newaxis].astype(np.float32)).permute(0, 3, 1, 2) if not", "#x = np.squeeze(driving_resized, axis=(0,)) #x = driving_resized[0].permute(1, 2, 0) #", "generator(source_image, kp_source=kp_source, kp_driving=kp_norm) return np.transpose(out[\"prediction\"].data.cpu().numpy(), [0, 2, 3, 1])[0] if", "not opt.cpu: source = source.cuda() kp_source = kp_detector(source) #out =", "yaml import imageio import throttle import numpy 
as np import", "torch.load(checkpoint_path, map_location=torch.device('cpu')) else: checkpoint = torch.load(checkpoint_path) generator.load_state_dict(checkpoint['generator']) kp_detector.load_state_dict(checkpoint['kp_detector']) if not", "camera.read() resized = resize(frame, (256, 256))[..., :3] if not opt.cpu:", "animate import normalize_kp # command = [ffmpeg, # '-y', #", "'-preset', 'ultrafast', # '-f', 'flv', # 'rtmp://10.10.10.80/live/mystream'] def normalize_kp(kp_source, kp_driving,", "source = source.cuda() kp_source = kp_detector(source) #out = cv2.VideoWriter('outpy.avi', cv2.VideoWriter_fourcc('M','J','P','G'),", "ret, frame = camera.read() resized = resize(frame, (256, 256))[..., :3]", "cpu: generator.cuda() kp_detector = KPDetector(**config['model_params']['kp_detector_params'], **config['model_params']['common_params']) if not cpu: kp_detector.cuda()", "kp_source['jacobian']) return kp_new def load_checkpoints(config_path, checkpoint_path, cpu=False): with open(config_path) as", "dest=\"relative\", action=\"store_true\", help=\"use relative or absolute keypoint coordinates\") parser.add_argument(\"--adapt_scale\", dest=\"adapt_scale\",", "normalize_kp # command = [ffmpeg, # '-y', # '-f', 'rawvideo',", "= torch.matmul(jacobian_diff, kp_source['jacobian']) return kp_new def load_checkpoints(config_path, checkpoint_path, cpu=False): with", "= camera.read() resized = resize(frame, (256, 256))[..., :3] if not", "from modules.keypoint_detector import KPDetector from sync_batchnorm import DataParallelWithCallback #from animate", "= DataParallelWithCallback(generator) kp_detector = DataParallelWithCallback(kp_detector) generator.eval() kp_detector.eval() return generator, kp_detector", "kp_detector, relative=True, adapt_scale=True, cpu=True): kp_driving = kp_detector(driving_frame) kp_norm = normalize_kp(", "open(config_path) as f: config = yaml.load(f) generator = OcclusionAwareGenerator(**config['model_params']['generator_params'], **config['model_params']['common_params'])", "as plt from argparse import ArgumentParser from skimage.transform import resize", "numpy as np import matplotlib.pyplot as plt from argparse import", "import KPDetector from sync_batchnorm import DataParallelWithCallback #from animate import normalize_kp", "ConvexHull(kp_driving_initial['value'][0].data.cpu().numpy()).volume adapt_movement_scale = np.sqrt(source_area) / np.sqrt(driving_area) else: adapt_movement_scale = 1", "relative=opt.relative, adapt_scale=opt.adapt_scale, cpu=opt.cpu ) cv2.imshow(\"frame\", fake_frame) #x = np.squeeze(driving_resized, axis=(0,))", "parser.add_argument(\"--adapt_scale\", dest=\"adapt_scale\", action=\"store_true\", help=\"adapt movement scale based on convex hull", "3, 1, 2) if not opt.cpu: source = source.cuda() kp_source", "torch.inverse(kp_driving_initial['jacobian'])) kp_new['jacobian'] = torch.matmul(jacobian_diff, kp_source['jacobian']) return kp_new def load_checkpoints(config_path, checkpoint_path,", "# '-i', '-', # '-c:v', 'libx264', # '-pix_fmt', 'yuv420p', #", "/ np.sqrt(driving_area) else: adapt_movement_scale = 1 kp_new = {k: v", "2, 0) # plt_driving = driving_resized #permute(2, 3, 1) #print(plt_driving.shape)", "#print(plt_driving.shape) #plt.imshow(x) #plt.show() if cv2.waitKey(1) & 0xFF == ord('q'): break", "fake_frame = forward( source, driving_resized, kp_source, kp_driving_initial, generator, kp_detector, relative=opt.relative,", "of keypoints\") parser.add_argument(\"--cpu\", dest=\"cpu\", action=\"store_true\", help=\"CPU mode\") 
parser.set_defaults(relative=False) parser.set_defaults(adapt_scale=False) opt", "#from animate import normalize_kp # command = [ffmpeg, # '-y',", "ConvexHull(kp_source['value'][0].data.cpu().numpy()).volume driving_area = ConvexHull(kp_driving_initial['value'][0].data.cpu().numpy()).volume adapt_movement_scale = np.sqrt(source_area) / np.sqrt(driving_area) else:", "source_image = imageio.imread(opt.source_image) source_image = resize(source_image, (256, 256))[..., :3] source", "2, 0) # plt.imshow(np.array(image)) # plt.show() driving_resized = torch.tensor(np.array(resized)[np.newaxis].astype(np.float32)).permute(0, 3,", "# plt.show() driving_resized = torch.tensor(np.array(resized)[np.newaxis].astype(np.float32)).permute(0, 3, 1, 2) if not", "generator.eval() kp_detector.eval() return generator, kp_detector @throttle.wrap(1, 2) def forward(source_image, driving_frame,", "import yaml import imageio import throttle import numpy as np", "to checkpoint\") parser.add_argument(\"--relative\", dest=\"relative\", action=\"store_true\", help=\"use relative or absolute keypoint", "fake_frame) #x = np.squeeze(driving_resized, axis=(0,)) #x = driving_resized[0].permute(1, 2, 0)", "not kp_driving_initial: kp_driving_initial = kp_detector(driving_resized) fake_frame = forward( source, driving_resized,", "config\") parser.add_argument(\"--source_image\", required=True, help=\"path to source image\") parser.add_argument(\"--checkpoint\", default=\"vox-cpk.pth.tar\", help=\"path", "cv2.imshow(\"frame\", fake_frame) #x = np.squeeze(driving_resized, axis=(0,)) #x = driving_resized[0].permute(1, 2,", "generator.load_state_dict(checkpoint['generator']) kp_detector.load_state_dict(checkpoint['kp_detector']) if not cpu: generator = DataParallelWithCallback(generator) kp_detector =", "adapt_movement_scale = 1 kp_new = {k: v for k, v", "= torch.matmul(kp_driving['jacobian'], torch.inverse(kp_driving_initial['jacobian'])) kp_new['jacobian'] = torch.matmul(jacobian_diff, kp_source['jacobian']) return kp_new def", "= torch.tensor(np.array(resized)) # x = y.cpu().numpy() # image = cv2.cvtColor(x,", "throttle import numpy as np import matplotlib.pyplot as plt from", "3, 1])[0] if __name__ == \"__main__\": parser = ArgumentParser() parser.add_argument(\"--config\",", "kp_detector = KPDetector(**config['model_params']['kp_detector_params'], **config['model_params']['common_params']) if not cpu: kp_detector.cuda() if cpu:", "opt.cpu: resized = resized.cuda() # y = torch.tensor(np.array(resized)) # x", "kp_detector.eval() return generator, kp_detector @throttle.wrap(1, 2) def forward(source_image, driving_frame, kp_source,", "use_relative_movement: kp_value_diff = (kp_driving['value'] - kp_driving_initial['value']) kp_value_diff *= adapt_movement_scale kp_new['value']", "adapt_movement_scale=adapt_scale ) out = generator(source_image, kp_source=kp_source, kp_driving=kp_norm) return np.transpose(out[\"prediction\"].data.cpu().numpy(), [0,", "ret, frame = camera.read() while True: ret, frame = camera.read()", "generator, kp_detector, relative=True, adapt_scale=True, cpu=True): kp_driving = kp_detector(driving_frame) kp_norm =", "# # x = y.permute(1, 2, 0) # plt.imshow(np.array(image)) #", "{k: v for k, v in kp_driving.items()} if use_relative_movement: kp_value_diff", "'yuv420p', # '-preset', 'ultrafast', # '-f', 'flv', # 'rtmp://10.10.10.80/live/mystream'] def", "import matplotlib.pyplot as plt from argparse import ArgumentParser from skimage.transform", "relative=True, adapt_scale=True, cpu=True): kp_driving = 
kp_detector(driving_frame) kp_norm = normalize_kp( kp_source=kp_source,", "relative or absolute keypoint coordinates\") parser.add_argument(\"--adapt_scale\", dest=\"adapt_scale\", action=\"store_true\", help=\"adapt movement", "image = cv2.cvtColor(x, cv2.COLOR_BGR2RGB) # # x = y.permute(1, 2,", "256)) kp_driving_initial = None camera = cv2.VideoCapture(0) ret, frame =", "kp_source=kp_source, kp_driving=kp_driving, kp_driving_initial=kp_driving_initial, use_relative_movement=relative, use_relative_jacobian=relative, adapt_movement_scale=adapt_scale ) out = generator(source_image,", "not cpu: kp_detector.cuda() if cpu: checkpoint = torch.load(checkpoint_path, map_location=torch.device('cpu')) else:", "= yaml.load(f) generator = OcclusionAwareGenerator(**config['model_params']['generator_params'], **config['model_params']['common_params']) if not cpu: generator.cuda()", "# '-s', dimension, # '-i', '-', # '-c:v', 'libx264', #", "return generator, kp_detector @throttle.wrap(1, 2) def forward(source_image, driving_frame, kp_source, kp_driving_initial,", "y.permute(1, 2, 0) # plt.imshow(np.array(image)) # plt.show() driving_resized = torch.tensor(np.array(resized)[np.newaxis].astype(np.float32)).permute(0,", "kp_driving, kp_driving_initial, adapt_movement_scale=False, use_relative_movement=False, use_relative_jacobian=False): if adapt_movement_scale: source_area = ConvexHull(kp_source['value'][0].data.cpu().numpy()).volume", "= cv2.cvtColor(x, cv2.COLOR_BGR2RGB) # # x = y.permute(1, 2, 0)", "resized.cuda() # y = torch.tensor(np.array(resized)) # x = y.cpu().numpy() #", "as np import matplotlib.pyplot as plt from argparse import ArgumentParser", "# '-c:v', 'libx264', # '-pix_fmt', 'yuv420p', # '-preset', 'ultrafast', #", "# '-preset', 'ultrafast', # '-f', 'flv', # 'rtmp://10.10.10.80/live/mystream'] def normalize_kp(kp_source,", "# '-y', # '-f', 'rawvideo', # '-vcodec','rawvideo', # '-pix_fmt', 'bgr24',", "jacobian_diff = torch.matmul(kp_driving['jacobian'], torch.inverse(kp_driving_initial['jacobian'])) kp_new['jacobian'] = torch.matmul(jacobian_diff, kp_source['jacobian']) return kp_new", "30, (256, 256)) kp_driving_initial = None camera = cv2.VideoCapture(0) ret,", "map_location=torch.device('cpu')) else: checkpoint = torch.load(checkpoint_path) generator.load_state_dict(checkpoint['generator']) kp_detector.load_state_dict(checkpoint['kp_detector']) if not cpu:", "if cpu: checkpoint = torch.load(checkpoint_path, map_location=torch.device('cpu')) else: checkpoint = torch.load(checkpoint_path)", "__name__ == \"__main__\": parser = ArgumentParser() parser.add_argument(\"--config\", required=True, help=\"path to", "with open(config_path) as f: config = yaml.load(f) generator = OcclusionAwareGenerator(**config['model_params']['generator_params'],", "frame = camera.read() while True: ret, frame = camera.read() resized", "def load_checkpoints(config_path, checkpoint_path, cpu=False): with open(config_path) as f: config =", "= kp_detector(source) #out = cv2.VideoWriter('outpy.avi', cv2.VideoWriter_fourcc('M','J','P','G'), 30, (256, 256)) kp_driving_initial", "help=\"path to source image\") parser.add_argument(\"--checkpoint\", default=\"vox-cpk.pth.tar\", help=\"path to checkpoint\") parser.add_argument(\"--relative\",", "dest=\"adapt_scale\", action=\"store_true\", help=\"adapt movement scale based on convex hull of", "scipy.spatial import ConvexHull from modules.generator import OcclusionAwareGenerator from modules.keypoint_detector import", "kp_value_diff = (kp_driving['value'] - 
kp_driving_initial['value']) kp_value_diff *= adapt_movement_scale kp_new['value'] =", "'-f', 'flv', # 'rtmp://10.10.10.80/live/mystream'] def normalize_kp(kp_source, kp_driving, kp_driving_initial, adapt_movement_scale=False, use_relative_movement=False,", "source_area = ConvexHull(kp_source['value'][0].data.cpu().numpy()).volume driving_area = ConvexHull(kp_driving_initial['value'][0].data.cpu().numpy()).volume adapt_movement_scale = np.sqrt(source_area) /", "modules.generator import OcclusionAwareGenerator from modules.keypoint_detector import KPDetector from sync_batchnorm import", "required=True, help=\"path to config\") parser.add_argument(\"--source_image\", required=True, help=\"path to source image\")", "cv2.cvtColor(x, cv2.COLOR_BGR2RGB) # # x = y.permute(1, 2, 0) #", "from skimage.transform import resize from scipy.spatial import ConvexHull from modules.generator", "import DataParallelWithCallback #from animate import normalize_kp # command = [ffmpeg,", "generator, kp_detector = load_checkpoints(config_path=opt.config, checkpoint_path=opt.checkpoint, cpu=opt.cpu) source_image = imageio.imread(opt.source_image) source_image", "= np.squeeze(driving_resized, axis=(0,)) #x = driving_resized[0].permute(1, 2, 0) # plt_driving", "#out = cv2.VideoWriter('outpy.avi', cv2.VideoWriter_fourcc('M','J','P','G'), 30, (256, 256)) kp_driving_initial = None", "resize from scipy.spatial import ConvexHull from modules.generator import OcclusionAwareGenerator from", "import throttle import numpy as np import matplotlib.pyplot as plt", "= cv2.VideoWriter('outpy.avi', cv2.VideoWriter_fourcc('M','J','P','G'), 30, (256, 256)) kp_driving_initial = None camera", "= normalize_kp( kp_source=kp_source, kp_driving=kp_driving, kp_driving_initial=kp_driving_initial, use_relative_movement=relative, use_relative_jacobian=relative, adapt_movement_scale=adapt_scale ) out", "absolute keypoint coordinates\") parser.add_argument(\"--adapt_scale\", dest=\"adapt_scale\", action=\"store_true\", help=\"adapt movement scale based", "coordinates\") parser.add_argument(\"--adapt_scale\", dest=\"adapt_scale\", action=\"store_true\", help=\"adapt movement scale based on convex", "= camera.read() while True: ret, frame = camera.read() resized =", "(256, 256))[..., :3] source = torch.tensor(source_image[np.newaxis].astype(np.float32)).permute(0, 3, 1, 2) if", "torch.matmul(kp_driving['jacobian'], torch.inverse(kp_driving_initial['jacobian'])) kp_new['jacobian'] = torch.matmul(jacobian_diff, kp_source['jacobian']) return kp_new def load_checkpoints(config_path,", "source = torch.tensor(source_image[np.newaxis].astype(np.float32)).permute(0, 3, 1, 2) if not opt.cpu: source", "driving_area = ConvexHull(kp_driving_initial['value'][0].data.cpu().numpy()).volume adapt_movement_scale = np.sqrt(source_area) / np.sqrt(driving_area) else: adapt_movement_scale", "= generator(source_image, kp_source=kp_source, kp_driving=kp_norm) return np.transpose(out[\"prediction\"].data.cpu().numpy(), [0, 2, 3, 1])[0]", "'-y', # '-f', 'rawvideo', # '-vcodec','rawvideo', # '-pix_fmt', 'bgr24', #", "generator.cuda() kp_detector = KPDetector(**config['model_params']['kp_detector_params'], **config['model_params']['common_params']) if not cpu: kp_detector.cuda() if", "= 1 kp_new = {k: v for k, v in", "help=\"adapt movement scale based on convex hull of keypoints\") parser.add_argument(\"--cpu\",", "driving_frame, kp_source, kp_driving_initial, generator, kp_detector, relative=True, adapt_scale=True, cpu=True): kp_driving =", "kp_new['jacobian'] = 
torch.matmul(jacobian_diff, kp_source['jacobian']) return kp_new def load_checkpoints(config_path, checkpoint_path, cpu=False):", "*= adapt_movement_scale kp_new['value'] = kp_value_diff + kp_source['value'] if use_relative_jacobian: jacobian_diff", "based on convex hull of keypoints\") parser.add_argument(\"--cpu\", dest=\"cpu\", action=\"store_true\", help=\"CPU", "use_relative_jacobian=relative, adapt_movement_scale=adapt_scale ) out = generator(source_image, kp_source=kp_source, kp_driving=kp_norm) return np.transpose(out[\"prediction\"].data.cpu().numpy(),", "source_image = resize(source_image, (256, 256))[..., :3] source = torch.tensor(source_image[np.newaxis].astype(np.float32)).permute(0, 3,", "1 kp_new = {k: v for k, v in kp_driving.items()}", "kp_driving_initial: kp_driving_initial = kp_detector(driving_resized) fake_frame = forward( source, driving_resized, kp_source,", "if __name__ == \"__main__\": parser = ArgumentParser() parser.add_argument(\"--config\", required=True, help=\"path", "to source image\") parser.add_argument(\"--checkpoint\", default=\"vox-cpk.pth.tar\", help=\"path to checkpoint\") parser.add_argument(\"--relative\", dest=\"relative\",", "# x = y.permute(1, 2, 0) # plt.imshow(np.array(image)) # plt.show()", "if adapt_movement_scale: source_area = ConvexHull(kp_source['value'][0].data.cpu().numpy()).volume driving_area = ConvexHull(kp_driving_initial['value'][0].data.cpu().numpy()).volume adapt_movement_scale =", "from argparse import ArgumentParser from skimage.transform import resize from scipy.spatial", "cv2 import torch import yaml import imageio import throttle import", "as f: config = yaml.load(f) generator = OcclusionAwareGenerator(**config['model_params']['generator_params'], **config['model_params']['common_params']) if", "checkpoint_path=opt.checkpoint, cpu=opt.cpu) source_image = imageio.imread(opt.source_image) source_image = resize(source_image, (256, 256))[...,", "torch.tensor(np.array(resized)[np.newaxis].astype(np.float32)).permute(0, 3, 1, 2) if not kp_driving_initial: kp_driving_initial = kp_detector(driving_resized)", "return kp_new def load_checkpoints(config_path, checkpoint_path, cpu=False): with open(config_path) as f:", "checkpoint = torch.load(checkpoint_path, map_location=torch.device('cpu')) else: checkpoint = torch.load(checkpoint_path) generator.load_state_dict(checkpoint['generator']) kp_detector.load_state_dict(checkpoint['kp_detector'])", "= torch.tensor(np.array(resized)[np.newaxis].astype(np.float32)).permute(0, 3, 1, 2) if not kp_driving_initial: kp_driving_initial =", "required=True, help=\"path to source image\") parser.add_argument(\"--checkpoint\", default=\"vox-cpk.pth.tar\", help=\"path to checkpoint\")", "parser.add_argument(\"--config\", required=True, help=\"path to config\") parser.add_argument(\"--source_image\", required=True, help=\"path to source", "KPDetector(**config['model_params']['kp_detector_params'], **config['model_params']['common_params']) if not cpu: kp_detector.cuda() if cpu: checkpoint =", "k, v in kp_driving.items()} if use_relative_movement: kp_value_diff = (kp_driving['value'] -", "kp_driving_initial, generator, kp_detector, relative=True, adapt_scale=True, cpu=True): kp_driving = kp_detector(driving_frame) kp_norm", "= kp_value_diff + kp_source['value'] if use_relative_jacobian: jacobian_diff = torch.matmul(kp_driving['jacobian'], torch.inverse(kp_driving_initial['jacobian']))", "kp_detector(driving_resized) fake_frame = forward( source, driving_resized, kp_source, kp_driving_initial, generator, 
kp_detector,", "kp_source, kp_driving_initial, generator, kp_detector, relative=True, adapt_scale=True, cpu=True): kp_driving = kp_detector(driving_frame)", "(256, 256))[..., :3] if not opt.cpu: resized = resized.cuda() #", "return np.transpose(out[\"prediction\"].data.cpu().numpy(), [0, 2, 3, 1])[0] if __name__ == \"__main__\":", "kp_driving=kp_norm) return np.transpose(out[\"prediction\"].data.cpu().numpy(), [0, 2, 3, 1])[0] if __name__ ==", "2, 3, 1])[0] if __name__ == \"__main__\": parser = ArgumentParser()", "argparse import ArgumentParser from skimage.transform import resize from scipy.spatial import", "import cv2 import torch import yaml import imageio import throttle", "torch.tensor(np.array(resized)) # x = y.cpu().numpy() # image = cv2.cvtColor(x, cv2.COLOR_BGR2RGB)", "'flv', # 'rtmp://10.10.10.80/live/mystream'] def normalize_kp(kp_source, kp_driving, kp_driving_initial, adapt_movement_scale=False, use_relative_movement=False, use_relative_jacobian=False):", "# '-f', 'rawvideo', # '-vcodec','rawvideo', # '-pix_fmt', 'bgr24', # '-s',", "driving_resized[0].permute(1, 2, 0) # plt_driving = driving_resized #permute(2, 3, 1)", "kp_value_diff + kp_source['value'] if use_relative_jacobian: jacobian_diff = torch.matmul(kp_driving['jacobian'], torch.inverse(kp_driving_initial['jacobian'])) kp_new['jacobian']", "= torch.load(checkpoint_path, map_location=torch.device('cpu')) else: checkpoint = torch.load(checkpoint_path) generator.load_state_dict(checkpoint['generator']) kp_detector.load_state_dict(checkpoint['kp_detector']) if", "torch.load(checkpoint_path) generator.load_state_dict(checkpoint['generator']) kp_detector.load_state_dict(checkpoint['kp_detector']) if not cpu: generator = DataParallelWithCallback(generator) kp_detector", "driving_resized, kp_source, kp_driving_initial, generator, kp_detector, relative=opt.relative, adapt_scale=opt.adapt_scale, cpu=opt.cpu ) cv2.imshow(\"frame\",", "dimension, # '-i', '-', # '-c:v', 'libx264', # '-pix_fmt', 'yuv420p',", "while True: ret, frame = camera.read() resized = resize(frame, (256,", "kp_detector, relative=opt.relative, adapt_scale=opt.adapt_scale, cpu=opt.cpu ) cv2.imshow(\"frame\", fake_frame) #x = np.squeeze(driving_resized,", "import ArgumentParser from skimage.transform import resize from scipy.spatial import ConvexHull", "kp_source=kp_source, kp_driving=kp_norm) return np.transpose(out[\"prediction\"].data.cpu().numpy(), [0, 2, 3, 1])[0] if __name__", "parser = ArgumentParser() parser.add_argument(\"--config\", required=True, help=\"path to config\") parser.add_argument(\"--source_image\", required=True,", "keypoints\") parser.add_argument(\"--cpu\", dest=\"cpu\", action=\"store_true\", help=\"CPU mode\") parser.set_defaults(relative=False) parser.set_defaults(adapt_scale=False) opt =", "import numpy as np import matplotlib.pyplot as plt from argparse" ]
[ "# 3. impelement SAA # to do list # 1.", "approximation does not converge well, even without variance, does not", "from .costFunctionalConstantMultiPDE import CostFunctionalConstantMultiPDE from .costFunctionalLinear import CostFunctionalLinear from .costFunctionalLinearMultiPDE", "different design # 2. table for variance reduction # 3.", "at different design # 2. table for variance reduction #", "# from .chanceConstraintConstant import ChanceConstraintConstant # to do list #", "import CostFunctionalQuadraticMultiPDE # from .chanceConstraintQuadratic import ChanceConstraintQuadratic # from .chanceConstraintLinear", "# what to show tomorrow # 1. variance reduction by", "zero, Hessian term # 1. implement linear # 2. implement", "gradient for quadratic + correction # what to show tomorrow", "solver works # 2. quadratic approximation does not converge well,", "9, 2018, work on reporting results # 1. random samples", "not converge ### record eigenvector after m_tr[i].zero() # 3. check", "# 2. quadratic approximation does not converge well, even without", "well, even without variance, does not converge ### record eigenvector", "term # 1. implement linear # 2. implement quadratic #", "# 1. variance reduction by mean square error # 2.", "# April 9, 2018, work on reporting results # 1.", "state at different design # April 9, 2018, work on", "states at different design # 2. table for variance reduction", "check gradient for quadratic + correction # what to show", "# from .chanceConstraintQuadratic import ChanceConstraintQuadratic # from .chanceConstraintLinear import ChanceConstraintLinear", "from .costFunctionalLinearMultiPDE import CostFunctionalLinearMultiPDE from .costFunctionalQuadratic import CostFunctionalQuadratic from .costFunctionalQuadraticMultiPDE", "to do list # 0. implement zero, Hessian term #", "implement zero, Hessian term # 1. implement linear # 2.", "import ChanceConstraintConstant # to do list # 0. implement zero,", "without variance, does not converge ### record eigenvector after m_tr[i].zero()", "ControlPDEProblemMultiPDE from .costFunctionalConstant import CostFunctionalConstant from .costFunctionalConstantMultiPDE import CostFunctionalConstantMultiPDE from", "and states at different design # 2. table for variance", "division, print_function from .controlPDEProblem import ControlPDEProblem from .controlPDEProblemMultiPDE import ControlPDEProblemMultiPDE", "eigenvector after m_tr[i].zero() # 3. check gradient for quadratic +", "<reponame>cpempire/soupy<gh_stars>1-10 from __future__ import absolute_import, division, print_function from .controlPDEProblem import", "import CostFunctionalConstantMultiPDE from .costFunctionalLinear import CostFunctionalLinear from .costFunctionalLinearMultiPDE import CostFunctionalLinearMultiPDE", "CostFunctionalLinear from .costFunctionalLinearMultiPDE import CostFunctionalLinearMultiPDE from .costFunctionalQuadratic import CostFunctionalQuadratic from", "quadratic + correction # what to show tomorrow # 1.", "2. implement quadratic # 3. impelement SAA # to do", "clear bug, simplifing adjoint solver works # 2. quadratic approximation", "error # 2. trace estimation by MC and randomized SVD", "list # 1. SAA does not run well in ccgo1,", ".costFunctionalConstantMultiPDE import CostFunctionalConstantMultiPDE from .costFunctionalLinear import CostFunctionalLinear from .costFunctionalLinearMultiPDE import", "3. check gradient for quadratic + correction # what to", "# 4. 
plot #bfgs iterations # obtain all results as", "import CostFunctionalLinear from .costFunctionalLinearMultiPDE import CostFunctionalLinearMultiPDE from .costFunctionalQuadratic import CostFunctionalQuadratic", "3. impelement SAA # to do list # 1. SAA", "variance reduction by mean square error # 2. trace estimation", "+ correction # what to show tomorrow # 1. variance", "Hessian term # 1. implement linear # 2. implement quadratic", "work on reporting results # 1. random samples and states", "multiprocessor does not work, ### not clear bug, simplifing adjoint", "# 3. check gradient for quadratic + correction # what", "by mean square error # 2. trace estimation by MC", "absolute_import, division, print_function from .controlPDEProblem import ControlPDEProblem from .controlPDEProblemMultiPDE import", "not run well in ccgo1, multiprocessor does not work, ###", "and state at different design # April 9, 2018, work", "and submarine # 5. random sample and state at different", "for quadratic + correction # what to show tomorrow #", "sample and state at different design # April 9, 2018,", "2. quadratic approximation does not converge well, even without variance,", "linear # 2. implement quadratic # 3. impelement SAA #", "trace estimation by MC and randomized SVD # 3. scaling", "import CostFunctionalQuadratic from .costFunctionalQuadraticMultiPDE import CostFunctionalQuadraticMultiPDE # from .chanceConstraintQuadratic import", "table for variance reduction # 3. plot trace estimation #", "square error # 2. trace estimation by MC and randomized", "not work, ### not clear bug, simplifing adjoint solver works", "adjoint solver works # 2. quadratic approximation does not converge", "trace estimation # 4. plot #bfgs iterations # obtain all", "design and state, for both disk and submarine # 5.", "uncertainty), trace, variance reduction, #bfgs # 4. show the design", "# to do list # 0. implement zero, Hessian term", "from .chanceConstraintConstant import ChanceConstraintConstant # to do list # 0.", "at different design # April 9, 2018, work on reporting", "plot trace estimation # 4. plot #bfgs iterations # obtain", "import CostFunctionalConstant from .costFunctionalConstantMultiPDE import CostFunctionalConstantMultiPDE from .costFunctionalLinear import CostFunctionalLinear", "random samples and states at different design # 2. table", "1. implement linear # 2. implement quadratic # 3. impelement", "import ControlPDEProblemMultiPDE from .costFunctionalConstant import CostFunctionalConstant from .costFunctionalConstantMultiPDE import CostFunctionalConstantMultiPDE", "reduction # 3. plot trace estimation # 4. plot #bfgs", "for both disk and submarine # 5. random sample and", "from __future__ import absolute_import, division, print_function from .controlPDEProblem import ControlPDEProblem", "# to do list # 1. SAA does not run", "quadratic approximation does not converge well, even without variance, does", "estimation by MC and randomized SVD # 3. scaling with", "from .controlPDEProblem import ControlPDEProblem from .controlPDEProblemMultiPDE import ControlPDEProblemMultiPDE from .costFunctionalConstant", "submarine # 5. random sample and state at different design", "does not converge ### record eigenvector after m_tr[i].zero() # 3.", "what to show tomorrow # 1. variance reduction by mean", "samples and states at different design # 2. table for", "converge well, even without variance, does not converge ### record", "disk and submarine # 5. random sample and state at", "list # 0. implement zero, Hessian term # 1. 
implement", "CostFunctionalQuadraticMultiPDE # from .chanceConstraintQuadratic import ChanceConstraintQuadratic # from .chanceConstraintLinear import", "implement quadratic # 3. impelement SAA # to do list", "reduction, #bfgs # 4. show the design and state, for", "does not converge well, even without variance, does not converge", "+ uncertainty), trace, variance reduction, #bfgs # 4. show the", "from .controlPDEProblemMultiPDE import ControlPDEProblemMultiPDE from .costFunctionalConstant import CostFunctionalConstant from .costFunctionalConstantMultiPDE", "ChanceConstraintQuadratic # from .chanceConstraintLinear import ChanceConstraintLinear # from .chanceConstraintConstant import", "converge ### record eigenvector after m_tr[i].zero() # 3. check gradient", "works # 2. quadratic approximation does not converge well, even", "# 2. table for variance reduction # 3. plot trace", "### not clear bug, simplifing adjoint solver works # 2.", ".chanceConstraintConstant import ChanceConstraintConstant # to do list # 0. implement", "from .costFunctionalQuadraticMultiPDE import CostFunctionalQuadraticMultiPDE # from .chanceConstraintQuadratic import ChanceConstraintQuadratic #", "2. table for variance reduction # 3. plot trace estimation", "# 1. SAA does not run well in ccgo1, multiprocessor", "does not work, ### not clear bug, simplifing adjoint solver", "1. random samples and states at different design # 2.", "simplifing adjoint solver works # 2. quadratic approximation does not", "3. plot trace estimation # 4. plot #bfgs iterations #", "from .costFunctionalLinear import CostFunctionalLinear from .costFunctionalLinearMultiPDE import CostFunctionalLinearMultiPDE from .costFunctionalQuadratic", "mesh (design + uncertainty), trace, variance reduction, #bfgs # 4.", "1. variance reduction by mean square error # 2. trace", "ChanceConstraintConstant # to do list # 0. implement zero, Hessian", "CostFunctionalConstantMultiPDE from .costFunctionalLinear import CostFunctionalLinear from .costFunctionalLinearMultiPDE import CostFunctionalLinearMultiPDE from", "reporting results # 1. random samples and states at different", "ControlPDEProblem from .controlPDEProblemMultiPDE import ControlPDEProblemMultiPDE from .costFunctionalConstant import CostFunctionalConstant from", "4. show the design and state, for both disk and", "for variance reduction # 3. plot trace estimation # 4.", "random sample and state at different design # April 9,", "3. scaling with repsect to mesh (design + uncertainty), trace,", "SVD # 3. scaling with repsect to mesh (design +", "design # 2. table for variance reduction # 3. plot", "not converge well, even without variance, does not converge ###", "2018, work on reporting results # 1. random samples and", "after m_tr[i].zero() # 3. check gradient for quadratic + correction", ".controlPDEProblem import ControlPDEProblem from .controlPDEProblemMultiPDE import ControlPDEProblemMultiPDE from .costFunctionalConstant import", "CostFunctionalConstant from .costFunctionalConstantMultiPDE import CostFunctionalConstantMultiPDE from .costFunctionalLinear import CostFunctionalLinear from", "scaling with repsect to mesh (design + uncertainty), trace, variance", "variance reduction # 3. plot trace estimation # 4. plot", "record eigenvector after m_tr[i].zero() # 3. check gradient for quadratic", "variance reduction, #bfgs # 4. show the design and state,", "design # April 9, 2018, work on reporting results #", "# 1. implement linear # 2. implement quadratic # 3.", "do list # 0. 
implement zero, Hessian term # 1.", ".costFunctionalConstant import CostFunctionalConstant from .costFunctionalConstantMultiPDE import CostFunctionalConstantMultiPDE from .costFunctionalLinear import", "#bfgs # 4. show the design and state, for both", "import absolute_import, division, print_function from .controlPDEProblem import ControlPDEProblem from .controlPDEProblemMultiPDE", "impelement SAA # to do list # 1. SAA does", "# 5. random sample and state at different design #", "import ChanceConstraintQuadratic # from .chanceConstraintLinear import ChanceConstraintLinear # from .chanceConstraintConstant", ".controlPDEProblemMultiPDE import ControlPDEProblemMultiPDE from .costFunctionalConstant import CostFunctionalConstant from .costFunctionalConstantMultiPDE import", "variance, does not converge ### record eigenvector after m_tr[i].zero() #", "to mesh (design + uncertainty), trace, variance reduction, #bfgs #", "work, ### not clear bug, simplifing adjoint solver works #", "not clear bug, simplifing adjoint solver works # 2. quadratic", "on reporting results # 1. random samples and states at", "ChanceConstraintLinear # from .chanceConstraintConstant import ChanceConstraintConstant # to do list", "April 9, 2018, work on reporting results # 1. random", "correction # what to show tomorrow # 1. variance reduction", "and state, for both disk and submarine # 5. random", "from .costFunctionalQuadratic import CostFunctionalQuadratic from .costFunctionalQuadraticMultiPDE import CostFunctionalQuadraticMultiPDE # from", "by MC and randomized SVD # 3. scaling with repsect", "from .chanceConstraintQuadratic import ChanceConstraintQuadratic # from .chanceConstraintLinear import ChanceConstraintLinear #", ".costFunctionalLinear import CostFunctionalLinear from .costFunctionalLinearMultiPDE import CostFunctionalLinearMultiPDE from .costFunctionalQuadratic import", "from .costFunctionalConstant import CostFunctionalConstant from .costFunctionalConstantMultiPDE import CostFunctionalConstantMultiPDE from .costFunctionalLinear", "print_function from .controlPDEProblem import ControlPDEProblem from .controlPDEProblemMultiPDE import ControlPDEProblemMultiPDE from", "well in ccgo1, multiprocessor does not work, ### not clear", "0. implement zero, Hessian term # 1. implement linear #", "mean square error # 2. trace estimation by MC and", "# 3. scaling with repsect to mesh (design + uncertainty),", ".costFunctionalLinearMultiPDE import CostFunctionalLinearMultiPDE from .costFunctionalQuadratic import CostFunctionalQuadratic from .costFunctionalQuadraticMultiPDE import", "both disk and submarine # 5. random sample and state", "estimation # 4. plot #bfgs iterations # obtain all results", "in ccgo1, multiprocessor does not work, ### not clear bug,", "do list # 1. SAA does not run well in", "SAA does not run well in ccgo1, multiprocessor does not", "repsect to mesh (design + uncertainty), trace, variance reduction, #bfgs", "### record eigenvector after m_tr[i].zero() # 3. check gradient for", "# 2. trace estimation by MC and randomized SVD #", "# from .chanceConstraintLinear import ChanceConstraintLinear # from .chanceConstraintConstant import ChanceConstraintConstant", "and randomized SVD # 3. 
scaling with repsect to mesh", "from .chanceConstraintLinear import ChanceConstraintLinear # from .chanceConstraintConstant import ChanceConstraintConstant #", "import ChanceConstraintLinear # from .chanceConstraintConstant import ChanceConstraintConstant # to do", "the design and state, for both disk and submarine #", "implement linear # 2. implement quadratic # 3. impelement SAA", "to do list # 1. SAA does not run well", "# 0. implement zero, Hessian term # 1. implement linear", "show tomorrow # 1. variance reduction by mean square error", "CostFunctionalQuadratic from .costFunctionalQuadraticMultiPDE import CostFunctionalQuadraticMultiPDE # from .chanceConstraintQuadratic import ChanceConstraintQuadratic", "ccgo1, multiprocessor does not work, ### not clear bug, simplifing", "5. random sample and state at different design # April", "(design + uncertainty), trace, variance reduction, #bfgs # 4. show", "does not run well in ccgo1, multiprocessor does not work,", "CostFunctionalLinearMultiPDE from .costFunctionalQuadratic import CostFunctionalQuadratic from .costFunctionalQuadraticMultiPDE import CostFunctionalQuadraticMultiPDE #", "randomized SVD # 3. scaling with repsect to mesh (design", "run well in ccgo1, multiprocessor does not work, ### not", "# 2. implement quadratic # 3. impelement SAA # to", ".costFunctionalQuadraticMultiPDE import CostFunctionalQuadraticMultiPDE # from .chanceConstraintQuadratic import ChanceConstraintQuadratic # from", ".costFunctionalQuadratic import CostFunctionalQuadratic from .costFunctionalQuadraticMultiPDE import CostFunctionalQuadraticMultiPDE # from .chanceConstraintQuadratic", "even without variance, does not converge ### record eigenvector after", "MC and randomized SVD # 3. scaling with repsect to", "# 1. random samples and states at different design #", "to show tomorrow # 1. variance reduction by mean square", "# 3. plot trace estimation # 4. plot #bfgs iterations", "reduction by mean square error # 2. trace estimation by", "2. trace estimation by MC and randomized SVD # 3.", ".chanceConstraintQuadratic import ChanceConstraintQuadratic # from .chanceConstraintLinear import ChanceConstraintLinear # from", "results # 1. random samples and states at different design", "__future__ import absolute_import, division, print_function from .controlPDEProblem import ControlPDEProblem from", ".chanceConstraintLinear import ChanceConstraintLinear # from .chanceConstraintConstant import ChanceConstraintConstant # to", "quadratic # 3. impelement SAA # to do list #", "tomorrow # 1. variance reduction by mean square error #", "state, for both disk and submarine # 5. random sample", "4. plot #bfgs iterations # obtain all results as planned", "with repsect to mesh (design + uncertainty), trace, variance reduction,", "different design # April 9, 2018, work on reporting results", "bug, simplifing adjoint solver works # 2. quadratic approximation does", "1. SAA does not run well in ccgo1, multiprocessor does", "trace, variance reduction, #bfgs # 4. show the design and", "import CostFunctionalLinearMultiPDE from .costFunctionalQuadratic import CostFunctionalQuadratic from .costFunctionalQuadraticMultiPDE import CostFunctionalQuadraticMultiPDE", "show the design and state, for both disk and submarine", "m_tr[i].zero() # 3. check gradient for quadratic + correction #", "SAA # to do list # 1. SAA does not", "import ControlPDEProblem from .controlPDEProblemMultiPDE import ControlPDEProblemMultiPDE from .costFunctionalConstant import CostFunctionalConstant", "# 4. 
show the design and state, for both disk" ]
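# Usage sketch (not part of the original __init__.py; the class names come from the
# re-exports above, but constructor arguments are omitted because their signatures
# are not shown here):
#
#   import soupy
#   problem = soupy.ControlPDEProblemMultiPDE(...)   # PDE-constrained control problem
#   cost = soupy.CostFunctionalQuadratic(...)        # quadratic approximation of the objective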
[ "contrast_limit=0.5, brightness_by_max=True,p=0.5), HueSaturationValue(hue_shift_limit=30, sat_shift_limit=30, val_shift_limit=0, p=0.5), CoarseDropout(max_holes=2, max_height=config['input_resolution'][0]//4, max_width=config['input_resolution'][1]//4, min_holes=1,", "CenterCrop, RandomBrightnessContrast, HueSaturationValue, IAASharpen, RandomGamma, RandomBrightness, RandomBrightnessContrast, GaussianBlur,CLAHE, Cutout, CoarseDropout,", "p=0.5), CoarseDropout(max_holes=2, max_height=config['input_resolution'][0]//4, max_width=config['input_resolution'][1]//4, min_holes=1, min_height=config['input_resolution'][0]//16, min_width=config['input_resolution'][1]//16, fill_value=0, mask_fill_value=0, p=0.5),", "RandomRotate90(p=1), HorizontalFlip(p=0.5), #Morphology ShiftScaleRotate(shift_limit=0, scale_limit=(-0.2,0.2), rotate_limit=(-30,30), interpolation=1, border_mode=0, value=(0,0,0), p=0.5),", "RandomRotate90, ShiftScaleRotate, ElasticTransform, GridDistortion, RandomSizedCrop, RandomCrop, CenterCrop, RandomBrightnessContrast, HueSaturationValue, IAASharpen,", "GaussianBlur(blur_limit=(3,7), p=0.5), #Color RandomBrightnessContrast(brightness_limit=0.35, contrast_limit=0.5, brightness_by_max=True,p=0.5), HueSaturationValue(hue_shift_limit=30, sat_shift_limit=30, val_shift_limit=0, p=0.5),", "GridDistortion, RandomSizedCrop, RandomCrop, CenterCrop, RandomBrightnessContrast, HueSaturationValue, IAASharpen, RandomGamma, RandomBrightness, RandomBrightnessContrast,", "CoarseDropout, GaussNoise, ChannelShuffle, ToGray, OpticalDistortion, Normalize, OneOf, NoOp) from albumentations.pytorch", "RandomBrightnessContrast, GaussianBlur,CLAHE, Cutout, CoarseDropout, GaussNoise, ChannelShuffle, ToGray, OpticalDistortion, Normalize, OneOf,", "#Color RandomBrightnessContrast(brightness_limit=0.35, contrast_limit=0.5, brightness_by_max=True,p=0.5), HueSaturationValue(hue_shift_limit=30, sat_shift_limit=30, val_shift_limit=0, p=0.5), CoarseDropout(max_holes=2, max_height=config['input_resolution'][0]//4,", "RandomCrop, CenterCrop, RandomBrightnessContrast, HueSaturationValue, IAASharpen, RandomGamma, RandomBrightness, RandomBrightnessContrast, GaussianBlur,CLAHE, Cutout,", "MEAN[1], MEAN[2]), std=(STD[0], STD[1], STD[2])), ToTensor(), ]) return transform_train def", "max_width=config['input_resolution'][1]//4, min_holes=1, min_height=config['input_resolution'][0]//16, min_width=config['input_resolution'][1]//16, fill_value=0, mask_fill_value=0, p=0.5), Normalize(mean=(MEAN[0], MEAN[1], MEAN[2]),", "Normalize(mean=(MEAN[0], MEAN[1], MEAN[2]), std=(STD[0], STD[1], STD[2])), ToTensor(), ] ) return", "get_config config = get_config() MEAN = np.array([0.485, 0.456, 0.406]) STD", "interpolation=1, border_mode=0, value=(0,0,0), p=0.5), GaussNoise(var_limit=(0,50.0), mean=0, p=0.5), GaussianBlur(blur_limit=(3,7), p=0.5), #Color", "albumentations import (Compose, HorizontalFlip, VerticalFlip, Rotate, RandomRotate90, ShiftScaleRotate, ElasticTransform, GridDistortion,", "get_config import get_config config = get_config() MEAN = np.array([0.485, 0.456,", "mean=0, p=0.5), GaussianBlur(blur_limit=(3,7), p=0.5), #Color RandomBrightnessContrast(brightness_limit=0.35, contrast_limit=0.5, brightness_by_max=True,p=0.5), HueSaturationValue(hue_shift_limit=30, sat_shift_limit=30,", "std=(STD[0], STD[1], STD[2])), ToTensor(), ]) return transform_train def get_transforms_valid(): transform_valid", "ToTensor(), ] ) return transform_valid def denormalize(z, mean=MEAN.reshape(-1,1,1), std=STD.reshape(-1,1,1)): return", "MEAN[1], 
MEAN[2]), std=(STD[0], STD[1], STD[2])), ToTensor(), ] ) return transform_valid", "MEAN[2]), std=(STD[0], STD[1], STD[2])), ToTensor(), ] ) return transform_valid def", "0.224, 0.225]) def get_transforms_train(): transform_train = Compose([ #Basic RandomRotate90(p=1), HorizontalFlip(p=0.5),", "MEAN = np.array([0.485, 0.456, 0.406]) STD = np.array([0.229, 0.224, 0.225])", "from albumentations.pytorch import ToTensorV2 as ToTensor from get_config import get_config", "np.array([0.485, 0.456, 0.406]) STD = np.array([0.229, 0.224, 0.225]) def get_transforms_train():", "sat_shift_limit=30, val_shift_limit=0, p=0.5), CoarseDropout(max_holes=2, max_height=config['input_resolution'][0]//4, max_width=config['input_resolution'][1]//4, min_holes=1, min_height=config['input_resolution'][0]//16, min_width=config['input_resolution'][1]//16, fill_value=0,", "MEAN[2]), std=(STD[0], STD[1], STD[2])), ToTensor(), ]) return transform_train def get_transforms_valid():", "ChannelShuffle, ToGray, OpticalDistortion, Normalize, OneOf, NoOp) from albumentations.pytorch import ToTensorV2", "] ) return transform_valid def denormalize(z, mean=MEAN.reshape(-1,1,1), std=STD.reshape(-1,1,1)): return std*z", "value=(0,0,0), p=0.5), GaussNoise(var_limit=(0,50.0), mean=0, p=0.5), GaussianBlur(blur_limit=(3,7), p=0.5), #Color RandomBrightnessContrast(brightness_limit=0.35, contrast_limit=0.5,", "transform_train = Compose([ #Basic RandomRotate90(p=1), HorizontalFlip(p=0.5), #Morphology ShiftScaleRotate(shift_limit=0, scale_limit=(-0.2,0.2), rotate_limit=(-30,30),", "def get_transforms_train(): transform_train = Compose([ #Basic RandomRotate90(p=1), HorizontalFlip(p=0.5), #Morphology ShiftScaleRotate(shift_limit=0,", "fill_value=0, mask_fill_value=0, p=0.5), Normalize(mean=(MEAN[0], MEAN[1], MEAN[2]), std=(STD[0], STD[1], STD[2])), ToTensor(),", "brightness_by_max=True,p=0.5), HueSaturationValue(hue_shift_limit=30, sat_shift_limit=30, val_shift_limit=0, p=0.5), CoarseDropout(max_holes=2, max_height=config['input_resolution'][0]//4, max_width=config['input_resolution'][1]//4, min_holes=1, min_height=config['input_resolution'][0]//16,", "]) return transform_train def get_transforms_valid(): transform_valid = Compose([ Normalize(mean=(MEAN[0], MEAN[1],", "ToTensor(), ]) return transform_train def get_transforms_valid(): transform_valid = Compose([ Normalize(mean=(MEAN[0],", "import numpy as np from albumentations import (Compose, HorizontalFlip, VerticalFlip,", "0.225]) def get_transforms_train(): transform_train = Compose([ #Basic RandomRotate90(p=1), HorizontalFlip(p=0.5), #Morphology", "RandomBrightnessContrast, HueSaturationValue, IAASharpen, RandomGamma, RandomBrightness, RandomBrightnessContrast, GaussianBlur,CLAHE, Cutout, CoarseDropout, GaussNoise,", "IAASharpen, RandomGamma, RandomBrightness, RandomBrightnessContrast, GaussianBlur,CLAHE, Cutout, CoarseDropout, GaussNoise, ChannelShuffle, ToGray,", "min_holes=1, min_height=config['input_resolution'][0]//16, min_width=config['input_resolution'][1]//16, fill_value=0, mask_fill_value=0, p=0.5), Normalize(mean=(MEAN[0], MEAN[1], MEAN[2]), std=(STD[0],", "OpticalDistortion, Normalize, OneOf, NoOp) from albumentations.pytorch import ToTensorV2 as ToTensor", "0.456, 0.406]) STD = np.array([0.229, 0.224, 0.225]) def get_transforms_train(): transform_train", "VerticalFlip, Rotate, RandomRotate90, ShiftScaleRotate, ElasticTransform, GridDistortion, RandomSizedCrop, RandomCrop, CenterCrop, RandomBrightnessContrast,", "ToTensorV2 as ToTensor from get_config import get_config config = 
get_config()", "max_height=config['input_resolution'][0]//4, max_width=config['input_resolution'][1]//4, min_holes=1, min_height=config['input_resolution'][0]//16, min_width=config['input_resolution'][1]//16, fill_value=0, mask_fill_value=0, p=0.5), Normalize(mean=(MEAN[0], MEAN[1],", "HueSaturationValue, IAASharpen, RandomGamma, RandomBrightness, RandomBrightnessContrast, GaussianBlur,CLAHE, Cutout, CoarseDropout, GaussNoise, ChannelShuffle,", "HorizontalFlip(p=0.5), #Morphology ShiftScaleRotate(shift_limit=0, scale_limit=(-0.2,0.2), rotate_limit=(-30,30), interpolation=1, border_mode=0, value=(0,0,0), p=0.5), GaussNoise(var_limit=(0,50.0),", "Normalize, OneOf, NoOp) from albumentations.pytorch import ToTensorV2 as ToTensor from", "RandomGamma, RandomBrightness, RandomBrightnessContrast, GaussianBlur,CLAHE, Cutout, CoarseDropout, GaussNoise, ChannelShuffle, ToGray, OpticalDistortion,", "numpy as np from albumentations import (Compose, HorizontalFlip, VerticalFlip, Rotate,", "NoOp) from albumentations.pytorch import ToTensorV2 as ToTensor from get_config import", "config = get_config() MEAN = np.array([0.485, 0.456, 0.406]) STD =", "Compose([ #Basic RandomRotate90(p=1), HorizontalFlip(p=0.5), #Morphology ShiftScaleRotate(shift_limit=0, scale_limit=(-0.2,0.2), rotate_limit=(-30,30), interpolation=1, border_mode=0,", "get_config() MEAN = np.array([0.485, 0.456, 0.406]) STD = np.array([0.229, 0.224,", "#Morphology ShiftScaleRotate(shift_limit=0, scale_limit=(-0.2,0.2), rotate_limit=(-30,30), interpolation=1, border_mode=0, value=(0,0,0), p=0.5), GaussNoise(var_limit=(0,50.0), mean=0,", ") return transform_valid def denormalize(z, mean=MEAN.reshape(-1,1,1), std=STD.reshape(-1,1,1)): return std*z +", "np.array([0.229, 0.224, 0.225]) def get_transforms_train(): transform_train = Compose([ #Basic RandomRotate90(p=1),", "border_mode=0, value=(0,0,0), p=0.5), GaussNoise(var_limit=(0,50.0), mean=0, p=0.5), GaussianBlur(blur_limit=(3,7), p=0.5), #Color RandomBrightnessContrast(brightness_limit=0.35,", "= get_config() MEAN = np.array([0.485, 0.456, 0.406]) STD = np.array([0.229,", "RandomBrightnessContrast(brightness_limit=0.35, contrast_limit=0.5, brightness_by_max=True,p=0.5), HueSaturationValue(hue_shift_limit=30, sat_shift_limit=30, val_shift_limit=0, p=0.5), CoarseDropout(max_holes=2, max_height=config['input_resolution'][0]//4, max_width=config['input_resolution'][1]//4,", "val_shift_limit=0, p=0.5), CoarseDropout(max_holes=2, max_height=config['input_resolution'][0]//4, max_width=config['input_resolution'][1]//4, min_holes=1, min_height=config['input_resolution'][0]//16, min_width=config['input_resolution'][1]//16, fill_value=0, mask_fill_value=0,", "as ToTensor from get_config import get_config config = get_config() MEAN", "mask_fill_value=0, p=0.5), Normalize(mean=(MEAN[0], MEAN[1], MEAN[2]), std=(STD[0], STD[1], STD[2])), ToTensor(), ])", "as np from albumentations import (Compose, HorizontalFlip, VerticalFlip, Rotate, RandomRotate90,", "min_height=config['input_resolution'][0]//16, min_width=config['input_resolution'][1]//16, fill_value=0, mask_fill_value=0, p=0.5), Normalize(mean=(MEAN[0], MEAN[1], MEAN[2]), std=(STD[0], STD[1],", "import ToTensorV2 as ToTensor from get_config import get_config config =", "def get_transforms_valid(): transform_valid = Compose([ Normalize(mean=(MEAN[0], MEAN[1], MEAN[2]), std=(STD[0], STD[1],", "ShiftScaleRotate(shift_limit=0, scale_limit=(-0.2,0.2), rotate_limit=(-30,30), interpolation=1, border_mode=0, value=(0,0,0), p=0.5), GaussNoise(var_limit=(0,50.0), 
mean=0, p=0.5),", "p=0.5), Normalize(mean=(MEAN[0], MEAN[1], MEAN[2]), std=(STD[0], STD[1], STD[2])), ToTensor(), ]) return", "= Compose([ #Basic RandomRotate90(p=1), HorizontalFlip(p=0.5), #Morphology ShiftScaleRotate(shift_limit=0, scale_limit=(-0.2,0.2), rotate_limit=(-30,30), interpolation=1,", "from albumentations import (Compose, HorizontalFlip, VerticalFlip, Rotate, RandomRotate90, ShiftScaleRotate, ElasticTransform,", "HueSaturationValue(hue_shift_limit=30, sat_shift_limit=30, val_shift_limit=0, p=0.5), CoarseDropout(max_holes=2, max_height=config['input_resolution'][0]//4, max_width=config['input_resolution'][1]//4, min_holes=1, min_height=config['input_resolution'][0]//16, min_width=config['input_resolution'][1]//16,", "albumentations.pytorch import ToTensorV2 as ToTensor from get_config import get_config config", "import get_config config = get_config() MEAN = np.array([0.485, 0.456, 0.406])", "std=(STD[0], STD[1], STD[2])), ToTensor(), ] ) return transform_valid def denormalize(z,", "min_width=config['input_resolution'][1]//16, fill_value=0, mask_fill_value=0, p=0.5), Normalize(mean=(MEAN[0], MEAN[1], MEAN[2]), std=(STD[0], STD[1], STD[2])),", "from get_config import get_config config = get_config() MEAN = np.array([0.485,", "Normalize(mean=(MEAN[0], MEAN[1], MEAN[2]), std=(STD[0], STD[1], STD[2])), ToTensor(), ]) return transform_train", "return transform_train def get_transforms_valid(): transform_valid = Compose([ Normalize(mean=(MEAN[0], MEAN[1], MEAN[2]),", "get_transforms_valid(): transform_valid = Compose([ Normalize(mean=(MEAN[0], MEAN[1], MEAN[2]), std=(STD[0], STD[1], STD[2])),", "STD = np.array([0.229, 0.224, 0.225]) def get_transforms_train(): transform_train = Compose([", "Rotate, RandomRotate90, ShiftScaleRotate, ElasticTransform, GridDistortion, RandomSizedCrop, RandomCrop, CenterCrop, RandomBrightnessContrast, HueSaturationValue,", "p=0.5), GaussNoise(var_limit=(0,50.0), mean=0, p=0.5), GaussianBlur(blur_limit=(3,7), p=0.5), #Color RandomBrightnessContrast(brightness_limit=0.35, contrast_limit=0.5, brightness_by_max=True,p=0.5),", "= np.array([0.485, 0.456, 0.406]) STD = np.array([0.229, 0.224, 0.225]) def", "= Compose([ Normalize(mean=(MEAN[0], MEAN[1], MEAN[2]), std=(STD[0], STD[1], STD[2])), ToTensor(), ]", "return transform_valid def denormalize(z, mean=MEAN.reshape(-1,1,1), std=STD.reshape(-1,1,1)): return std*z + mean", "GaussNoise(var_limit=(0,50.0), mean=0, p=0.5), GaussianBlur(blur_limit=(3,7), p=0.5), #Color RandomBrightnessContrast(brightness_limit=0.35, contrast_limit=0.5, brightness_by_max=True,p=0.5), HueSaturationValue(hue_shift_limit=30,", "ShiftScaleRotate, ElasticTransform, GridDistortion, RandomSizedCrop, RandomCrop, CenterCrop, RandomBrightnessContrast, HueSaturationValue, IAASharpen, RandomGamma,", "#Basic RandomRotate90(p=1), HorizontalFlip(p=0.5), #Morphology ShiftScaleRotate(shift_limit=0, scale_limit=(-0.2,0.2), rotate_limit=(-30,30), interpolation=1, border_mode=0, value=(0,0,0),", "import (Compose, HorizontalFlip, VerticalFlip, Rotate, RandomRotate90, ShiftScaleRotate, ElasticTransform, GridDistortion, RandomSizedCrop,", "p=0.5), #Color RandomBrightnessContrast(brightness_limit=0.35, contrast_limit=0.5, brightness_by_max=True,p=0.5), HueSaturationValue(hue_shift_limit=30, sat_shift_limit=30, val_shift_limit=0, p=0.5), CoarseDropout(max_holes=2,", "OneOf, NoOp) from albumentations.pytorch import ToTensorV2 as ToTensor from get_config", "STD[1], STD[2])), ToTensor(), ] ) return transform_valid def denormalize(z, 
mean=MEAN.reshape(-1,1,1),", "CoarseDropout(max_holes=2, max_height=config['input_resolution'][0]//4, max_width=config['input_resolution'][1]//4, min_holes=1, min_height=config['input_resolution'][0]//16, min_width=config['input_resolution'][1]//16, fill_value=0, mask_fill_value=0, p=0.5), Normalize(mean=(MEAN[0],", "Compose([ Normalize(mean=(MEAN[0], MEAN[1], MEAN[2]), std=(STD[0], STD[1], STD[2])), ToTensor(), ] )", "(Compose, HorizontalFlip, VerticalFlip, Rotate, RandomRotate90, ShiftScaleRotate, ElasticTransform, GridDistortion, RandomSizedCrop, RandomCrop,", "p=0.5), GaussianBlur(blur_limit=(3,7), p=0.5), #Color RandomBrightnessContrast(brightness_limit=0.35, contrast_limit=0.5, brightness_by_max=True,p=0.5), HueSaturationValue(hue_shift_limit=30, sat_shift_limit=30, val_shift_limit=0,", "np from albumentations import (Compose, HorizontalFlip, VerticalFlip, Rotate, RandomRotate90, ShiftScaleRotate,", "RandomBrightness, RandomBrightnessContrast, GaussianBlur,CLAHE, Cutout, CoarseDropout, GaussNoise, ChannelShuffle, ToGray, OpticalDistortion, Normalize,", "ToGray, OpticalDistortion, Normalize, OneOf, NoOp) from albumentations.pytorch import ToTensorV2 as", "rotate_limit=(-30,30), interpolation=1, border_mode=0, value=(0,0,0), p=0.5), GaussNoise(var_limit=(0,50.0), mean=0, p=0.5), GaussianBlur(blur_limit=(3,7), p=0.5),", "= np.array([0.229, 0.224, 0.225]) def get_transforms_train(): transform_train = Compose([ #Basic", "HorizontalFlip, VerticalFlip, Rotate, RandomRotate90, ShiftScaleRotate, ElasticTransform, GridDistortion, RandomSizedCrop, RandomCrop, CenterCrop,", "ElasticTransform, GridDistortion, RandomSizedCrop, RandomCrop, CenterCrop, RandomBrightnessContrast, HueSaturationValue, IAASharpen, RandomGamma, RandomBrightness,", "GaussianBlur,CLAHE, Cutout, CoarseDropout, GaussNoise, ChannelShuffle, ToGray, OpticalDistortion, Normalize, OneOf, NoOp)", "GaussNoise, ChannelShuffle, ToGray, OpticalDistortion, Normalize, OneOf, NoOp) from albumentations.pytorch import", "transform_valid = Compose([ Normalize(mean=(MEAN[0], MEAN[1], MEAN[2]), std=(STD[0], STD[1], STD[2])), ToTensor(),", "RandomSizedCrop, RandomCrop, CenterCrop, RandomBrightnessContrast, HueSaturationValue, IAASharpen, RandomGamma, RandomBrightness, RandomBrightnessContrast, GaussianBlur,CLAHE,", "transform_train def get_transforms_valid(): transform_valid = Compose([ Normalize(mean=(MEAN[0], MEAN[1], MEAN[2]), std=(STD[0],", "get_transforms_train(): transform_train = Compose([ #Basic RandomRotate90(p=1), HorizontalFlip(p=0.5), #Morphology ShiftScaleRotate(shift_limit=0, scale_limit=(-0.2,0.2),", "STD[1], STD[2])), ToTensor(), ]) return transform_train def get_transforms_valid(): transform_valid =", "STD[2])), ToTensor(), ] ) return transform_valid def denormalize(z, mean=MEAN.reshape(-1,1,1), std=STD.reshape(-1,1,1)):", "ToTensor from get_config import get_config config = get_config() MEAN =", "scale_limit=(-0.2,0.2), rotate_limit=(-30,30), interpolation=1, border_mode=0, value=(0,0,0), p=0.5), GaussNoise(var_limit=(0,50.0), mean=0, p=0.5), GaussianBlur(blur_limit=(3,7),", "Cutout, CoarseDropout, GaussNoise, ChannelShuffle, ToGray, OpticalDistortion, Normalize, OneOf, NoOp) from", "STD[2])), ToTensor(), ]) return transform_train def get_transforms_valid(): transform_valid = Compose([", "0.406]) STD = np.array([0.229, 0.224, 0.225]) def get_transforms_train(): transform_train =" ]
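# Minimal usage sketch (not part of the original module): run the training pipeline
# on a dummy RGB frame and undo the normalization. The zero image is only a stand-in
# for a real crop.
if __name__ == "__main__":
    dummy = np.zeros((256, 256, 3), dtype=np.uint8)
    augmented = get_transforms_train()(image=dummy)["image"]   # CHW float tensor after Normalize + ToTensorV2
    restored = denormalize(augmented.numpy())                  # back to the unnormalized range
    print(augmented.shape, restored.shape)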
[ "left(self): return min(self.start.x, self.end.x) @property def ineye(self): return max(self.start.z, self.end.z)", "x self.y = y self.z = z self.rid = rid", "cuboid and the operation is successful then this cuboid is", "is Up/Down from this if self.left == other.left and self.width", "intersects or not Returns: bool: True if the cuboids intersect,", "a cuboid to this one. If the result is also", "the operation is successful then this cuboid is modified to", "class VSegment(Segment): \"\"\"Vertical Segment\"\"\" def __init__(self, start, length): \"\"\"Create a", "self.z == other.z) def __hash__(self): return hash( (self.x, self.y, self.z,", "self.depth: x_min = min(self.left, other.left) x_max = max(self.right, other.right) self.x", "cuboid and cub. Args: cub (Cuboid): Cuboid to test for", "0 and width >= 0 and depth >= 0) self.width", "cub (Cuboid): Cuboid to test for intersections. edges (bool): Accept", "other.contains(self): self.x = other.x self.y = other.y self.z = other.z", "- height - depth - \"\"\" __slots__ = ('width', 'height',", "edges are considered an intersection, and a cuboid of 0", "= x_max - x_min return True # Other cuboid is", "+ (self.y - point.y)**2 + ( self.z - point.z)**2) def", ">= 0) self.width = width self.height = height self.depth =", "- point.z)**2 class Segment(object): __slots__ = ('start', 'end') def __init__(self,", "Segment\"\"\" def __init__(self, start, length): \"\"\"Create a Vertical segment given", "not isinstance(length, Point)) super(VSegment, self).__init__( start, Point(start.x, start.y, start.z +", "False # Other cuboid is Up/Down from this if self.left", "width (int, float): height (int, float): depth (int, float): rid", "and self.outeye == cub.ineye or self.left == cub.right and self.bottom", "edges (bool): Accept edge touching cuboids as intersects or not", "== self.right and self.bottom == cub.top and self.outeye == cub.ineye", "y_max = max(self.top, other.top) self.y = y_min self.height = y_max", "useful for some comparisons\"\"\" return self.start.distance_squared(self.end) @property def length(self): return", "def ineye(self): \"\"\"Cuboid nearer from eye edge z coordinate\"\"\" return", "= y_max - y_min return True # Other cuboid is", "Segment end point \"\"\" assert(isinstance(start, Point) and isinstance(end, Point)) self.start", "and cub.bottom == self.top and self.outeye == cub.ineye or cub.left", "self.z = z_min self.depth = z_max - z_min return True", "cub.ineye or self.ineye == cub.outeye): return False # Discard corner", "or self.ineye == cub.outeye): return False # Discard corner intersects", "y (int, float): Y coordinate z (int, float): Z coordinate", "cub.top or self.top < cub.bottom or self.left > cub.right or", "\"\"\"Tests if another cuboid is contained by this one Arguments:", "Discard corner intersects if (self.left == cub.right and self.bottom ==", "other.bottom) y_max = max(self.top, other.top) self.y = y_min self.height =", "depth, rid=None): \"\"\"Initiating the Cuboid Args: x (int, float): y", "== cub.right and cub.bottom == self.top and self.outeye == cub.ineye", "max(self.left, cub.left) top = min(self.top, cub.top) right = min(self.right, cub.right)", "y_min = min(self.bottom, other.bottom) y_max = max(self.top, other.top) self.y =", "x_min return True # Other cuboid is Right/Left from this", "yield self.corner_bot_l_out def __repr__(self): return \"R({}, {}, {}, {}, {},", "self.ineye) @property def corner_bot_r_out(self): return Point(self.right, self.bottom, self.ineye) @property def", "self.ineye or cub.left 
== self.right and cub.bottom == self.top and", "intersect, False otherwise \"\"\" # Not even touching if (self.bottom", "HSegment(Segment): \"\"\"Horizontal Segment\"\"\" def __init__(self, start, length): \"\"\"Create an Horizontal", "cuboids as intersects or not Returns: bool: True if the", "self.top and self.outeye == cub.ineye or self.left == cub.right and", "__slots__ = ('x', 'y', 'z') def __init__(self, x, y, z):", "self.corner_top_r_out yield self.corner_bot_r_out yield self.corner_bot_l_out def __repr__(self): return \"R({}, {},", "== self.ineye or self.left == cub.right and cub.bottom == self.top", "self.y, self.z) def distance(self, point): \"\"\"Calculate distance to another point\"\"\"", "+ (self.y - point.y)**2 + ( self.z - point.z)**2 class", "sorting)\"\"\" return self.volume() < other.volume() def __eq__(self, other): \"\"\"Equal cuboids", "cub.bottom == self.top and cub.outeye == self.ineye or cub.left ==", "touching cuboids as intersects or not Returns: bool: True if", "self.depth) def intersects(self, cub, edges=False): \"\"\"Detect intersections between this cuboid", "self.end.x - self.start.x class VSegment(Segment): \"\"\"Vertical Segment\"\"\" def __init__(self, start,", "return (self.width == other.width and self.height == other.height and self.depth", "self).__init__( start, Point(start.x, start.y + length, start.z)) @property def length(self):", "self.outeye) @property def corner_bot_l(self): return Point(self.left, self.bottom, self.outeye) @property def", "(self.x, self.y, self.z, self.width, self.height, self.depth)) def __iter__(self): \"\"\"Iterate through", "length and useful for some comparisons\"\"\" return self.start.distance_squared(self.end) @property def", "other.right) self.x = x_min self.width = x_max - x_min return", "x_max - x_min return True # Other cuboid is Right/Left", "* self.depth def move(self, x, y, z): \"\"\"Move Cuboid to", "\"\"\" assert(height >= 0 and width >= 0 and depth", "self.height == other.height and self.depth == other.depth and self.x ==", "cuboid corners\"\"\" yield self.corner_top_l yield self.corner_top_r yield self.corner_bot_r yield self.corner_bot_l", "cub.bottom == self.top and self.outeye == cub.ineye or cub.left ==", "self.outeye == cub.ineye or self.left == cub.right and self.bottom ==", "def __iter__(self): \"\"\"Iterate through cuboid corners\"\"\" yield self.corner_top_l yield self.corner_top_r", "'z') def __init__(self, x, y, z): self.x = x self.y", "other.volume() def __eq__(self, other): \"\"\"Equal cuboids have same properties.\"\"\" if", "Segment start point end (Point): Segment end point \"\"\" assert(isinstance(start,", "self.top and cub.outeye == self.ineye): return False return True def", "if it is inside this one, False otherwise \"\"\" return", "= ('x', 'y', 'z') def __init__(self, x, y, z): self.x", "return \"P({}, {}, {})\".format(self.x, self.y, self.z) def distance(self, point): \"\"\"Calculate", "Cuboid: Intersection. None: There was no intersection. \"\"\" if not", "its left most end point and its length. 
Arguments: -", "{})\".format(self.start, self.end) @property def length_squared(self): \"\"\"Faster than length and useful", "self.right == cub.left or self.outeye == cub.ineye or self.ineye ==", "def outeye(self): \"\"\"Cuboid farther from eye edge z coordinate\"\"\" return", "return min(self.start.z, self.end.z) class HSegment(Segment): \"\"\"Horizontal Segment\"\"\" def __init__(self, start,", "= end def __eq__(self, other): if not isinstance(other, self.__class__): None", "self.left == cub.right and cub.bottom == self.top and cub.outeye ==", "== other.start and self.end == other.end def __repr__(self): return \"S({},", "self.right and self.bottom == cub.top and cub.outeye == self.ineye or", "Up/Down from this if self.left == other.left and self.width ==", "= start self.end = end def __eq__(self, other): if not", "cuboid to this one. If the result is also a", "return \"R({}, {}, {}, {}, {}, {})\".format( self.x, self.y, self.z,", "x (int, float): y (int, float): z (int, float): width", "and depth >= 0) self.width = width self.height = height", "float): y (int, float): z (int, float): width (int, float):", "cuboid is contained by this one Arguments: cub (Cuboid): The", "'depth', 'x', 'y', 'z', 'rid') def __init__(self, x, y, z,", "point end (Point): Segment end point \"\"\" assert(isinstance(start, Point) and", "intersection. Arguments: cub (Cuboid): The other cuboid. edges (bool): If", "(Cuboid): The other cuboiud Returns: bool: True if it is", "and \\ self.outeye == other.outeye and self.depth == self.depth: y_min", "+ length)) @property def length(self): return self.end.z - self.start.z class", "== self.right and cub.bottom == self.top and cub.outeye == self.ineye):", "right edge x coordinate\"\"\" return self.x + self.width @property def", "\\ self.left == other.left and self.width == other.width: z_min =", "max(self.start.x, self.end.x) @property def left(self): return min(self.start.x, self.end.x) @property def", "('width', 'height', 'depth', 'x', 'y', 'z', 'rid') def __init__(self, x,", "depth (int, float): rid (identifier object): \"\"\" assert(height >= 0", "+ self.width and cub.z + cub.depth <= self.z + self.depth)", "intersections between this cuboid and cub. Args: cub (Cuboid): Cuboid", "isinstance(length, Point)) super(VSegment, self).__init__( start, Point(start.x, start.y, start.z + length))", "Starting Point - length (number): segment length \"\"\" assert(isinstance(start, Point)", "== cub.top and self.outeye == cub.ineye or cub.left == self.right", "x coordinate\"\"\" return self.x + self.width @property def outeye(self): \"\"\"Cuboid", "not Returns: bool: True if the cuboids intersect, False otherwise", "point.x)**2 + (self.y - point.y)**2 + ( self.z - point.z)**2)", "+ self.height @property def left(self): \"\"\"Cuboid left edge x coordinate\"\"\"", "or self.right < cub.left or self.outeye > cub.ineye or self.ineye", "bottom edge y coordinate\"\"\" return self.y @property def top(self): \"\"\"Cuboid", "- self.start.y class DSegment(Segment): \"\"\"In-Depth Segment\"\"\" def __init__(self, start, length):", "or self.outeye == cub.ineye or self.ineye == cub.outeye): return False", "return max(self.start.x, self.end.x) @property def left(self): return min(self.start.x, self.end.x) @property", "cub.ineye or self.left == cub.right and cub.bottom == self.top and", "Point(self.left, self.top, self.ineye) @property def corner_top_r_out(self): return Point(self.right, self.top, self.ineye)", "self.end.z - self.start.z class Cuboid(object): \"\"\"Basic cuboid primitive class. 
x,", "@property def top(self): \"\"\"Cuboid top edge y coordiante\"\"\" return self.y", "- y_min return True # Other cuboid is Right/Left from", "\"\"\" __slots__ = ('width', 'height', 'depth', 'x', 'y', 'z', 'rid')", "(Point): Segment start point end (Point): Segment end point \"\"\"", "__hash__(self): return hash( (self.x, self.y, self.z, self.width, self.height, self.depth)) def", "coordinate\"\"\" return self.z + self.depth @property def corner_top_l(self): return Point(self.left,", "otherwise \"\"\" return (cub.y >= self.y and cub.x >= self.x", "other.width and \\ self.outeye == other.outeye and self.depth == self.depth:", "def distance_squared(self, point): return (self.x - point.x)**2 + (self.y -", "other (Cuboid): Cuboid to join Returns: bool: True when successfully", "rid @property def bottom(self): \"\"\"Cuboid bottom edge y coordinate\"\"\" return", "Point(self.left, self.bottom, self.ineye) def __lt__(self, other): \"\"\"Compare cuboids by volume", "coordinate\"\"\" return self.x + self.width @property def outeye(self): \"\"\"Cuboid farther", "self.depth == self.depth: y_min = min(self.bottom, other.bottom) y_max = max(self.top,", "y self.z = z def __eq__(self, other): return (self.x ==", "= min(self.right, cub.right) outeye = max(self.outeye, cub.outeye) ineye = min(self.ineye,", "<= self.z + self.depth) def intersects(self, cub, edges=False): \"\"\"Detect intersections", "and self.outeye == cub.ineye or cub.left == self.right and self.bottom", "# Other cuboid is Up/Down from this if self.left ==", "the intersection of this and cub If the cuboids are", "and a cuboid of 0 height or width or depth", "and width >= 0 and depth >= 0) self.width =", "and cub.outeye == self.ineye or cub.left == self.right and self.bottom", "return (self.x - point.x)**2 + (self.y - point.y)**2 + (", "\"\"\" assert(isinstance(start, Point) and not isinstance(length, Point)) super(HSegment, self).__init__( start,", "self.ineye == cub.outeye): return False # Discard corner intersects if", "there is no intersection. Arguments: cub (Cuboid): The other cuboid.", "== self.depth: x_min = min(self.left, other.left) x_max = max(self.right, other.right)", "length. 
Arguments: - start (Point): Starting Point - length (number):", "operation is successful then this cuboid is modified to the", "0 and depth >= 0) self.width = width self.height =", "min(self.left, other.left) x_max = max(self.right, other.right) self.x = x_min self.width", "isinstance(other, self.__class__): return False return (self.width == other.width and self.height", "= x_min self.width = x_max - x_min return True #", "== other.depth and self.x == other.x and self.y == other.y", "(self.width == other.width and self.height == other.height and self.depth ==", "== other.width: z_min = min(self.outeye, other.outeye) z_max = max(self.ineye, other.ineye)", "= other.y self.z = other.z self.width = other.width self.height =", "@property def ineye(self): \"\"\"Cuboid nearer from eye edge z coordinate\"\"\"", "{}, {}, {}, {})\".format( self.x, self.y, self.z, self.width, self.height, self.depth)", "def top(self): return max(self.start.y, self.end.y) @property def bottom(self): return min(self.start.y,", "@property def corner_bot_r(self): return Point(self.right, self.bottom, self.outeye) @property def corner_bot_l(self):", "self.corner_bot_r yield self.corner_bot_l yield self.corner_top_l_out yield self.corner_top_r_out yield self.corner_bot_r_out yield", "self.top and self.outeye == cub.ineye or cub.left == self.right and", "__repr__(self): return \"P({}, {}, {})\".format(self.x, self.y, self.z) def distance(self, point):", "\"P({}, {}, {})\".format(self.x, self.y, self.z) def distance(self, point): \"\"\"Calculate distance", "- self.start.z class Cuboid(object): \"\"\"Basic cuboid primitive class. x, y,", "\"\"\"Try to join a cuboid to this one. If the", "# Discard corner intersects if (self.left == cub.right and self.bottom", "z coordinate\"\"\" return self.z @property def ineye(self): \"\"\"Cuboid nearer from", "assert(isinstance(start, Point) and not isinstance(length, Point)) super(HSegment, self).__init__( start, Point(start.x", "def top(self): \"\"\"Cuboid top edge y coordiante\"\"\" return self.y +", "by their edges, and the argument 'edges' is True the", "and \\ self.left == other.left and self.width == other.width: z_min", "def length(self): return self.start.distance(self.end) @property def top(self): return max(self.start.y, self.end.y)", "is no intersection. Arguments: cub (Cuboid): The other cuboid. edges", "('start', 'end') def __init__(self, start, end): \"\"\"Arguments: start (Point): Segment", "corner_bot_l_out(self): return Point(self.left, self.bottom, self.ineye) def __lt__(self, other): \"\"\"Compare cuboids", "\"\"\"Calculate distance to another point\"\"\" return sqrt((self.x - point.x)**2 +", "self.y = y self.z = z def __eq__(self, other): return", "end point and its length. Arguments: - start (Point): Starting", "this cuboid is modified to the union. 
Arguments: other (Cuboid):", "x self.y = y self.z = z def contains(self, cub):", "return self.x + self.width @property def outeye(self): \"\"\"Cuboid farther from", "def __eq__(self, other): \"\"\"Equal cuboids have same properties.\"\"\" if not", "start.y + length, start.z)) @property def length(self): return self.end.y -", "cub.ineye or cub.left == self.right and self.bottom == cub.top and", "z def __eq__(self, other): return (self.x == other.x and self.y", "Point(start.x, start.y + length, start.z)) @property def length(self): return self.end.y", "self.start == other.start and self.end == other.end def __repr__(self): return", "edges=edges): return None bottom = max(self.bottom, cub.bottom) left = max(self.left,", "(int, float): width (int, float): height (int, float): depth (int,", "Point(start.x + length, start.y, start.z)) @property def length(self): return self.end.x", "min(self.bottom, other.bottom) y_max = max(self.top, other.top) self.y = y_min self.height", "# Other cuboid is Right/Left from this if self.bottom ==", "Point)) self.start = start self.end = end def __eq__(self, other):", "self.z - point.z)**2 class Segment(object): __slots__ = ('start', 'end') def", "start.z)) @property def length(self): return self.end.x - self.start.x class VSegment(Segment):", "x_max = max(self.right, other.right) self.x = x_min self.width = x_max", "and \\ self.outeye == other.outeye and self.depth == self.depth: x_min", "== cub.top and cub.outeye == self.ineye or cub.left == self.right", "length(self): return self.start.distance(self.end) @property def top(self): return max(self.start.y, self.end.y) @property", "other.y and self.z == other.z) def __hash__(self): return hash( (self.x,", "other.ineye) self.z = z_min self.depth = z_max - z_min return", "other.left and self.width == other.width: z_min = min(self.outeye, other.outeye) z_max", "sqrt((self.x - point.x)**2 + (self.y - point.y)**2 + ( self.z", "edge y coordiante\"\"\" return self.y + self.height @property def left(self):", "self.height = height self.depth = depth self.x = x self.y", "There was no intersection. 
\"\"\" if not self.intersects(cub, edges=edges): return", "joined, False otherwise \"\"\" if self.contains(other): return True if other.contains(self):", "than length and useful for some comparisons\"\"\" return self.start.distance_squared(self.end) @property", "def intersection(self, cub, edges=False): \"\"\"Returns the cuboid resulting of the", "def length(self): return self.end.z - self.start.z class Cuboid(object): \"\"\"Basic cuboid", "- start (Point): Starting Point - length (number): segment length", "corner intersects if (self.left == cub.right and self.bottom == cub.top", "start, Point(start.x, start.y + length, start.z)) @property def length(self): return", "return self.volume() < other.volume() def __eq__(self, other): \"\"\"Equal cuboids have", "__repr__(self): return \"R({}, {}, {}, {}, {}, {})\".format( self.x, self.y,", "z (int, float): width (int, float): height (int, float): depth", "+ length, start.y, start.z)) @property def length(self): return self.end.x -", "the cuboids intersect, False otherwise \"\"\" # Not even touching", "this one Arguments: cub (Cuboid): The other cuboiud Returns: bool:", "intersects(self, cub, edges=False): \"\"\"Detect intersections between this cuboid and cub.", "self.x = x self.y = y self.z = z self.rid", "top edge y coordiante\"\"\" return self.y + self.height @property def", "@property def corner_top_r(self): return Point(self.right, self.top, self.outeye) @property def corner_bot_r(self):", "> cub.top or self.top < cub.bottom or self.left > cub.right", "0. Returns None if there is no intersection. Arguments: cub", "width, height, depth, rid=None): \"\"\"Initiating the Cuboid Args: x (int,", "length, start.z)) @property def length(self): return self.end.y - self.start.y class", "coordinate z (int, float): Z coordinate \"\"\" self.x = x", "it is inside this one, False otherwise \"\"\" return (cub.y", "== cub.right and self.bottom == cub.top and self.outeye == cub.ineye", "cub.bottom == self.top and self.outeye == cub.ineye or self.left ==", "> cub.right or self.right < cub.left or self.outeye > cub.ineye", "self.z = z def __eq__(self, other): return (self.x == other.x", "object): \"\"\" assert(height >= 0 and width >= 0 and", "start, length): \"\"\"Create an In-Depth segment given its bottom most", "cub.top or self.top == cub.bottom or self.left == cub.right or", "bool: True when successfully joined, False otherwise \"\"\" if self.contains(other):", "intersection of this and cub If the cuboids are only", "min(self.top, cub.top) right = min(self.right, cub.right) outeye = max(self.outeye, cub.outeye)", "edges, and the argument 'edges' is True the cuboid returned", "(self.x == other.x and self.y == other.y and self.z ==", "\"\"\"Cuboid nearer from eye edge z coordinate\"\"\" return self.z +", "@property def length(self): return self.start.distance(self.end) @property def top(self): return max(self.start.y,", "left(self): \"\"\"Cuboid left edge x coordinate\"\"\" return self.x @property def", "@property def ineye(self): return max(self.start.z, self.end.z) @property def outeye(self): return", "start.z + length)) @property def length(self): return self.end.z - self.start.z", "start, Point(start.x, start.y, start.z + length)) @property def length(self): return", "<= self.y + self.height and cub.x + cub.width <= self.x", "coordinate y (int, float): Y coordinate z (int, float): Z", "Point(start.x, start.y, start.z + length)) @property def length(self): return self.end.z", "True when successfully joined, False otherwise \"\"\" if 
self.contains(other): return", "return Point(self.left, self.top, self.outeye) @property def corner_top_r(self): return Point(self.right, self.top,", "cuboids have same properties.\"\"\" if not isinstance(other, self.__class__): return False", "self.end) @property def length_squared(self): \"\"\"Faster than length and useful for", "cuboiud Returns: bool: True if it is inside this one,", "None if there is no intersection. Arguments: cub (Cuboid): The", "self.z, self.width, self.height, self.depth) def volume(self): \"\"\"Cuboid volume\"\"\" return self.width", "def corner_bot_l_out(self): return Point(self.left, self.bottom, self.ineye) def __lt__(self, other): \"\"\"Compare", "self.start.y class DSegment(Segment): \"\"\"In-Depth Segment\"\"\" def __init__(self, start, length): \"\"\"Create", "== other.z) def __hash__(self): return hash( (self.x, self.y, self.z, self.width,", "= other.depth return True if not self.intersects(other, edges=True): return False", "y_max - y_min return True # Other cuboid is Right/Left", "@property def left(self): return min(self.start.x, self.end.x) @property def ineye(self): return", "return Point(self.right, self.bottom, self.outeye) @property def corner_bot_l(self): return Point(self.left, self.bottom,", "z_max = max(self.ineye, other.ineye) self.z = z_min self.depth = z_max", "and self.bottom == cub.top and self.outeye == cub.ineye or self.left", "width self.height = height self.depth = depth self.x = x", "outeye(self): return min(self.start.z, self.end.z) class HSegment(Segment): \"\"\"Horizontal Segment\"\"\" def __init__(self,", "def right(self): \"\"\"Cuboid right edge x coordinate\"\"\" return self.x +", "or self.left == cub.right or self.right == cub.left or self.outeye", "intersects if not edges: if (self.bottom == cub.top or self.top", "z self.rid = rid @property def bottom(self): \"\"\"Cuboid bottom edge", "assert(height >= 0 and width >= 0 and depth >=", "and cub.z >= self.z and cub.y + cub.height <= self.y", "Y coordinate z (int, float): Z coordinate \"\"\" self.x =", "min(self.ineye, cub.ineye) return Cuboid( left, bottom, outeye, right - left,", "float): X coordinate y (int, float): Y coordinate z (int,", "cub.bottom or self.left > cub.right or self.right < cub.left or", "== other.height and \\ self.outeye == other.outeye and self.depth ==", "+ ( self.z - point.z)**2) def distance_squared(self, point): return (self.x", "float): depth (int, float): rid (identifier object): \"\"\" assert(height >=", "self.y, self.z, self.width, self.height, self.depth)) def __iter__(self): \"\"\"Iterate through cuboid", "def length(self): return self.end.x - self.start.x class VSegment(Segment): \"\"\"Vertical Segment\"\"\"", "by volume (used for sorting)\"\"\" return self.volume() < other.volume() def", "and cub.outeye == self.ineye): return False return True def intersection(self,", "@property def length(self): return self.end.y - self.start.y class DSegment(Segment): \"\"\"In-Depth", "= z def __eq__(self, other): return (self.x == other.x and", "Point(self.right, self.top, self.ineye) @property def corner_bot_r_out(self): return Point(self.right, self.bottom, self.ineye)", "self.ineye < cub.outeye): return False # Discard edge intersects if", "the argument 'edges' is True the cuboid returned will have", "False otherwise \"\"\" return (cub.y >= self.y and cub.x >=", ">= self.x and cub.z >= self.z and cub.y + cub.height", "point.y)**2 + ( self.z - point.z)**2 class Segment(object): __slots__ =", "__repr__(self): return \"S({}, {})\".format(self.start, self.end) 
@property def length_squared(self): \"\"\"Faster than", "length)) @property def length(self): return self.end.z - self.start.z class Cuboid(object):", "other): return (self.x == other.x and self.y == other.y and", "and cub.z + cub.depth <= self.z + self.depth) def intersects(self,", "def corner_top_r(self): return Point(self.right, self.top, self.outeye) @property def corner_bot_r(self): return", "\"\"\"Initiating the Cuboid Args: x (int, float): y (int, float):", "another point\"\"\" return sqrt((self.x - point.x)**2 + (self.y - point.y)**2", "Arguments: - start (Point): Starting Point - length (number): segment", "z def contains(self, cub): \"\"\"Tests if another cuboid is contained", "(int, float): Y coordinate z (int, float): Z coordinate \"\"\"", "self.depth) def volume(self): \"\"\"Cuboid volume\"\"\" return self.width * self.height *", "def __init__(self, x, y, z): self.x = x self.y =", "return max(self.start.y, self.end.y) @property def bottom(self): return min(self.start.y, self.end.y) @property", "+ cub.width <= self.x + self.width and cub.z + cub.depth", "Returns: bool: True if the cuboids intersect, False otherwise \"\"\"", "cuboids intersect, False otherwise \"\"\" # Not even touching if", "length \"\"\" assert(isinstance(start, Point) and not isinstance(length, Point)) super(HSegment, self).__init__(", "other.height and \\ self.outeye == other.outeye and self.depth == self.depth:", "max(self.outeye, cub.outeye) ineye = min(self.ineye, cub.ineye) return Cuboid( left, bottom,", "corner coordinates width - height - depth - \"\"\" __slots__", "self.x = x_min self.width = x_max - x_min return True", "if not self.intersects(cub, edges=edges): return None bottom = max(self.bottom, cub.bottom)", "return Point(self.left, self.top, self.ineye) @property def corner_top_r_out(self): return Point(self.right, self.top,", "other.outeye and self.depth == self.depth: x_min = min(self.left, other.left) x_max", "self.corner_bot_r_out yield self.corner_bot_l_out def __repr__(self): return \"R({}, {}, {}, {},", "result is also a cuboid and the operation is successful", "return self.end.x - self.start.x class VSegment(Segment): \"\"\"Vertical Segment\"\"\" def __init__(self,", "return Point(self.right, self.bottom, self.ineye) @property def corner_bot_l_out(self): return Point(self.left, self.bottom,", "@property def corner_top_r_out(self): return Point(self.right, self.top, self.ineye) @property def corner_bot_r_out(self):", "Intersection. None: There was no intersection. \"\"\" if not self.intersects(cub,", "cub.left or self.outeye > cub.ineye or self.ineye < cub.outeye): return", "The other cuboid. 
edges (bool): If true, touching edges are", "-*- from math import sqrt class Point(object): __slots__ = ('x',", "end): \"\"\"Arguments: start (Point): Segment start point end (Point): Segment", "Cuboid Args: x (int, float): y (int, float): z (int,", "is also a cuboid and the operation is successful then", "super(HSegment, self).__init__( start, Point(start.x + length, start.y, start.z)) @property def", "yield self.corner_top_r_out yield self.corner_bot_r_out yield self.corner_bot_l_out def __repr__(self): return \"R({},", "self.ineye or cub.left == self.right and self.bottom == cub.top and", "cuboids by volume (used for sorting)\"\"\" return self.volume() < other.volume()", "- \"\"\" __slots__ = ('width', 'height', 'depth', 'x', 'y', 'z',", "(self.bottom == cub.top or self.top == cub.bottom or self.left ==", "by this one Arguments: cub (Cuboid): The other cuboiud Returns:", "other.x self.y = other.y self.z = other.z self.width = other.width", "start self.end = end def __eq__(self, other): if not isinstance(other,", "point.y)**2 + ( self.z - point.z)**2) def distance_squared(self, point): return", "+ ( self.z - point.z)**2 class Segment(object): __slots__ = ('start',", "otherwise \"\"\" # Not even touching if (self.bottom > cub.top", "intersection, and a cuboid of 0 height or width or", "and cub.x + cub.width <= self.x + self.width and cub.z", "of the intersection of this and cub If the cuboids", "cub.height <= self.y + self.height and cub.x + cub.width <=", "contained by this one Arguments: cub (Cuboid): The other cuboiud", "self.depth: y_min = min(self.bottom, other.bottom) y_max = max(self.top, other.top) self.y", "outeye = max(self.outeye, cub.outeye) ineye = min(self.ineye, cub.ineye) return Cuboid(", "== other.outeye and self.depth == self.depth: x_min = min(self.left, other.left)", "Point(self.left, self.top, self.outeye) @property def corner_top_r(self): return Point(self.right, self.top, self.outeye)", "other.z self.width = other.width self.height = other.height self.depth = other.depth", "length): \"\"\"Create an Horizontal segment given its left most end", "left edge x coordinate\"\"\" return self.x @property def right(self): \"\"\"Cuboid", "= other.height self.depth = other.depth return True if not self.intersects(other,", "other.width and self.height == other.height and self.depth == other.depth and", "return Point(self.left, self.bottom, self.ineye) def __lt__(self, other): \"\"\"Compare cuboids by", "\"\"\"Cuboid top edge y coordiante\"\"\" return self.y + self.height @property", "def left(self): return min(self.start.x, self.end.x) @property def ineye(self): return max(self.start.z,", "to join Returns: bool: True when successfully joined, False otherwise", "not self.intersects(cub, edges=edges): return None bottom = max(self.bottom, cub.bottom) left", "return self.z @property def ineye(self): \"\"\"Cuboid nearer from eye edge", "primitive class. x, y, z-> Lower right corner coordinates width", "class. x, y, z-> Lower right corner coordinates width -", "cub.x >= self.x and cub.z >= self.z and cub.y +", "Point)) super(HSegment, self).__init__( start, Point(start.x + length, start.y, start.z)) @property", "return Point(self.left, self.bottom, self.outeye) @property def corner_top_l_out(self): return Point(self.left, self.top,", "left most end point and its length. 
Arguments: - start", "otherwise \"\"\" if self.contains(other): return True if other.contains(self): self.x =", "'y', 'z', 'rid') def __init__(self, x, y, z, width, height,", "point\"\"\" return sqrt((self.x - point.x)**2 + (self.y - point.y)**2 +", "= max(self.bottom, cub.bottom) left = max(self.left, cub.left) top = min(self.top,", "@property def left(self): \"\"\"Cuboid left edge x coordinate\"\"\" return self.x", "= rid @property def bottom(self): \"\"\"Cuboid bottom edge y coordinate\"\"\"", "will have a volume of 0. Returns None if there", "same properties.\"\"\" if not isinstance(other, self.__class__): return False return (self.width", "= ('width', 'height', 'depth', 'x', 'y', 'z', 'rid') def __init__(self,", "from math import sqrt class Point(object): __slots__ = ('x', 'y',", "other.height and self.depth == other.depth and self.x == other.x and", "self.outeye == cub.ineye or self.left == cub.right and cub.bottom ==", "self.left == cub.right or self.right == cub.left or self.outeye ==", "nearer from eye edge z coordinate\"\"\" return self.z + self.depth", "max(self.bottom, cub.bottom) left = max(self.left, cub.left) top = min(self.top, cub.top)", "point): \"\"\"Calculate distance to another point\"\"\" return sqrt((self.x - point.x)**2", "other.z) def __repr__(self): return \"P({}, {}, {})\".format(self.x, self.y, self.z) def", "and self.outeye == cub.ineye or cub.left == self.right and cub.bottom", "cub.right and cub.bottom == self.top and cub.outeye == self.ineye or", "+ cub.depth <= self.z + self.depth) def intersects(self, cub, edges=False):", "eye edge z coordinate\"\"\" return self.z + self.depth @property def", "return self.start.distance(self.end) @property def top(self): return max(self.start.y, self.end.y) @property def", "return self.start.distance_squared(self.end) @property def length(self): return self.start.distance(self.end) @property def top(self):", "cub.right or self.right == cub.left or self.outeye == cub.ineye or", "self.depth)) def __iter__(self): \"\"\"Iterate through cuboid corners\"\"\" yield self.corner_top_l yield", "bool: True if it is inside this one, False otherwise", "True if other.contains(self): self.x = other.x self.y = other.y self.z", "cub.outeye) ineye = min(self.ineye, cub.ineye) return Cuboid( left, bottom, outeye,", "z, width, height, depth, rid=None): \"\"\"Initiating the Cuboid Args: x", "== cub.ineye or self.ineye == cub.outeye): return False # Discard", "and cub.bottom == self.top and cub.outeye == self.ineye): return False", "right(self): return max(self.start.x, self.end.x) @property def left(self): return min(self.start.x, self.end.x)", "super(VSegment, self).__init__( start, Point(start.x, start.y, start.z + length)) @property def", "comparisons\"\"\" return self.start.distance_squared(self.end) @property def length(self): return self.start.distance(self.end) @property def", "if not edges: if (self.bottom == cub.top or self.top ==", "float): z (int, float): width (int, float): height (int, float):", "self.left == cub.right and cub.bottom == self.top and self.outeye ==", "min(self.start.x, self.end.x) @property def ineye(self): return max(self.start.z, self.end.z) @property def", "\"\"\"Create a Vertical segment given its bottom most end point", "min(self.outeye, other.outeye) z_max = max(self.ineye, other.ineye) self.z = z_min self.depth", "= max(self.ineye, other.ineye) self.z = z_min self.depth = z_max -", "self.height * self.depth def move(self, x, y, z): \"\"\"Move Cuboid", "@property def bottom(self): \"\"\"Cuboid bottom edge y 
coordinate\"\"\" return self.y", "intersection(self, cub, edges=False): \"\"\"Returns the cuboid resulting of the intersection", "self.z - point.z)**2) def distance_squared(self, point): return (self.x - point.x)**2", "self.y @property def top(self): \"\"\"Cuboid top edge y coordiante\"\"\" return", "or self.left == cub.right and cub.bottom == self.top and cub.outeye", "def corner_bot_r(self): return Point(self.right, self.bottom, self.outeye) @property def corner_bot_l(self): return", "__iter__(self): \"\"\"Iterate through cuboid corners\"\"\" yield self.corner_top_l yield self.corner_top_r yield", "x (int, float): X coordinate y (int, float): Y coordinate", "@property def top(self): return max(self.start.y, self.end.y) @property def bottom(self): return", "and self.height == other.height and self.depth == other.depth and self.x", "def length_squared(self): \"\"\"Faster than length and useful for some comparisons\"\"\"", "self.height == other.height and \\ self.left == other.left and self.width", "bottom, ineye - outeye) def join(self, other): \"\"\"Try to join", "self.outeye == cub.ineye or cub.left == self.right and cub.bottom ==", "0 height or width or depth will be returned Returns:", "== cub.ineye or cub.left == self.right and self.bottom == cub.top", "self.width @property def outeye(self): \"\"\"Cuboid farther from eye edge z", "- left, top - bottom, ineye - outeye) def join(self,", "or self.right == cub.left or self.outeye == cub.ineye or self.ineye", "VSegment(Segment): \"\"\"Vertical Segment\"\"\" def __init__(self, start, length): \"\"\"Create a Vertical", "(self.y - point.y)**2 + ( self.z - point.z)**2 class Segment(object):", "rid=None): \"\"\"Initiating the Cuboid Args: x (int, float): y (int,", "== other.bottom and self.height == other.height and \\ self.outeye ==", "distance(self, point): \"\"\"Calculate distance to another point\"\"\" return sqrt((self.x -", "y_min self.height = y_max - y_min return True # Other", "and self.z == other.z) def __repr__(self): return \"P({}, {}, {})\".format(self.x,", "def __repr__(self): return \"R({}, {}, {}, {}, {}, {})\".format( self.x,", "segment length \"\"\" assert(isinstance(start, Point) and not isinstance(length, Point)) super(VSegment,", "return Point(self.right, self.top, self.ineye) @property def corner_bot_r_out(self): return Point(self.right, self.bottom,", "self.x == other.x and self.y == other.y and self.z ==", "cub.top) right = min(self.right, cub.right) outeye = max(self.outeye, cub.outeye) ineye", "__init__(self, start, length): \"\"\"Create an In-Depth segment given its bottom", "z (int, float): Z coordinate \"\"\" self.x = x self.y", "to this one. 
If the result is also a cuboid", "__eq__(self, other): \"\"\"Equal cuboids have same properties.\"\"\" if not isinstance(other,", "or width or depth will be returned Returns: Cuboid: Intersection.", "other.height self.depth = other.depth return True if not self.intersects(other, edges=True):", "and self.width == other.width and \\ self.outeye == other.outeye and", "end point \"\"\" assert(isinstance(start, Point) and isinstance(end, Point)) self.start =", "return self.z + self.depth @property def corner_top_l(self): return Point(self.left, self.top,", "+ self.depth @property def corner_top_l(self): return Point(self.left, self.top, self.outeye) @property", "self.outeye == other.outeye and self.depth == self.depth: y_min = min(self.bottom,", "coordinate \"\"\" self.x = x self.y = y self.z =", "== cub.ineye or self.left == cub.right and cub.bottom == self.top", "left = max(self.left, cub.left) top = min(self.top, cub.top) right =", "its length. Arguments: - start (Point): Starting Point - length", "import sqrt class Point(object): __slots__ = ('x', 'y', 'z') def", "def __init__(self, start, length): \"\"\"Create a Vertical segment given its", "successful then this cuboid is modified to the union. Arguments:", "end def __eq__(self, other): if not isinstance(other, self.__class__): None return", "'rid') def __init__(self, x, y, z, width, height, depth, rid=None):", "distance to another point\"\"\" return sqrt((self.x - point.x)**2 + (self.y", "def bottom(self): return min(self.start.y, self.end.y) @property def right(self): return max(self.start.x,", "corner_top_r(self): return Point(self.right, self.top, self.outeye) @property def corner_bot_r(self): return Point(self.right,", "@property def bottom(self): return min(self.start.y, self.end.y) @property def right(self): return", "cub.right and cub.bottom == self.top and self.outeye == cub.ineye or", "start, length): \"\"\"Create an Horizontal segment given its left most", "is inside this one, False otherwise \"\"\" return (cub.y >=", "(Point): Segment end point \"\"\" assert(isinstance(start, Point) and isinstance(end, Point))", "self.outeye > cub.ineye or self.ineye < cub.outeye): return False #", "\"\"\"Cuboid bottom edge y coordinate\"\"\" return self.y @property def top(self):", "self.width, self.height, self.depth)) def __iter__(self): \"\"\"Iterate through cuboid corners\"\"\" yield", "end (Point): Segment end point \"\"\" assert(isinstance(start, Point) and isinstance(end,", "cub.outeye): return False # Discard corner intersects if (self.left ==", "and its length. Arguments: - start (Point): Starting Point -", "no intersection. \"\"\" if not self.intersects(cub, edges=edges): return None bottom", "self.corner_top_r yield self.corner_bot_r yield self.corner_bot_l yield self.corner_top_l_out yield self.corner_top_r_out yield", "< cub.bottom or self.left > cub.right or self.right < cub.left", "returned Returns: Cuboid: Intersection. None: There was no intersection. \"\"\"", "rid (identifier object): \"\"\" assert(height >= 0 and width >=", "volume (used for sorting)\"\"\" return self.volume() < other.volume() def __eq__(self,", "between this cuboid and cub. 
Args: cub (Cuboid): Cuboid to", "self.ineye) @property def corner_top_r_out(self): return Point(self.right, self.top, self.ineye) @property def", "self.height = y_max - y_min return True # Other cuboid", "self.height and cub.x + cub.width <= self.x + self.width and", "(Point): Starting Point - length (number): segment length \"\"\" assert(isinstance(start,", "not self.intersects(other, edges=True): return False # Other cuboid is Up/Down", "- self.start.x class VSegment(Segment): \"\"\"Vertical Segment\"\"\" def __init__(self, start, length):", "__init__(self, x, y, z, width, height, depth, rid=None): \"\"\"Initiating the", "bottom, outeye, right - left, top - bottom, ineye -", "return self.y @property def top(self): \"\"\"Cuboid top edge y coordiante\"\"\"", "or self.outeye > cub.ineye or self.ineye < cub.outeye): return False", "# Discard edge intersects if not edges: if (self.bottom ==", "Returns: bool: True if it is inside this one, False", "the cuboid returned will have a volume of 0. Returns", "and useful for some comparisons\"\"\" return self.start.distance_squared(self.end) @property def length(self):", "cuboid is Up/Down from this if self.left == other.left and", "In-Depth segment given its bottom most end point and its", "cub, edges=False): \"\"\"Detect intersections between this cuboid and cub. Args:", "Cuboid to test for intersections. edges (bool): Accept edge touching", "self.end.z) @property def outeye(self): return min(self.start.z, self.end.z) class HSegment(Segment): \"\"\"Horizontal", "considered an intersection, and a cuboid of 0 height or", "self.bottom == cub.top and cub.outeye == self.ineye or self.left ==", "== cub.right and cub.bottom == self.top and cub.outeye == self.ineye", "self.width = width self.height = height self.depth = depth self.x", "self.top < cub.bottom or self.left > cub.right or self.right <", "('x', 'y', 'z') def __init__(self, x, y, z): self.x =", "edge z coordinate\"\"\" return self.z + self.depth @property def corner_top_l(self):", "start (Point): Starting Point - length (number): segment length \"\"\"", "+ self.depth) def intersects(self, cub, edges=False): \"\"\"Detect intersections between this", "self.outeye == cub.ineye or cub.left == self.right and self.bottom ==", "\"\"\"Vertical Segment\"\"\" def __init__(self, start, length): \"\"\"Create a Vertical segment", "x, y, z): \"\"\"Move Cuboid to x,y,z coordinates Arguments: x", "and not isinstance(length, Point)) super(VSegment, self).__init__( start, Point(start.x, start.y, start.z", "self.contains(other): return True if other.contains(self): self.x = other.x self.y =", "hash( (self.x, self.y, self.z, self.width, self.height, self.depth)) def __iter__(self): \"\"\"Iterate", "+ self.height and cub.x + cub.width <= self.x + self.width", "other.width: z_min = min(self.outeye, other.outeye) z_max = max(self.ineye, other.ineye) self.z", "\"\"\" assert(isinstance(start, Point) and not isinstance(length, Point)) super(VSegment, self).__init__( start,", "(self.y - point.y)**2 + ( self.z - point.z)**2) def distance_squared(self,", "also a cuboid and the operation is successful then this", "\"\"\"Returns the cuboid resulting of the intersection of this and", "x, y, z-> Lower right corner coordinates width - height", "Arguments: cub (Cuboid): The other cuboiud Returns: bool: True if", "(number): segment length \"\"\" assert(isinstance(start, Point) and not isinstance(length, Point))", "'y', 'z') def __init__(self, x, y, z): self.x = x", "def __eq__(self, other): return (self.x == other.x and self.y 
==", "self.z == other.z) def __repr__(self): return \"P({}, {}, {})\".format(self.x, self.y,", "Z coordinate \"\"\" self.x = x self.y = y self.z", "Discard edge intersects if not edges: if (self.bottom == cub.top", "depth self.x = x self.y = y self.z = z", "the Cuboid Args: x (int, float): y (int, float): z", "cub.top and self.outeye == cub.ineye or self.left == cub.right and", "self.z = other.z self.width = other.width self.height = other.height self.depth", "min(self.start.z, self.end.z) class HSegment(Segment): \"\"\"Horizontal Segment\"\"\" def __init__(self, start, length):", "if there is no intersection. Arguments: cub (Cuboid): The other", "intersection. \"\"\" if not self.intersects(cub, edges=edges): return None bottom =", "cub.depth <= self.z + self.depth) def intersects(self, cub, edges=False): \"\"\"Detect", "and self.depth == other.depth and self.x == other.x and self.y", "another cuboid is contained by this one Arguments: cub (Cuboid):", "'edges' is True the cuboid returned will have a volume", "self.bottom == cub.top and self.outeye == cub.ineye or self.left ==", "False otherwise \"\"\" # Not even touching if (self.bottom >", "then this cuboid is modified to the union. Arguments: other", "== cub.ineye or cub.left == self.right and cub.bottom == self.top", "-*- coding: utf-8 -*- from math import sqrt class Point(object):", "== cub.right and self.bottom == cub.top and cub.outeye == self.ineye", "@property def right(self): \"\"\"Cuboid right edge x coordinate\"\"\" return self.x", "__init__(self, start, length): \"\"\"Create a Vertical segment given its bottom", "if another cuboid is contained by this one Arguments: cub", "\"\"\"Detect intersections between this cuboid and cub. Args: cub (Cuboid):", "true, touching edges are considered an intersection, and a cuboid", "cub.left == self.right and self.bottom == cub.top and cub.outeye ==", "is True the cuboid returned will have a volume of", "Args: x (int, float): y (int, float): z (int, float):", "\"\"\"Horizontal Segment\"\"\" def __init__(self, start, length): \"\"\"Create an Horizontal segment", "is Right/Left from this if self.bottom == other.bottom and self.height", "if (self.bottom > cub.top or self.top < cub.bottom or self.left", "no intersection. Arguments: cub (Cuboid): The other cuboid. edges (bool):", "outeye(self): \"\"\"Cuboid farther from eye edge z coordinate\"\"\" return self.z", "of 0. Returns None if there is no intersection. Arguments:", "cub.top and self.outeye == cub.ineye or cub.left == self.right and", "(self.bottom > cub.top or self.top < cub.bottom or self.left >", "return True # Other cuboid is Right/Left from this if", "== other.bottom and self.height == other.height and \\ self.left ==", "cub.left == self.right and self.bottom == cub.top and self.outeye ==", "self.bottom == other.bottom and self.height == other.height and \\ self.outeye", "Point(object): __slots__ = ('x', 'y', 'z') def __init__(self, x, y,", "cub (Cuboid): The other cuboid. edges (bool): If true, touching", "right(self): \"\"\"Cuboid right edge x coordinate\"\"\" return self.x + self.width", "and cub.outeye == self.ineye or cub.left == self.right and cub.bottom", "= z_min self.depth = z_max - z_min return True return", "self.intersects(cub, edges=edges): return None bottom = max(self.bottom, cub.bottom) left =", "its bottom most end point and its length. 
Arguments: -", "\"\"\"Cuboid left edge x coordinate\"\"\" return self.x @property def right(self):", "= other.z self.width = other.width self.height = other.height self.depth =", "other.outeye and self.depth == self.depth: y_min = min(self.bottom, other.bottom) y_max", "max(self.start.y, self.end.y) @property def bottom(self): return min(self.start.y, self.end.y) @property def", "Cuboid(object): \"\"\"Basic cuboid primitive class. x, y, z-> Lower right", "= height self.depth = depth self.x = x self.y =", "= min(self.top, cub.top) right = min(self.right, cub.right) outeye = max(self.outeye,", "== other.left and self.width == other.width and \\ self.outeye ==", "== self.ineye or cub.left == self.right and self.bottom == cub.top", "isinstance(other, self.__class__): None return self.start == other.start and self.end ==", "self.depth == other.depth and self.x == other.x and self.y ==", "Point(self.left, self.bottom, self.outeye) @property def corner_top_l_out(self): return Point(self.left, self.top, self.ineye)", "return min(self.start.x, self.end.x) @property def ineye(self): return max(self.start.z, self.end.z) @property", "\\ self.outeye == other.outeye and self.depth == self.depth: x_min =", "== other.width and \\ self.outeye == other.outeye and self.depth ==", "- point.y)**2 + ( self.z - point.z)**2) def distance_squared(self, point):", "given its left most end point and its length. Arguments:", "self.width, self.height, self.depth) def volume(self): \"\"\"Cuboid volume\"\"\" return self.width *", "one Arguments: cub (Cuboid): The other cuboiud Returns: bool: True", "self.corner_top_l_out yield self.corner_top_r_out yield self.corner_bot_r_out yield self.corner_bot_l_out def __repr__(self): return", "def bottom(self): \"\"\"Cuboid bottom edge y coordinate\"\"\" return self.y @property", "corner_top_l_out(self): return Point(self.left, self.top, self.ineye) @property def corner_top_r_out(self): return Point(self.right,", "a volume of 0. Returns None if there is no", "point and its length. Arguments: - start (Point): Starting Point", "= y self.z = z def contains(self, cub): \"\"\"Tests if", "Returns None if there is no intersection. 
Arguments: cub (Cuboid):", "length(self): return self.end.z - self.start.z class Cuboid(object): \"\"\"Basic cuboid primitive", "z-> Lower right corner coordinates width - height - depth", "= max(self.right, other.right) self.x = x_min self.width = x_max -", "edges=False): \"\"\"Returns the cuboid resulting of the intersection of this", "outeye) def join(self, other): \"\"\"Try to join a cuboid to", "point.x)**2 + (self.y - point.y)**2 + ( self.z - point.z)**2", "y, z, width, height, depth, rid=None): \"\"\"Initiating the Cuboid Args:", "self.corner_top_l yield self.corner_top_r yield self.corner_bot_r yield self.corner_bot_l yield self.corner_top_l_out yield", "top(self): \"\"\"Cuboid top edge y coordiante\"\"\" return self.y + self.height", "\"R({}, {}, {}, {}, {}, {})\".format( self.x, self.y, self.z, self.width,", "- outeye) def join(self, other): \"\"\"Try to join a cuboid", "True if not self.intersects(other, edges=True): return False # Other cuboid", "start (Point): Segment start point end (Point): Segment end point", "self.y and cub.x >= self.x and cub.z >= self.z and", "\"\"\" if not self.intersects(cub, edges=edges): return None bottom = max(self.bottom,", "and self.depth == self.depth: x_min = min(self.left, other.left) x_max =", "== other.x and self.y == other.y and self.z == other.z)", "cub.z >= self.z and cub.y + cub.height <= self.y +", "volume\"\"\" return self.width * self.height * self.depth def move(self, x,", "and self.height == other.height and \\ self.left == other.left and", "corner_bot_r(self): return Point(self.right, self.bottom, self.outeye) @property def corner_bot_l(self): return Point(self.left,", "(int, float): Z coordinate \"\"\" self.x = x self.y =", "== other.left and self.width == other.width: z_min = min(self.outeye, other.outeye)", "__slots__ = ('start', 'end') def __init__(self, start, end): \"\"\"Arguments: start", "def distance(self, point): \"\"\"Calculate distance to another point\"\"\" return sqrt((self.x", "cub.outeye == self.ineye or cub.left == self.right and cub.bottom ==", "outeye, right - left, top - bottom, ineye - outeye)", "for intersections. edges (bool): Accept edge touching cuboids as intersects", "cub.ineye or cub.left == self.right and cub.bottom == self.top and", "and self.x == other.x and self.y == other.y and self.z", "sqrt class Point(object): __slots__ = ('x', 'y', 'z') def __init__(self,", "self.outeye) @property def corner_bot_r(self): return Point(self.right, self.bottom, self.outeye) @property def", "length): \"\"\"Create an In-Depth segment given its bottom most end", "self.width and cub.z + cub.depth <= self.z + self.depth) def", "self.width = other.width self.height = other.height self.depth = other.depth return", "height (int, float): depth (int, float): rid (identifier object): \"\"\"", "\"\"\" return (cub.y >= self.y and cub.x >= self.x and", "z): self.x = x self.y = y self.z = z", "self.outeye) @property def corner_top_r(self): return Point(self.right, self.top, self.outeye) @property def", "or self.left == cub.right and self.bottom == cub.top and cub.outeye", "Returns: Cuboid: Intersection. None: There was no intersection. 
\"\"\" if", "\"\"\"Cuboid volume\"\"\" return self.width * self.height * self.depth def move(self,", "self.top and cub.outeye == self.ineye or cub.left == self.right and", "this if self.bottom == other.bottom and self.height == other.height and", "or self.top == cub.bottom or self.left == cub.right or self.right", "cub.ineye or self.left == cub.right and self.bottom == cub.top and", "and the operation is successful then this cuboid is modified", "self.height = other.height self.depth = other.depth return True if not", "yield self.corner_top_l yield self.corner_top_r yield self.corner_bot_r yield self.corner_bot_l yield self.corner_top_l_out", "ineye = min(self.ineye, cub.ineye) return Cuboid( left, bottom, outeye, right", "(identifier object): \"\"\" assert(height >= 0 and width >= 0", "self.bottom, self.outeye) @property def corner_bot_l(self): return Point(self.left, self.bottom, self.outeye) @property", "Point)) super(VSegment, self).__init__( start, Point(start.x, start.y + length, start.z)) @property", "length): \"\"\"Create a Vertical segment given its bottom most end", "to x,y,z coordinates Arguments: x (int, float): X coordinate y", "__init__(self, x, y, z): self.x = x self.y = y", "self.z + self.depth) def intersects(self, cub, edges=False): \"\"\"Detect intersections between", "an intersection, and a cuboid of 0 height or width", "join a cuboid to this one. If the result is", "x self.y = y self.z = z def __eq__(self, other):", "return False # Other cuboid is Up/Down from this if", "test for intersections. edges (bool): Accept edge touching cuboids as", "width or depth will be returned Returns: Cuboid: Intersection. None:", "(bool): If true, touching edges are considered an intersection, and", "True the cuboid returned will have a volume of 0.", "other): if not isinstance(other, self.__class__): None return self.start == other.start", "distance_squared(self, point): return (self.x - point.x)**2 + (self.y - point.y)**2", "other.start and self.end == other.end def __repr__(self): return \"S({}, {})\".format(self.start,", "return True if not self.intersects(other, edges=True): return False # Other", "if not isinstance(other, self.__class__): return False return (self.width == other.width", "z_min = min(self.outeye, other.outeye) z_max = max(self.ineye, other.ineye) self.z =", "return (self.x == other.x and self.y == other.y and self.z", "the cuboid resulting of the intersection of this and cub", "from eye edge z coordinate\"\"\" return self.z + self.depth @property", "self.top == cub.bottom or self.left == cub.right or self.right ==", "- point.z)**2) def distance_squared(self, point): return (self.x - point.x)**2 +", "self.y == other.y and self.z == other.z) def __hash__(self): return", "or cub.left == self.right and cub.bottom == self.top and cub.outeye", "argument 'edges' is True the cuboid returned will have a", "and self.outeye == cub.ineye or self.left == cub.right and cub.bottom", "(used for sorting)\"\"\" return self.volume() < other.volume() def __eq__(self, other):", "'z', 'rid') def __init__(self, x, y, z, width, height, depth,", "return min(self.start.y, self.end.y) @property def right(self): return max(self.start.x, self.end.x) @property", "self.z, self.width, self.height, self.depth)) def __iter__(self): \"\"\"Iterate through cuboid corners\"\"\"", "self.bottom == cub.top and self.outeye == cub.ineye or cub.left ==", "self.corner_bot_l yield self.corner_top_l_out yield self.corner_top_r_out yield self.corner_bot_r_out yield self.corner_bot_l_out def", "only touching by 
their edges, and the argument 'edges' is", "if (self.left == cub.right and self.bottom == cub.top and self.outeye", "True # Other cuboid is Right/Left from this if self.bottom", "(int, float): depth (int, float): rid (identifier object): \"\"\" assert(height", "self.start.distance_squared(self.end) @property def length(self): return self.start.distance(self.end) @property def top(self): return", "self.height == other.height and \\ self.outeye == other.outeye and self.depth", "and cub. Args: cub (Cuboid): Cuboid to test for intersections.", "edge z coordinate\"\"\" return self.z @property def ineye(self): \"\"\"Cuboid nearer", "self.width == other.width: z_min = min(self.outeye, other.outeye) z_max = max(self.ineye,", "self.depth def move(self, x, y, z): \"\"\"Move Cuboid to x,y,z", "cub.ineye or self.ineye < cub.outeye): return False # Discard edge", "other.bottom and self.height == other.height and \\ self.outeye == other.outeye", "an In-Depth segment given its bottom most end point and", "== cub.top and self.outeye == cub.ineye or self.left == cub.right", "def corner_bot_l(self): return Point(self.left, self.bottom, self.outeye) @property def corner_top_l_out(self): return", "edges=True): return False # Other cuboid is Up/Down from this", "cub): \"\"\"Tests if another cuboid is contained by this one", "self.start.z class Cuboid(object): \"\"\"Basic cuboid primitive class. x, y, z->", "given its bottom most end point and its length. Arguments:", "self.z) def distance(self, point): \"\"\"Calculate distance to another point\"\"\" return", "def __repr__(self): return \"P({}, {}, {})\".format(self.x, self.y, self.z) def distance(self,", "Point(self.right, self.bottom, self.ineye) @property def corner_bot_l_out(self): return Point(self.left, self.bottom, self.ineye)", "length (number): segment length \"\"\" assert(isinstance(start, Point) and not isinstance(length,", "def outeye(self): return min(self.start.z, self.end.z) class HSegment(Segment): \"\"\"Horizontal Segment\"\"\" def", "= z def contains(self, cub): \"\"\"Tests if another cuboid is", "max(self.start.z, self.end.z) @property def outeye(self): return min(self.start.z, self.end.z) class HSegment(Segment):", "cub. Args: cub (Cuboid): Cuboid to test for intersections. 
edges", "\"\"\"Equal cuboids have same properties.\"\"\" if not isinstance(other, self.__class__): return", "@property def corner_top_l(self): return Point(self.left, self.top, self.outeye) @property def corner_top_r(self):", "= depth self.x = x self.y = y self.z =", "if self.bottom == other.bottom and self.height == other.height and \\", "cub.outeye == self.ineye or self.left == cub.right and cub.bottom ==", "x, y, z, width, height, depth, rid=None): \"\"\"Initiating the Cuboid", "self.left == cub.right and self.bottom == cub.top and cub.outeye ==", "self.depth = depth self.x = x self.y = y self.z", "== other.z) def __repr__(self): return \"P({}, {}, {})\".format(self.x, self.y, self.z)", "if other.contains(self): self.x = other.x self.y = other.y self.z =", "self.bottom == cub.top and cub.outeye == self.ineye or cub.left ==", "cub.y + cub.height <= self.y + self.height and cub.x +", "< cub.outeye): return False # Discard edge intersects if not", "corner_bot_r_out(self): return Point(self.right, self.bottom, self.ineye) @property def corner_bot_l_out(self): return Point(self.left,", "right - left, top - bottom, ineye - outeye) def", "other.left) x_max = max(self.right, other.right) self.x = x_min self.width =", "self).__init__( start, Point(start.x + length, start.y, start.z)) @property def length(self):", "point.z)**2 class Segment(object): __slots__ = ('start', 'end') def __init__(self, start,", "self.height @property def left(self): \"\"\"Cuboid left edge x coordinate\"\"\" return", "return self.start == other.start and self.end == other.end def __repr__(self):", "cuboid resulting of the intersection of this and cub If", "None return self.start == other.start and self.end == other.end def", "an Horizontal segment given its left most end point and", "inside this one, False otherwise \"\"\" return (cub.y >= self.y", "(int, float): rid (identifier object): \"\"\" assert(height >= 0 and", "== other.outeye and self.depth == self.depth: y_min = min(self.bottom, other.bottom)", "return False # Discard edge intersects if not edges: if", "cub.top and cub.outeye == self.ineye or cub.left == self.right and", "< cub.left or self.outeye > cub.ineye or self.ineye < cub.outeye):", "self.start = start self.end = end def __eq__(self, other): if", "width - height - depth - \"\"\" __slots__ = ('width',", "Arguments: cub (Cuboid): The other cuboid. 
edges (bool): If true,", "min(self.start.y, self.end.y) @property def right(self): return max(self.start.x, self.end.x) @property def", "False return (self.width == other.width and self.height == other.height and", "cub.outeye == self.ineye or cub.left == self.right and self.bottom ==", "x coordinate\"\"\" return self.x @property def right(self): \"\"\"Cuboid right edge", "class Segment(object): __slots__ = ('start', 'end') def __init__(self, start, end):", "isinstance(length, Point)) super(HSegment, self).__init__( start, Point(start.x + length, start.y, start.z))", "\"\"\"Cuboid farther from eye edge z coordinate\"\"\" return self.z @property", "False # Discard edge intersects if not edges: if (self.bottom", "and self.z == other.z) def __hash__(self): return hash( (self.x, self.y,", "self.end.y) @property def right(self): return max(self.start.x, self.end.x) @property def left(self):", "= min(self.bottom, other.bottom) y_max = max(self.top, other.top) self.y = y_min", "* self.height * self.depth def move(self, x, y, z): \"\"\"Move", "@property def outeye(self): return min(self.start.z, self.end.z) class HSegment(Segment): \"\"\"Horizontal Segment\"\"\"", "= max(self.outeye, cub.outeye) ineye = min(self.ineye, cub.ineye) return Cuboid( left,", "@property def length(self): return self.end.z - self.start.z class Cuboid(object): \"\"\"Basic", "'height', 'depth', 'x', 'y', 'z', 'rid') def __init__(self, x, y,", "float): Z coordinate \"\"\" self.x = x self.y = y", "self.end.x) @property def left(self): return min(self.start.x, self.end.x) @property def ineye(self):", "== other.width and self.height == other.height and self.depth == other.depth", "self.x, self.y, self.z, self.width, self.height, self.depth) def volume(self): \"\"\"Cuboid volume\"\"\"", "cub.left) top = min(self.top, cub.top) right = min(self.right, cub.right) outeye", "y self.z = z def contains(self, cub): \"\"\"Tests if another", "yield self.corner_top_r yield self.corner_bot_r yield self.corner_bot_l yield self.corner_top_l_out yield self.corner_top_r_out", "top(self): return max(self.start.y, self.end.y) @property def bottom(self): return min(self.start.y, self.end.y)", "self.top, self.ineye) @property def corner_top_r_out(self): return Point(self.right, self.top, self.ineye) @property", "(Cuboid): Cuboid to test for intersections. 
edges (bool): Accept edge", "z coordinate\"\"\" return self.z + self.depth @property def corner_top_l(self): return", "0) self.width = width self.height = height self.depth = depth", "depth >= 0) self.width = width self.height = height self.depth", "self.z + self.depth @property def corner_top_l(self): return Point(self.left, self.top, self.outeye)", "== cub.bottom or self.left == cub.right or self.right == cub.left", "bottom(self): \"\"\"Cuboid bottom edge y coordinate\"\"\" return self.y @property def", "are only touching by their edges, and the argument 'edges'", "start, end): \"\"\"Arguments: start (Point): Segment start point end (Point):", "(self.x - point.x)**2 + (self.y - point.y)**2 + ( self.z", "y (int, float): z (int, float): width (int, float): height", "Returns: bool: True when successfully joined, False otherwise \"\"\" if", "return max(self.start.z, self.end.z) @property def outeye(self): return min(self.start.z, self.end.z) class", "self.width == other.width and \\ self.outeye == other.outeye and self.depth", "Point(self.right, self.bottom, self.outeye) @property def corner_bot_l(self): return Point(self.left, self.bottom, self.outeye)", "left, bottom, outeye, right - left, top - bottom, ineye", "== self.top and self.outeye == cub.ineye or cub.left == self.right", "\"\"\" assert(isinstance(start, Point) and isinstance(end, Point)) self.start = start self.end", "length(self): return self.end.x - self.start.x class VSegment(Segment): \"\"\"Vertical Segment\"\"\" def", "\"\"\" if self.contains(other): return True if other.contains(self): self.x = other.x", "self.bottom, self.ineye) def __lt__(self, other): \"\"\"Compare cuboids by volume (used", "self.volume() < other.volume() def __eq__(self, other): \"\"\"Equal cuboids have same", "None: There was no intersection. \"\"\" if not self.intersects(cub, edges=edges):", "corners\"\"\" yield self.corner_top_l yield self.corner_top_r yield self.corner_bot_r yield self.corner_bot_l yield", "and self.bottom == cub.top and self.outeye == cub.ineye or cub.left", "self.left > cub.right or self.right < cub.left or self.outeye >", "\"\"\"Create an In-Depth segment given its bottom most end point", "cuboids are only touching by their edges, and the argument", "def contains(self, cub): \"\"\"Tests if another cuboid is contained by", "ineye(self): return max(self.start.z, self.end.z) @property def outeye(self): return min(self.start.z, self.end.z)", "self.bottom, self.outeye) @property def corner_top_l_out(self): return Point(self.left, self.top, self.ineye) @property", "{})\".format(self.x, self.y, self.z) def distance(self, point): \"\"\"Calculate distance to another", ">= 0 and width >= 0 and depth >= 0)", "or cub.left == self.right and self.bottom == cub.top and cub.outeye", "coordinate\"\"\" return self.z @property def ineye(self): \"\"\"Cuboid nearer from eye", "self.top, self.outeye) @property def corner_bot_r(self): return Point(self.right, self.bottom, self.outeye) @property", "Arguments: x (int, float): X coordinate y (int, float): Y", "max(self.ineye, other.ineye) self.z = z_min self.depth = z_max - z_min", "<gh_stars>10-100 # -*- coding: utf-8 -*- from math import sqrt", "- point.x)**2 + (self.y - point.y)**2 + ( self.z -", "\"\"\"Compare cuboids by volume (used for sorting)\"\"\" return self.volume() <", "bottom most end point and its length. 
Arguments: - start", "height, depth, rid=None): \"\"\"Initiating the Cuboid Args: x (int, float):", "self.outeye) @property def corner_top_l_out(self): return Point(self.left, self.top, self.ineye) @property def", "and not isinstance(length, Point)) super(VSegment, self).__init__( start, Point(start.x, start.y +", "x_min self.width = x_max - x_min return True # Other", "other.x and self.y == other.y and self.z == other.z) def", "self.y + self.height and cub.x + cub.width <= self.x +", "(int, float): X coordinate y (int, float): Y coordinate z", "bool: True if the cuboids intersect, False otherwise \"\"\" #", "Point)) super(VSegment, self).__init__( start, Point(start.x, start.y, start.z + length)) @property", "corner_bot_l(self): return Point(self.left, self.bottom, self.outeye) @property def corner_top_l_out(self): return Point(self.left,", "def __lt__(self, other): \"\"\"Compare cuboids by volume (used for sorting)\"\"\"", "def join(self, other): \"\"\"Try to join a cuboid to this", "this and cub If the cuboids are only touching by", "edge x coordinate\"\"\" return self.x @property def right(self): \"\"\"Cuboid right", "== self.right and self.bottom == cub.top and cub.outeye == self.ineye", "(int, float): z (int, float): width (int, float): height (int,", "# Not even touching if (self.bottom > cub.top or self.top", "be returned Returns: Cuboid: Intersection. None: There was no intersection.", "ineye - outeye) def join(self, other): \"\"\"Try to join a", "start, Point(start.x + length, start.y, start.z)) @property def length(self): return", "def __hash__(self): return hash( (self.x, self.y, self.z, self.width, self.height, self.depth))", "will be returned Returns: Cuboid: Intersection. None: There was no", "self.outeye == cub.ineye or self.ineye == cub.outeye): return False #", "other.outeye) z_max = max(self.ineye, other.ineye) self.z = z_min self.depth =", "Point) and isinstance(end, Point)) self.start = start self.end = end", "and cub.y + cub.height <= self.y + self.height and cub.x", "\"\"\" self.x = x self.y = y self.z = z", "False # Discard corner intersects if (self.left == cub.right and", "< other.volume() def __eq__(self, other): \"\"\"Equal cuboids have same properties.\"\"\"", "this one, False otherwise \"\"\" return (cub.y >= self.y and", "self.z and cub.y + cub.height <= self.y + self.height and", "__init__(self, start, length): \"\"\"Create an Horizontal segment given its left", "self.right and self.bottom == cub.top and self.outeye == cub.ineye or", "y_min return True # Other cuboid is Right/Left from this", "def __repr__(self): return \"S({}, {})\".format(self.start, self.end) @property def length_squared(self): \"\"\"Faster", "height or width or depth will be returned Returns: Cuboid:", "if self.left == other.left and self.width == other.width and \\", "or self.left > cub.right or self.right < cub.left or self.outeye", "+ length, start.z)) @property def length(self): return self.end.y - self.start.y", "def __eq__(self, other): if not isinstance(other, self.__class__): None return self.start", ">= self.y and cub.x >= self.x and cub.z >= self.z", "return self.end.y - self.start.y class DSegment(Segment): \"\"\"In-Depth Segment\"\"\" def __init__(self,", "self.x and cub.z >= self.z and cub.y + cub.height <=", "X coordinate y (int, float): Y coordinate z (int, float):", "length(self): return self.end.y - self.start.y class DSegment(Segment): \"\"\"In-Depth Segment\"\"\" def", "y, z): self.x = x self.y = y self.z =", "If the cuboids are only touching by their edges, and", 
"coordinate\"\"\" return self.x @property def right(self): \"\"\"Cuboid right edge x", "segment length \"\"\" assert(isinstance(start, Point) and not isinstance(length, Point)) super(HSegment,", "cub.outeye == self.ineye): return False return True def intersection(self, cub,", "False otherwise \"\"\" if self.contains(other): return True if other.contains(self): self.x", "return self.y + self.height @property def left(self): \"\"\"Cuboid left edge", "other.bottom and self.height == other.height and \\ self.left == other.left", "Arguments: other (Cuboid): Cuboid to join Returns: bool: True when", "(self.left == cub.right and self.bottom == cub.top and self.outeye ==", "self.y == other.y and self.z == other.z) def __repr__(self): return", "self.top, self.ineye) @property def corner_bot_r_out(self): return Point(self.right, self.bottom, self.ineye) @property", "self.intersects(other, edges=True): return False # Other cuboid is Up/Down from", "\"\"\"Iterate through cuboid corners\"\"\" yield self.corner_top_l yield self.corner_top_r yield self.corner_bot_r", "y, z): \"\"\"Move Cuboid to x,y,z coordinates Arguments: x (int,", "self.y = y self.z = z def contains(self, cub): \"\"\"Tests", "return \"S({}, {})\".format(self.start, self.end) @property def length_squared(self): \"\"\"Faster than length", "the result is also a cuboid and the operation is", "edges: if (self.bottom == cub.top or self.top == cub.bottom or", "a Vertical segment given its bottom most end point and", "self.__class__): None return self.start == other.start and self.end == other.end", "+ cub.height <= self.y + self.height and cub.x + cub.width", "__eq__(self, other): return (self.x == other.x and self.y == other.y", "def move(self, x, y, z): \"\"\"Move Cuboid to x,y,z coordinates", "@property def corner_top_l_out(self): return Point(self.left, self.top, self.ineye) @property def corner_top_r_out(self):", "of 0 height or width or depth will be returned", "def corner_bot_r_out(self): return Point(self.right, self.bottom, self.ineye) @property def corner_bot_l_out(self): return", "cub.right and self.bottom == cub.top and self.outeye == cub.ineye or", "self).__init__( start, Point(start.x, start.y, start.z + length)) @property def length(self):", "\\ self.outeye == other.outeye and self.depth == self.depth: y_min =", "the cuboids are only touching by their edges, and the", "None bottom = max(self.bottom, cub.bottom) left = max(self.left, cub.left) top", "depth - \"\"\" __slots__ = ('width', 'height', 'depth', 'x', 'y',", "__eq__(self, other): if not isinstance(other, self.__class__): None return self.start ==", "DSegment(Segment): \"\"\"In-Depth Segment\"\"\" def __init__(self, start, length): \"\"\"Create an In-Depth", "cub, edges=False): \"\"\"Returns the cuboid resulting of the intersection of", "\"\"\"Faster than length and useful for some comparisons\"\"\" return self.start.distance_squared(self.end)", "return Cuboid( left, bottom, outeye, right - left, top -", "Point - length (number): segment length \"\"\" assert(isinstance(start, Point) and", "not isinstance(other, self.__class__): None return self.start == other.start and self.end", "cub.left or self.outeye == cub.ineye or self.ineye == cub.outeye): return", "@property def corner_bot_l(self): return Point(self.left, self.bottom, self.outeye) @property def corner_top_l_out(self):", "float): rid (identifier object): \"\"\" assert(height >= 0 and width", "- length (number): segment length \"\"\" assert(isinstance(start, Point) and not", "Segment\"\"\" def __init__(self, 
start, length): \"\"\"Create an In-Depth segment given", "= other.width self.height = other.height self.depth = other.depth return True", "__lt__(self, other): \"\"\"Compare cuboids by volume (used for sorting)\"\"\" return", "== other.y and self.z == other.z) def __hash__(self): return hash(", "height - depth - \"\"\" __slots__ = ('width', 'height', 'depth',", "\"\"\"Basic cuboid primitive class. x, y, z-> Lower right corner", "self.start.x class VSegment(Segment): \"\"\"Vertical Segment\"\"\" def __init__(self, start, length): \"\"\"Create", "through cuboid corners\"\"\" yield self.corner_top_l yield self.corner_top_r yield self.corner_bot_r yield", "corner_top_l(self): return Point(self.left, self.top, self.outeye) @property def corner_top_r(self): return Point(self.right,", "= y self.z = z def __eq__(self, other): return (self.x", "cub.top and cub.outeye == self.ineye or self.left == cub.right and", "def right(self): return max(self.start.x, self.end.x) @property def left(self): return min(self.start.x,", "Vertical segment given its bottom most end point and its", "def corner_top_r_out(self): return Point(self.right, self.top, self.ineye) @property def corner_bot_r_out(self): return", "volume(self): \"\"\"Cuboid volume\"\"\" return self.width * self.height * self.depth def", "their edges, and the argument 'edges' is True the cuboid", "self.y = y_min self.height = y_max - y_min return True", "self.right < cub.left or self.outeye > cub.ineye or self.ineye <", "or self.top < cub.bottom or self.left > cub.right or self.right", "other.y self.z = other.z self.width = other.width self.height = other.height", "(Cuboid): The other cuboid. edges (bool): If true, touching edges", "Accept edge touching cuboids as intersects or not Returns: bool:", "point \"\"\" assert(isinstance(start, Point) and isinstance(end, Point)) self.start = start", "when successfully joined, False otherwise \"\"\" if self.contains(other): return True", "other.end def __repr__(self): return \"S({}, {})\".format(self.start, self.end) @property def length_squared(self):", "and self.bottom == cub.top and cub.outeye == self.ineye or self.left", "self.left == other.left and self.width == other.width: z_min = min(self.outeye,", "self.y, self.z, self.width, self.height, self.depth) def volume(self): \"\"\"Cuboid volume\"\"\" return", "and isinstance(end, Point)) self.start = start self.end = end def", "cuboid is Right/Left from this if self.bottom == other.bottom and", "return True if other.contains(self): self.x = other.x self.y = other.y", "{})\".format( self.x, self.y, self.z, self.width, self.height, self.depth) def volume(self): \"\"\"Cuboid", "self.bottom, self.ineye) @property def corner_bot_l_out(self): return Point(self.left, self.bottom, self.ineye) def", "( self.z - point.z)**2) def distance_squared(self, point): return (self.x -", "yield self.corner_bot_r_out yield self.corner_bot_l_out def __repr__(self): return \"R({}, {}, {},", "eye edge z coordinate\"\"\" return self.z @property def ineye(self): \"\"\"Cuboid", "bottom = max(self.bottom, cub.bottom) left = max(self.left, cub.left) top =", "== cub.top and cub.outeye == self.ineye or self.left == cub.right", "start.y, start.z + length)) @property def length(self): return self.end.z -", "(Cuboid): Cuboid to join Returns: bool: True when successfully joined,", "other.depth and self.x == other.x and self.y == other.y and", "properties.\"\"\" if not isinstance(other, self.__class__): return False return (self.width ==", "# -*- coding: utf-8 -*- from math import sqrt 
class", "cub.left == self.right and cub.bottom == self.top and self.outeye ==", "\"\"\"Arguments: start (Point): Segment start point end (Point): Segment end", "self.ineye) @property def corner_bot_l_out(self): return Point(self.left, self.bottom, self.ineye) def __lt__(self,", "if not isinstance(other, self.__class__): None return self.start == other.start and", "return self.width * self.height * self.depth def move(self, x, y,", "cub.right and self.bottom == cub.top and cub.outeye == self.ineye or", "return False return (self.width == other.width and self.height == other.height", "The other cuboiud Returns: bool: True if it is inside", "cub.right) outeye = max(self.outeye, cub.outeye) ineye = min(self.ineye, cub.ineye) return", "yield self.corner_top_l_out yield self.corner_top_r_out yield self.corner_bot_r_out yield self.corner_bot_l_out def __repr__(self):", "True if the cuboids intersect, False otherwise \"\"\" # Not", "self.depth = other.depth return True if not self.intersects(other, edges=True): return", "( self.z - point.z)**2 class Segment(object): __slots__ = ('start', 'end')", "== self.top and self.outeye == cub.ineye or self.left == cub.right", "bottom(self): return min(self.start.y, self.end.y) @property def right(self): return max(self.start.x, self.end.x)", "return (cub.y >= self.y and cub.x >= self.x and cub.z", "self.ineye): return False return True def intersection(self, cub, edges=False): \"\"\"Returns", "def intersects(self, cub, edges=False): \"\"\"Detect intersections between this cuboid and", "width >= 0 and depth >= 0) self.width = width", "to test for intersections. edges (bool): Accept edge touching cuboids", "and cub.bottom == self.top and cub.outeye == self.ineye or cub.left", "right corner coordinates width - height - depth - \"\"\"", "cub If the cuboids are only touching by their edges,", "y coordinate\"\"\" return self.y @property def top(self): \"\"\"Cuboid top edge", "def length(self): return self.end.y - self.start.y class DSegment(Segment): \"\"\"In-Depth Segment\"\"\"", "== cub.ineye or self.left == cub.right and self.bottom == cub.top", "Cuboid to join Returns: bool: True when successfully joined, False", "not isinstance(length, Point)) super(VSegment, self).__init__( start, Point(start.x, start.y + length,", "__slots__ = ('width', 'height', 'depth', 'x', 'y', 'z', 'rid') def", "cub.bottom == self.top and cub.outeye == self.ineye): return False return", "y coordiante\"\"\" return self.y + self.height @property def left(self): \"\"\"Cuboid", "other cuboiud Returns: bool: True if it is inside this", "right = min(self.right, cub.right) outeye = max(self.outeye, cub.outeye) ineye =", "have a volume of 0. 
Returns None if there is", "length \"\"\" assert(isinstance(start, Point) and not isinstance(length, Point)) super(VSegment, self).__init__(", "if not self.intersects(other, edges=True): return False # Other cuboid is", "cub.outeye): return False # Discard edge intersects if not edges:", "\"\"\"Create an Horizontal segment given its left most end point", "return None bottom = max(self.bottom, cub.bottom) left = max(self.left, cub.left)", "Not even touching if (self.bottom > cub.top or self.top <", "and self.bottom == cub.top and cub.outeye == self.ineye or cub.left", "False return True def intersection(self, cub, edges=False): \"\"\"Returns the cuboid", "Other cuboid is Up/Down from this if self.left == other.left", "other.y and self.z == other.z) def __repr__(self): return \"P({}, {},", "\"S({}, {})\".format(self.start, self.end) @property def length_squared(self): \"\"\"Faster than length and", "y self.z = z self.rid = rid @property def bottom(self):", "intersections. edges (bool): Accept edge touching cuboids as intersects or", "other cuboid. edges (bool): If true, touching edges are considered", "edges (bool): If true, touching edges are considered an intersection,", "= x self.y = y self.z = z def contains(self,", "x, y, z): self.x = x self.y = y self.z", "= width self.height = height self.depth = depth self.x =", "one, False otherwise \"\"\" return (cub.y >= self.y and cub.x", "other.left and self.width == other.width and \\ self.outeye == other.outeye", "start point end (Point): Segment end point \"\"\" assert(isinstance(start, Point)", "def ineye(self): return max(self.start.z, self.end.z) @property def outeye(self): return min(self.start.z,", "self.y = y self.z = z self.rid = rid @property", "def corner_top_l_out(self): return Point(self.left, self.top, self.ineye) @property def corner_top_r_out(self): return", "= x self.y = y self.z = z def __eq__(self,", "and cub.x >= self.x and cub.z >= self.z and cub.y", "self.height, self.depth)) def __iter__(self): \"\"\"Iterate through cuboid corners\"\"\" yield self.corner_top_l", "def __init__(self, start, end): \"\"\"Arguments: start (Point): Segment start point", "to join a cuboid to this one. If the result", "float): Y coordinate z (int, float): Z coordinate \"\"\" self.x", "cub.ineye) return Cuboid( left, bottom, outeye, right - left, top", "or self.ineye < cub.outeye): return False # Discard edge intersects", "and cub If the cuboids are only touching by their", "def corner_top_l(self): return Point(self.left, self.top, self.outeye) @property def corner_top_r(self): return", "and cub.bottom == self.top and self.outeye == cub.ineye or self.left", "== cub.outeye): return False # Discard corner intersects if (self.left", "self.end.z) class HSegment(Segment): \"\"\"Horizontal Segment\"\"\" def __init__(self, start, length): \"\"\"Create", "- bottom, ineye - outeye) def join(self, other): \"\"\"Try to", "@property def corner_bot_l_out(self): return Point(self.left, self.bottom, self.ineye) def __lt__(self, other):", "= other.x self.y = other.y self.z = other.z self.width =", "one. 
If the result is also a cuboid and the", "- x_min return True # Other cuboid is Right/Left from", "self.ineye) def __lt__(self, other): \"\"\"Compare cuboids by volume (used for", "is contained by this one Arguments: cub (Cuboid): The other", "@property def length(self): return self.end.x - self.start.x class VSegment(Segment): \"\"\"Vertical", "== other.height and self.depth == other.depth and self.x == other.x", "def __init__(self, start, length): \"\"\"Create an In-Depth segment given its", "edge intersects if not edges: if (self.bottom == cub.top or", "class Point(object): __slots__ = ('x', 'y', 'z') def __init__(self, x,", "math import sqrt class Point(object): __slots__ = ('x', 'y', 'z')", "cub (Cuboid): The other cuboiud Returns: bool: True if it", "edge x coordinate\"\"\" return self.x + self.width @property def outeye(self):", "Point) and not isinstance(length, Point)) super(HSegment, self).__init__( start, Point(start.x +", "of this and cub If the cuboids are only touching", "depth will be returned Returns: Cuboid: Intersection. None: There was", "successfully joined, False otherwise \"\"\" if self.contains(other): return True if", "and self.width == other.width: z_min = min(self.outeye, other.outeye) z_max =", "other.top) self.y = y_min self.height = y_max - y_min return", "cub.bottom or self.left == cub.right or self.right == cub.left or", "self.start.distance(self.end) @property def top(self): return max(self.start.y, self.end.y) @property def bottom(self):", "self.z @property def ineye(self): \"\"\"Cuboid nearer from eye edge z", "== cub.right or self.right == cub.left or self.outeye == cub.ineye", "and not isinstance(length, Point)) super(HSegment, self).__init__( start, Point(start.x + length,", "not isinstance(length, Point)) super(HSegment, self).__init__( start, Point(start.x + length, start.y,", "'x', 'y', 'z', 'rid') def __init__(self, x, y, z, width,", "If the result is also a cuboid and the operation", "yield self.corner_bot_r yield self.corner_bot_l yield self.corner_top_l_out yield self.corner_top_r_out yield self.corner_bot_r_out", "max(self.right, other.right) self.x = x_min self.width = x_max - x_min", "return sqrt((self.x - point.x)**2 + (self.y - point.y)**2 + (", "other.height and \\ self.left == other.left and self.width == other.width:", "Lower right corner coordinates width - height - depth -", "was no intersection. \"\"\" if not self.intersects(cub, edges=edges): return None", "most end point and its length. 
Arguments: - start (Point):", "even touching if (self.bottom > cub.top or self.top < cub.bottom", "= min(self.left, other.left) x_max = max(self.right, other.right) self.x = x_min", "are considered an intersection, and a cuboid of 0 height", "length_squared(self): \"\"\"Faster than length and useful for some comparisons\"\"\" return", "self.corner_bot_l_out def __repr__(self): return \"R({}, {}, {}, {}, {}, {})\".format(", "to another point\"\"\" return sqrt((self.x - point.x)**2 + (self.y -", "coordinates width - height - depth - \"\"\" __slots__ =", "resulting of the intersection of this and cub If the", "= z self.rid = rid @property def bottom(self): \"\"\"Cuboid bottom", "length, start.y, start.z)) @property def length(self): return self.end.x - self.start.x", "cub.bottom) left = max(self.left, cub.left) top = min(self.top, cub.top) right", "== self.top and cub.outeye == self.ineye): return False return True", "isinstance(end, Point)) self.start = start self.end = end def __eq__(self,", "+ self.width @property def outeye(self): \"\"\"Cuboid farther from eye edge", "return True def intersection(self, cub, edges=False): \"\"\"Returns the cuboid resulting", "other.width self.height = other.height self.depth = other.depth return True if", "__init__(self, start, end): \"\"\"Arguments: start (Point): Segment start point end", "== other.y and self.z == other.z) def __repr__(self): return \"P({},", "z): \"\"\"Move Cuboid to x,y,z coordinates Arguments: x (int, float):", "(int, float): height (int, float): depth (int, float): rid (identifier", "Cuboid to x,y,z coordinates Arguments: x (int, float): X coordinate", "other): \"\"\"Try to join a cuboid to this one. If", "left, top - bottom, ineye - outeye) def join(self, other):", "Args: cub (Cuboid): Cuboid to test for intersections. edges (bool):", "- point.y)**2 + ( self.z - point.z)**2 class Segment(object): __slots__", "return self.end.z - self.start.z class Cuboid(object): \"\"\"Basic cuboid primitive class.", "def left(self): \"\"\"Cuboid left edge x coordinate\"\"\" return self.x @property", "segment given its bottom most end point and its length.", "is modified to the union. Arguments: other (Cuboid): Cuboid to", "self.__class__): return False return (self.width == other.width and self.height ==", "'end') def __init__(self, start, end): \"\"\"Arguments: start (Point): Segment start", "self.x @property def right(self): \"\"\"Cuboid right edge x coordinate\"\"\" return", "def __init__(self, start, length): \"\"\"Create an Horizontal segment given its", "other): \"\"\"Compare cuboids by volume (used for sorting)\"\"\" return self.volume()", "cub.z + cub.depth <= self.z + self.depth) def intersects(self, cub,", "return Point(self.right, self.top, self.outeye) @property def corner_bot_r(self): return Point(self.right, self.bottom,", "self.x = x self.y = y self.z = z def", "class Cuboid(object): \"\"\"Basic cuboid primitive class. 
x, y, z-> Lower", "and self.y == other.y and self.z == other.z) def __repr__(self):", "ineye(self): \"\"\"Cuboid nearer from eye edge z coordinate\"\"\" return self.z", "self.bottom == other.bottom and self.height == other.height and \\ self.left", "coordiante\"\"\" return self.y + self.height @property def left(self): \"\"\"Cuboid left", "Point) and not isinstance(length, Point)) super(VSegment, self).__init__( start, Point(start.x, start.y,", "self.top, self.outeye) @property def corner_top_r(self): return Point(self.right, self.top, self.outeye) @property", "Other cuboid is Right/Left from this if self.bottom == other.bottom", "point.z)**2) def distance_squared(self, point): return (self.x - point.x)**2 + (self.y", "intersects if (self.left == cub.right and self.bottom == cub.top and", "float): width (int, float): height (int, float): depth (int, float):", "to the union. Arguments: other (Cuboid): Cuboid to join Returns:", "if self.contains(other): return True if other.contains(self): self.x = other.x self.y", "or self.left == cub.right and cub.bottom == self.top and self.outeye", "== other.end def __repr__(self): return \"S({}, {})\".format(self.start, self.end) @property def", "union. Arguments: other (Cuboid): Cuboid to join Returns: bool: True", "{}, {})\".format( self.x, self.y, self.z, self.width, self.height, self.depth) def volume(self):", "self.end.x) @property def ineye(self): return max(self.start.z, self.end.z) @property def outeye(self):", "some comparisons\"\"\" return self.start.distance_squared(self.end) @property def length(self): return self.start.distance(self.end) @property", "self.right and cub.bottom == self.top and self.outeye == cub.ineye or", "\"\"\" # Not even touching if (self.bottom > cub.top or", "the union. Arguments: other (Cuboid): Cuboid to join Returns: bool:", "return False # Discard corner intersects if (self.left == cub.right", "touching edges are considered an intersection, and a cuboid of", "(cub.y >= self.y and cub.x >= self.x and cub.z >=", "start, length): \"\"\"Create a Vertical segment given its bottom most", "from eye edge z coordinate\"\"\" return self.z @property def ineye(self):", "volume of 0. Returns None if there is no intersection.", "True if it is inside this one, False otherwise \"\"\"", "self.x = other.x self.y = other.y self.z = other.z self.width", "float): height (int, float): depth (int, float): rid (identifier object):", "and self.y == other.y and self.z == other.z) def __hash__(self):", "= min(self.ineye, cub.ineye) return Cuboid( left, bottom, outeye, right -", "\"\"\"In-Depth Segment\"\"\" def __init__(self, start, length): \"\"\"Create an In-Depth segment", "cuboid is modified to the union. Arguments: other (Cuboid): Cuboid", "or not Returns: bool: True if the cuboids intersect, False", "returned will have a volume of 0. 
Returns None if", "join(self, other): \"\"\"Try to join a cuboid to this one.", "Segment(object): __slots__ = ('start', 'end') def __init__(self, start, end): \"\"\"Arguments:", "Point(self.right, self.top, self.outeye) @property def corner_bot_r(self): return Point(self.right, self.bottom, self.outeye)", "top = min(self.top, cub.top) right = min(self.right, cub.right) outeye =", "super(VSegment, self).__init__( start, Point(start.x, start.y + length, start.z)) @property def", ">= self.z and cub.y + cub.height <= self.y + self.height", "max(self.top, other.top) self.y = y_min self.height = y_max - y_min", "self.y = other.y self.z = other.z self.width = other.width self.height", "and self.depth == self.depth: y_min = min(self.bottom, other.bottom) y_max =", "self.right and cub.bottom == self.top and cub.outeye == self.ineye): return", "yield self.corner_bot_l yield self.corner_top_l_out yield self.corner_top_r_out yield self.corner_bot_r_out yield self.corner_bot_l_out", "other): \"\"\"Equal cuboids have same properties.\"\"\" if not isinstance(other, self.__class__):", "contains(self, cub): \"\"\"Tests if another cuboid is contained by this", "class HSegment(Segment): \"\"\"Horizontal Segment\"\"\" def __init__(self, start, length): \"\"\"Create an", "this if self.left == other.left and self.width == other.width and", "self.outeye == other.outeye and self.depth == self.depth: x_min = min(self.left,", "join Returns: bool: True when successfully joined, False otherwise \"\"\"", "@property def right(self): return max(self.start.x, self.end.x) @property def left(self): return", "segment given its left most end point and its length.", "if the cuboids intersect, False otherwise \"\"\" # Not even", "self.end.y - self.start.y class DSegment(Segment): \"\"\"In-Depth Segment\"\"\" def __init__(self, start,", "def __init__(self, x, y, z, width, height, depth, rid=None): \"\"\"Initiating", "not isinstance(other, self.__class__): return False return (self.width == other.width and", "or cub.left == self.right and cub.bottom == self.top and self.outeye", "self.x + self.width @property def outeye(self): \"\"\"Cuboid farther from eye", "Point) and not isinstance(length, Point)) super(VSegment, self).__init__( start, Point(start.x, start.y", "this cuboid and cub. 
Args: cub (Cuboid): Cuboid to test", "self.ineye or self.left == cub.right and cub.bottom == self.top and", "self.z = z self.rid = rid @property def bottom(self): \"\"\"Cuboid", "from this if self.left == other.left and self.width == other.width", "== self.right and cub.bottom == self.top and self.outeye == cub.ineye", "Right/Left from this if self.bottom == other.bottom and self.height ==", "return False return True def intersection(self, cub, edges=False): \"\"\"Returns the", "{}, {}, {}, {}, {})\".format( self.x, self.y, self.z, self.width, self.height,", "for some comparisons\"\"\" return self.start.distance_squared(self.end) @property def length(self): return self.start.distance(self.end)", "(int, float): y (int, float): z (int, float): width (int,", "return hash( (self.x, self.y, self.z, self.width, self.height, self.depth)) def __iter__(self):", "assert(isinstance(start, Point) and not isinstance(length, Point)) super(VSegment, self).__init__( start, Point(start.x,", "and self.end == other.end def __repr__(self): return \"S({}, {})\".format(self.start, self.end)", "\"\"\"Cuboid right edge x coordinate\"\"\" return self.x + self.width @property", "x,y,z coordinates Arguments: x (int, float): X coordinate y (int,", "start.y, start.z)) @property def length(self): return self.end.x - self.start.x class", "- depth - \"\"\" __slots__ = ('width', 'height', 'depth', 'x',", "from this if self.bottom == other.bottom and self.height == other.height", "= max(self.top, other.top) self.y = y_min self.height = y_max -", "coordinates Arguments: x (int, float): X coordinate y (int, float):", "start.z)) @property def length(self): return self.end.y - self.start.y class DSegment(Segment):", "utf-8 -*- from math import sqrt class Point(object): __slots__ =", "== cub.left or self.outeye == cub.ineye or self.ineye == cub.outeye):", "this one. If the result is also a cuboid and", "and cub.outeye == self.ineye or self.left == cub.right and cub.bottom", "self.depth == self.depth: x_min = min(self.left, other.left) x_max = max(self.right,", "self.end.y) @property def bottom(self): return min(self.start.y, self.end.y) @property def right(self):", "self.end == other.end def __repr__(self): return \"S({}, {})\".format(self.start, self.end) @property", "or cub.left == self.right and self.bottom == cub.top and self.outeye", "> cub.ineye or self.ineye < cub.outeye): return False # Discard", "\"\"\"Move Cuboid to x,y,z coordinates Arguments: x (int, float): X", "cuboid returned will have a volume of 0. Returns None", "cuboid primitive class. 
x, y, z-> Lower right corner coordinates", "assert(isinstance(start, Point) and isinstance(end, Point)) self.start = start self.end =", "is successful then this cuboid is modified to the union.", "== other.height and \\ self.left == other.left and self.width ==", "edge touching cuboids as intersects or not Returns: bool: True", "have same properties.\"\"\" if not isinstance(other, self.__class__): return False return", ">= 0 and depth >= 0) self.width = width self.height", "= y self.z = z self.rid = rid @property def", "corner_top_r_out(self): return Point(self.right, self.top, self.ineye) @property def corner_bot_r_out(self): return Point(self.right,", "a cuboid of 0 height or width or depth will", "for sorting)\"\"\" return self.volume() < other.volume() def __eq__(self, other): \"\"\"Equal", "{}, {}, {})\".format( self.x, self.y, self.z, self.width, self.height, self.depth) def", "cub.left == self.right and cub.bottom == self.top and cub.outeye ==", "self.x + self.width and cub.z + cub.depth <= self.z +", "class DSegment(Segment): \"\"\"In-Depth Segment\"\"\" def __init__(self, start, length): \"\"\"Create an", "other.z) def __hash__(self): return hash( (self.x, self.y, self.z, self.width, self.height,", "other.depth return True if not self.intersects(other, edges=True): return False #", "z_min self.depth = z_max - z_min return True return False", "== self.depth: y_min = min(self.bottom, other.bottom) y_max = max(self.top, other.top)", "farther from eye edge z coordinate\"\"\" return self.z @property def", "self.z = z def contains(self, cub): \"\"\"Tests if another cuboid", "If true, touching edges are considered an intersection, and a", "min(self.right, cub.right) outeye = max(self.outeye, cub.outeye) ineye = min(self.ineye, cub.ineye)", "self.left == other.left and self.width == other.width and \\ self.outeye", "cuboid. edges (bool): If true, touching edges are considered an", "= max(self.left, cub.left) top = min(self.top, cub.top) right = min(self.right,", "self.y + self.height @property def left(self): \"\"\"Cuboid left edge x", "== self.ineye or cub.left == self.right and cub.bottom == self.top", "Cuboid( left, bottom, outeye, right - left, top - bottom,", "self.width = x_max - x_min return True # Other cuboid", "== self.top and cub.outeye == self.ineye or cub.left == self.right", "edges=False): \"\"\"Detect intersections between this cuboid and cub. 
Args: cub", "= ('start', 'end') def __init__(self, start, end): \"\"\"Arguments: start (Point):", "self.height, self.depth) def volume(self): \"\"\"Cuboid volume\"\"\" return self.width * self.height", "coordinate\"\"\" return self.y @property def top(self): \"\"\"Cuboid top edge y", "a cuboid and the operation is successful then this cuboid", "point): return (self.x - point.x)**2 + (self.y - point.y)**2 +", "cub.x + cub.width <= self.x + self.width and cub.z +", "self.depth @property def corner_top_l(self): return Point(self.left, self.top, self.outeye) @property def", "= y_min self.height = y_max - y_min return True #", "cub.width <= self.x + self.width and cub.z + cub.depth <=", "Segment\"\"\" def __init__(self, start, length): \"\"\"Create an Horizontal segment given", "Horizontal segment given its left most end point and its", "move(self, x, y, z): \"\"\"Move Cuboid to x,y,z coordinates Arguments:", "{}, {})\".format(self.x, self.y, self.z) def distance(self, point): \"\"\"Calculate distance to", "height self.depth = depth self.x = x self.y = y", "@property def outeye(self): \"\"\"Cuboid farther from eye edge z coordinate\"\"\"", "if (self.bottom == cub.top or self.top == cub.bottom or self.left", "self.width * self.height * self.depth def move(self, x, y, z):", "<= self.x + self.width and cub.z + cub.depth <= self.z", "cuboid of 0 height or width or depth will be", "touching by their edges, and the argument 'edges' is True", "coding: utf-8 -*- from math import sqrt class Point(object): __slots__", "not edges: if (self.bottom == cub.top or self.top == cub.bottom", "= x self.y = y self.z = z self.rid =", "cub.right or self.right < cub.left or self.outeye > cub.ineye or", "= min(self.outeye, other.outeye) z_max = max(self.ineye, other.ineye) self.z = z_min", "or depth will be returned Returns: Cuboid: Intersection. None: There", "== self.ineye): return False return True def intersection(self, cub, edges=False):", "def volume(self): \"\"\"Cuboid volume\"\"\" return self.width * self.height * self.depth", "modified to the union. Arguments: other (Cuboid): Cuboid to join", "self.rid = rid @property def bottom(self): \"\"\"Cuboid bottom edge y", "edge y coordinate\"\"\" return self.y @property def top(self): \"\"\"Cuboid top", "touching if (self.bottom > cub.top or self.top < cub.bottom or", "and the argument 'edges' is True the cuboid returned will", "@property def corner_bot_r_out(self): return Point(self.right, self.bottom, self.ineye) @property def corner_bot_l_out(self):", "(bool): Accept edge touching cuboids as intersects or not Returns:", "return self.x @property def right(self): \"\"\"Cuboid right edge x coordinate\"\"\"", "x_min = min(self.left, other.left) x_max = max(self.right, other.right) self.x =", "y, z-> Lower right corner coordinates width - height -", "top - bottom, ineye - outeye) def join(self, other): \"\"\"Try", "@property def length_squared(self): \"\"\"Faster than length and useful for some", "== cub.top or self.top == cub.bottom or self.left == cub.right", "self.end = end def __eq__(self, other): if not isinstance(other, self.__class__):", "isinstance(length, Point)) super(VSegment, self).__init__( start, Point(start.x, start.y + length, start.z))", "and self.height == other.height and \\ self.outeye == other.outeye and", "as intersects or not Returns: bool: True if the cuboids", "True def intersection(self, cub, edges=False): \"\"\"Returns the cuboid resulting of" ]
[ "that is provided in the request that means if you", "on the system from PIL import Image # pillow, this", "the details of that won't change # unless you do", "creating objects # in an API. recipe = Recipe.objects.get(id=res.data['id']) #", "to simulate the list view in our serializer self.assertEqual(res.status_code, status.HTTP_200_OK)", "url = image_upload_url(self.recipe.id) # going to use the sample recipe", "{ 'title': 'Sample recipe', 'time_minutes': 10, 'price': 5.00, } defaults.update(params)", "reverse('recipe:recipe-upload-image', args=[recipe_id]) # generate our upload image url # you're", "the system and then you can remove that file after", "def setUp(self): self.client = APIClient() def test_required_auth(self): \"\"\"Test the authenticaiton", "saved the file it will be the seeking will be", "{ 'title': 'Test recipe with ingredients', 'ingredients': [ingredient1.id, ingredient2.id], 'time_minutes':", "the same as the tags that are in our queryset.", "our image class which will let us then # create", "'Fully Updated sample recipe', 'time_minutes': 25, 'price': 5.00 } url", "# this will create a comma separated list string and", "recipe2.tags.add(tag2) recipe3 = sample_recipe(user=self.user, title='Fish and chips') res = self.client.get(", "# what happens at the setup of the test def", "it will change are the # fields that are provided", "# We're going to retrieve an update to the recipe", "the payload those fields will actually be removed # from", "the test def setUp(self): self.client = APIClient() self.user = get_user_model().objects.create_user('user',", "getattr(recipe, key)) # assertion for each one of these keys,", "to the beginning of the file res = self.client.post(url, {'image':", "the fields that are provided # in the payload so", "the object that's being updated. def test_partial_update_recipe(self): \"\"\"Test updating a", "and chips') res = self.client.get( RECIPES_URL, {'tags': '{},{}'.format(tag1.id, tag2.id)} )", "there's two different HTTP methods: put, patch # patch: Patch", "# delete the image if it exists in the recipe", "all capitals. # app : identifier of the URL in", "# check if the tags that we created as our", "be assigned. self.assertIn(tag1, tags) self.assertIn(tag2, tags) # check if the", "# filter our recipes by the authenticated user recipes =", "status.HTTP_200_OK) # check that the images in the response so", "# this is the standard HTTP response code for creating", "be removed # from the object that you're updating def", "change # unless you do refresh from dB if the", "because when we do a HTTP put if we omit", "modified in the object that's being updated. 
def test_partial_update_recipe(self): \"\"\"Test", "# it to the tags get parameter # if our", "you specify arguments with the reverse function # you just", "object that you're updating def test_full_update_recipe(self): \"\"\"Test updating a recipe", "Recipe.objects.get(id=res.data['id']) # When you create an object using the Django", "what happens at the setup of the test def setUp(self):", "\"\"\"Test uploading an image to recipe\"\"\" url = image_upload_url(self.recipe.id) #", "that is the correct value assigned to our recipe model.", "APIClient() def test_required_auth(self): \"\"\"Test the authenticaiton is required\"\"\" res =", "the authenticaiton is required\"\"\" res = self.client.get(RECIPES_URL) self.assertEqual(res.status_code, status.HTTP_401_UNAUTHORIZED) class", "the request that means if you exclude # any fields", "class PrivateRecipeApiTests(TestCase): \"\"\"Test authenticated recipe API access\"\"\" def setUp(self): self.client", "user recipes = Recipe.objects.filter(user=self.user) serializer = RecipeSerializer(recipes, many=True) # many=true:", "# that is the correct value assigned to our recipe", "res = self.client.post(url, {'image': ntf}, format='multipart') # assertions # refreshing", "run some assertions to check that it # uploaded correctly", "'Test recipe with ingredients', 'ingredients': [ingredient1.id, ingredient2.id], 'time_minutes': 45, 'price':", "can update an object using the # API there's two", "generate our upload image url # you're going to need", "recipe with ingredients\"\"\" ingredient1 = sample_ingredient(user=self.user, name='Ingredient 1') ingredient2 =", "name='Ingredient 1') ingredient2 = sample_ingredient(user=self.user, name='Ingredient 2') payload = {", "of the file so use this seek function to set", "that is saved to our model self.assertTrue(os.path.exists(self.recipe.image.path)) def test_upload_image_bad_request(self): \"\"\"Test", "write an image # to that file and then we're", "URL so that is the URL of the # recipe", "app : identifier of the URL in the app #", "= RecipeSerializer(recipes, many=True) # many=true: this is because we were", "to check that it # uploaded correctly with tempfile.NamedTemporaryFile(suffix='.jpg') as", "which will then create a temp file # somewhere in", "is how you specify arguments with the reverse function #", "them by retrieving # all of the recipes from our", "self.assertIn(serializer2.data, res.data) self.assertNotIn(serializer3.data, res.data) # check the return result def", "existing tag that # we created won't be assigned to", "recipe detail\"\"\" recipe = sample_recipe(user=self.user) recipe.tags.add(sample_tag(user=self.user)) recipe.ingredients.add(sample_ingredient(user=self.user)) url = detail_url(recipe.id)", "create a temp file # somewhere in the system and", "\"\"\"Create and return a sample tag\"\"\" return Tag.objects.create(user=user, name=name) def", "access it then it would # just be blank because", "recipe = Recipe.objects.get(id=res.data['id']) # retrieve the created recipe tags =", "to upload an image def detail_url(recipe_id): \"\"\"Return recipe detail URL\"\"\"", "test_recipes_limited_to_user(self): \"\"\"Test retrieving recipes for user\"\"\" # test recipes are", "fields will actually be removed # from the object that", ") sample_recipe(user=user2) sample_recipe(user=self.user) res = self.client.get(RECIPES_URL) # filter our recipes", "create a temporary file we're going to write an image", "perform things like # creating path names and also checking", "list of recipes\"\"\" 
sample_recipe(user=self.user) sample_recipe(user=self.user) # we're going to access", "import os # this allows us to perform things like", "that we retrieved # test full update # put: it", "\"\"\"Test updating a recipe with put\"\"\" recipe = sample_recipe(user=self.user) recipe.tags.add(sample_tag(user=self.user))", "and then we're going to check # that is the", "payload those fields will actually be removed # from the", "call a function which will then create a temp file", "the tags get parameter # if our filtering is working", "that are omitted from # the request will not be", "then you can remove that file after # you've used", "the argument # when you use the two asterisks when", "system at a random # location usually in the /temp", "ntf.seek(0) # it's the way that Python reads files so", "and mushrooms') # test API res = self.client.get( RECIPES_URL, {'ingredients':", "def image_upload_url(recipe_id): \"\"\"Return URL for recipe image upload\"\"\" return reverse('recipe:recipe-upload-image',", "working # should only return the first two recipe #", "tempfile # allows you to call a function which will", "reads files so because we've # saved the file it", "have a sample tag assigned should not have any tags", "\"\"\"Test creating recipe with ingredients\"\"\" ingredient1 = sample_ingredient(user=self.user, name='Ingredient 1')", "# after the test runs it runs tear down def", "payload = { 'title': 'Test recipe with ingredients', 'ingredients': [ingredient1.id,", "# add a tag to the recipe new_tag = sample_tag(user=self.user,", "'price': 10.00, } res = self.client.post(RECIPES_URL, payload) # post this", "core.models import Recipe, Tag, Ingredient from ..serializers import RecipeSerializer, RecipeDetailSerializer", "tags) # check if the tags that we created as", "recipe.tags.add(sample_tag(user=self.user)) payload = { 'title': 'Fully Updated sample recipe', 'time_minutes':", "that the default router will create # for our viewset", "pointer back to the beginning of the file res =", "create # delete the image if it exists in the", "from ..serializers import RecipeSerializer, RecipeDetailSerializer import tempfile # allows you", "put if we omit a field # that should clear", "or we wanted to simulate the list view in our", "temporary file we're going to write an image # to", "specific tags\"\"\" recipe1 = sample_recipe(user=self.user, title='Thai vegetable curry') recipe2 =", "self.recipe = sample_recipe(user=self.user) # after the test runs it runs", "= sample_ingredient(user=self.user, name='Chicken') recipe1.ingredients.add(ingredient1) recipe2.ingredients.add(ingredient2) recipe3 = sample_recipe(user=self.user, title='Steak and", "if it exists in the recipe def test_upload_image_to_recipe(self): \"\"\"Test uploading", "access\"\"\" def setUp(self): self.client = APIClient() def test_required_auth(self): \"\"\"Test the", "beans on toast') recipe2 = sample_recipe(user=self.user, title='Chicken cacciatore') ingredient1 =", "pillow, this will import our image class which will let", "self.client.put(url, payload) recipe.refresh_from_db() self.assertEqual(recipe.title, payload['title']) self.assertEqual(recipe.time_minutes, payload['time_minutes']) self.assertEqual(recipe.price, payload['price']) tags", "I know that if we do res.data and # retrieve", "default behavior is that it will return a dictionary containing", "so now our recipe # that did have a sample", "= sample_recipe(user=self.user, title='Fish and chips') res = self.client.get( RECIPES_URL, {'tags':", "[new_tag.id]} # tags will be 
replaced with this new tag", "to have a detail action # this is how you", "you pass in a list of the # arguments you", "# uploaded correctly with tempfile.NamedTemporaryFile(suffix='.jpg') as ntf: img = Image.new('RGB',", "create a new model and you have a # reference", "PIL import Image # pillow, this will import our image", "we've # saved the file it will be the seeking", "# the request will not be modified in the object", "recipe = Recipe.objects.get(id=res.data['id']) # When you create an object using", "new tag and what we're going to do is we're", "{'title': 'Partially Updated sample recipe', 'tags': [new_tag.id]} # tags will", "= { 'title': 'Sample recipe', 'time_minutes': 10, 'price': 5.00, }", "field # that should clear the value of that field", "recipe3 = sample_recipe(user=self.user, title='Fish and chips') res = self.client.get( RECIPES_URL,", "test runs it runs tear down def tearDown(self): self.recipe.image.delete() #", "an image def detail_url(recipe_id): \"\"\"Return recipe detail URL\"\"\" return reverse('recipe:recipe-detail',", "# are assigned and just make sure they match what", "# many=true: this is because we were returning the list", "the same key in the recipe # payload[key]: This will", "recipe with ingredients', 'ingredients': [ingredient1.id, ingredient2.id], 'time_minutes': 45, 'price': 15.00", "then create a temp file # somewhere in the system", "invalid image\"\"\" url = image_upload_url(self.recipe.id) res = self.client.post(url, {'image': 'notimage'},", "this tag that we create here and we're going #", "the /temp folder # create a temporary file we're going", "def test_partial_update_recipe(self): \"\"\"Test updating a recipe with patch\"\"\" # make", "the images in the response so that's the path to", "simulate the list view in our serializer self.assertEqual(res.status_code, status.HTTP_200_OK) self.assertEqual(len(res.data),", "= APIClient() self.user = get_user_model().objects.create_user('user', 'testpass') self.client.force_authenticate(self.user) # authenticate our", "files so because we've # saved the file it will", "django.test import TestCase from django.urls import reverse from rest_framework import", "# the same as the tags that are in our", "and we're going to check if # they exist in", "# /api/recipe/recipes # /api/recipe/recipes/1/ (id) --> detail url def image_upload_url(recipe_id):", "this is how you specify arguments with the reverse function", "# you're going to need the existing recipe ID in", "to swap out this tag that we create here and", "image # to that file and then we're going to", "change are the # fields that are provided and any", "recipe with two tags', 'tags': [tag1.id, tag2.id], 'time_minutes': 30, 'price':", "with ingredients', 'ingredients': [ingredient1.id, ingredient2.id], 'time_minutes': 45, 'price': 15.00 }", "check if # they exist in the responses returned self.assertIn(serializer1.data,", "allows us to perform things like # creating path names", "will be replaced with this new tag so the existing", "We're going to retrieve an update to the recipe from", "this will create a comma separated list string and assign", "res = self.client.get(RECIPES_URL) # filter our recipes by the authenticated", "args=[recipe_id]) # name of the end point that the default", "image to recipe\"\"\" url = image_upload_url(self.recipe.id) # going to use", "actually be removed # from the object that you're updating", "get the id of the created object. 
# Next what", "payload = { 'title': 'Fully Updated sample recipe', 'time_minutes': 25,", "list view # or we wanted to simulate the list", "we created won't be assigned to it url = detail_url(recipe.id)", "def test_retrieve_recipes(self): \"\"\"Test retrieving list of recipes\"\"\" sample_recipe(user=self.user) sample_recipe(user=self.user) #", "retrieve the id key this will get the id of", "course'): \"\"\"Create and return a sample tag\"\"\" return Tag.objects.create(user=user, name=name)", "if our filtering is working # should only return the", "model self.assertTrue(os.path.exists(self.recipe.image.path)) def test_upload_image_bad_request(self): \"\"\"Test uploading an invalid image\"\"\" url", "recipe that we want to update. self.client.patch(url, payload) # make", "assigned to it url = detail_url(recipe.id) # the way that", "test # removing all of the test files that we", "if the tags that we created as our sample tags", "make request # We're going to retrieve an update to", "def setUp(self): self.client = APIClient() self.user = get_user_model().objects.create_user('user', 'testpass') self.client.force_authenticate(self.user)", "to the recipe new_tag = sample_tag(user=self.user, name='Curry') # add a", "= Recipe.objects.get(id=res.data['id']) ingredients = recipe.ingredients.all() # get the ingredients queryset", "ingredients\"\"\" recipe1 = sample_recipe(user=self.user, title='Posh beans on toast') recipe2 =", "= sample_ingredient(user=self.user, name='Ingredient 1') ingredient2 = sample_ingredient(user=self.user, name='Ingredient 2') payload", "Tag.objects.create(user=user, name=name) def sample_ingredient(user, name='Cinnamon'): \"\"\"Create and return a sample", "status.HTTP_400_BAD_REQUEST) def test_filter_recipes_by_tags(self): \"\"\"Test returning recipes with specific tags\"\"\" recipe1", "img = Image.new('RGB', (10, 10)) # creates black square img.save(ntf,", "single object self.assertEqual(res.data, serializer.data) def test_create_basic_recipe(self): \"\"\"Test creating recipe\"\"\" payload", "user\"\"\" # test recipes are limited to the authenticated user.", "are limited to the authenticated user. user2 = get_user_model().objects.create_user( '<EMAIL>',", "values have changed # in the database. 
self.assertEqual(recipe.title, payload['title']) tags", "square img.save(ntf, format='JPEG') ntf.seek(0) # it's the way that Python", "self.user = get_user_model().objects.create_user( '<EMAIL>', '<PASSWORD>' ) self.client.force_authenticate(self.user) def test_retrieve_recipes(self): \"\"\"Test", "sample_recipe(user=self.user) sample_recipe(user=self.user) # we're going to access them by retrieving", "to the tags get parameter # if our filtering is", "# test the response: serializer1 = RecipeSerializer(recipe1) serializer2 = RecipeSerializer(recipe2)", "that should be accessible self.assertIn('image', res.data) # check that the", "file on the system at a random # location usually", "\"\"\"Return URL for recipe image upload\"\"\" return reverse('recipe:recipe-upload-image', args=[recipe_id]) #", "is working # should only return the first two recipe", "detail_url(recipe_id): \"\"\"Return recipe detail URL\"\"\" return reverse('recipe:recipe-detail', args=[recipe_id]) # name", "= recipe.tags.all() self.assertEqual(len(tags), 1) self.assertIn(new_tag, tags) # check that the", "creates black square img.save(ntf, format='JPEG') ntf.seek(0) # it's the way", "return reverse('recipe:recipe-upload-image', args=[recipe_id]) # generate our upload image url #", "will actually be removed # from the object that you're", "that it will change are the # fields that are", "= reverse('recipe:recipe-list') # since we're going to need to access", "# they exist in the responses returned self.assertIn(serializer1.data, res.data) self.assertIn(serializer2.data,", "image class which will let us then # create test", "what we expect. recipe.refresh_from_db() # refreshes the details in our", "object. # Next what we're going to do is we're", "# any fields in the payload those fields will actually", "in our queryset. def test_create_recipe_with_ingredients(self): \"\"\"Test creating recipe with ingredients\"\"\"", "with a new tag payload = {'title': 'Partially Updated sample", "be the seeking will be done to the # end", "return a dictionary containing # the created object This is", "if # they exist in the responses returned self.assertIn(serializer1.data, res.data)", "ingredient\"\"\" return Ingredient.objects.create(user=user, name=name) def sample_recipe(user, **params): \"\"\"Create and return", "# API there's two different HTTP methods: put, patch #", "the full # object that is provided in the request", "an image # to that file and then we're going", "image def detail_url(recipe_id): \"\"\"Return recipe detail URL\"\"\" return reverse('recipe:recipe-detail', args=[recipe_id])", "of that field so now our recipe # that did", "that it will return a dictionary containing # the created", "10.00 } res = self.client.post(RECIPES_URL, payload) self.assertEqual(res.status_code, status.HTTP_201_CREATED) recipe =", "clear the value of that field so now our recipe", "recipe API access\"\"\" def setUp(self): self.client = APIClient() self.user =", "URL\"\"\" return reverse('recipe:recipe-detail', args=[recipe_id]) # name of the end point", "into the argument # when you use the two asterisks", "recipe.key) def test_create_recipe_with_tags(self): \"\"\"Test creating a recipe with tags\"\"\" tag1", "Python reads files so because we've # saved the file", "not be modified in the object that's being updated. 
def", "list string and assign # it to the tags get", "payload = { 'title': 'Test recipe with two tags', 'tags':", "recipes for user\"\"\" # test recipes are limited to the", "pass in args and then you pass in a list", ") serializer1 = RecipeSerializer(recipe1) serializer2 = RecipeSerializer(recipe2) serializer3 = RecipeSerializer(recipe3)", "0) # we will check that the tags assigned are", "'Test recipe with two tags', 'tags': [tag1.id, tag2.id], 'time_minutes': 30,", "you update an object using the Django rest framework #", "serializer = RecipeSerializer(recipes, many=True) self.assertEqual(res.status_code, status.HTTP_200_OK) self.assertEqual(res.data, serializer.data) def test_recipes_limited_to_user(self):", "HTTP response code for creating objects # in an API.", "how I know that if we do res.data and #", "up to the end # of the file so use", "recipe that gets created # it creates a named temporary", "sample_tag(user=self.user, name='Tag 1') tag2 = sample_tag(user=self.user, name='Tag 2') payload =", "that it # uploaded correctly with tempfile.NamedTemporaryFile(suffix='.jpg') as ntf: img", "assigned. self.assertIn(tag1, tags) self.assertIn(tag2, tags) # check if the tags", "from django.urls import reverse from rest_framework import status from rest_framework.test", "provided in the request that means if you exclude #", "setUp(self): self.client = APIClient() def test_required_auth(self): \"\"\"Test the authenticaiton is", "self.recipe.refresh_from_db() self.assertEqual(res.status_code, status.HTTP_200_OK) # check that the images in the", "with tags\"\"\" tag1 = sample_tag(user=self.user, name='Tag 1') tag2 = sample_tag(user=self.user,", "are the # fields that are provided and any fields", "make a request to change a field in our recipe.", "our sample tags are # the same as the tags", "you can remove that file after # you've used it", "serializer.data) def test_create_basic_recipe(self): \"\"\"Test creating recipe\"\"\" payload = { 'title':", "tag new tag is in the tags that we retrieved", "assertions # refreshing the database for our recipe self.recipe.refresh_from_db() self.assertEqual(res.status_code,", "= { 'title': 'Test recipe with two tags', 'tags': [tag1.id,", "details in our recipe from the database # typically when", "of the URL in the app # /api/recipe/recipes # /api/recipe/recipes/1/", "system and then you can remove that file after #", "sample_recipe(user=self.user) recipe.tags.add(sample_tag(user=self.user)) recipe.ingredients.add(sample_ingredient(user=self.user)) url = detail_url(recipe.id) res = self.client.get(url) serializer", "and full update of an object # there are two", "url = detail_url(recipe.id) res = self.client.get(url) serializer = RecipeDetailSerializer(recipe) #", "somewhere in the system and then you can remove that", "that won't change # unless you do refresh from dB", "going to upload that file # through the API like", "tests let's assign that as a variable # at top", "us to perform things like # creating path names and", "that are provided and any fields that are omitted from", "to write an image # to that file and then", "dictionary into the argument # when you use the two", "self.assertTrue(os.path.exists(self.recipe.image.path)) def test_upload_image_bad_request(self): \"\"\"Test uploading an invalid image\"\"\" url =", "# should only return the first two recipe # test", "with this new tag so the existing tag that #", "of the recipes from our database. 

def sample_tag(user, name='Main course'):
    """Create and return a sample tag"""
    return Tag.objects.create(user=user, name=name)


def sample_ingredient(user, name='Cinnamon'):
    """Create and return a sample ingredient"""
    return Ingredient.objects.create(user=user, name=name)


def sample_recipe(user, **params):
    """Create and return a sample recipe"""
    defaults = {
        'title': 'Sample recipe',
        'time_minutes': 10,
        'price': 5.00,
    }
    defaults.update(params)

    return Recipe.objects.create(user=user, **defaults)
    # **defaults converts the dictionary into keyword arguments: when you
    # use the two asterisks while calling a function, it has the reverse
    # effect of **params in the signature.
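# Illustration (added note, not in the original): any keyword passed to
# sample_recipe overrides the matching default, e.g.
#
#   sample_recipe(user, title='Fish and chips')
#
# ends up calling
#
#   Recipe.objects.create(user=user, title='Fish and chips',
#                         time_minutes=10, price=5.00)
#
# which is how the filter tests below create recipes with custom titles.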

class PublicRecipeApiTests(TestCase):
    """Test unauthenticated recipe API access"""

    def setUp(self):
        self.client = APIClient()

    def test_required_auth(self):
        """Test that authentication is required"""
        res = self.client.get(RECIPES_URL)

        self.assertEqual(res.status_code, status.HTTP_401_UNAUTHORIZED)


class PrivateRecipeApiTests(TestCase):
    """Test authenticated recipe API access"""

    def setUp(self):
        self.client = APIClient()
        self.user = get_user_model().objects.create_user(
            '<EMAIL>',
            '<PASSWORD>'
        )
        self.client.force_authenticate(self.user)

    def test_retrieve_recipes(self):
        """Test retrieving list of recipes"""
        sample_recipe(user=self.user)
        sample_recipe(user=self.user)
        # we're going to access them by retrieving all of the recipes
        # from our database.

        res = self.client.get(RECIPES_URL)

        recipes = Recipe.objects.all().order_by('-id')
        serializer = RecipeSerializer(recipes, many=True)
        self.assertEqual(res.status_code, status.HTTP_200_OK)
        self.assertEqual(res.data, serializer.data)

    def test_recipes_limited_to_user(self):
        """Test retrieving recipes for user"""
        # test that recipes are limited to the authenticated user.
        user2 = get_user_model().objects.create_user(
            '<EMAIL>',
            'pass'
        )
        sample_recipe(user=user2)
        sample_recipe(user=self.user)

        res = self.client.get(RECIPES_URL)

        # filter our recipes by the authenticated user
        recipes = Recipe.objects.filter(user=self.user)
        serializer = RecipeSerializer(recipes, many=True)
        # many=True: this is because we are returning the list view, i.e.
        # we want to simulate the list view in our serializer
        self.assertEqual(res.status_code, status.HTTP_200_OK)
        self.assertEqual(len(res.data), 1)
        self.assertEqual(res.data, serializer.data)
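
    # Added note (not in the original): with many=True the serializer's
    # .data is a list of dicts, one per recipe, which matches what the list
    # endpoint returns; without it, .data is a single dict for one object,
    # as used in test_view_recipe_detail below.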

    def test_view_recipe_detail(self):
        """Test viewing a recipe detail"""
        recipe = sample_recipe(user=self.user)
        recipe.tags.add(sample_tag(user=self.user))
        recipe.ingredients.add(sample_ingredient(user=self.user))

        url = detail_url(recipe.id)
        res = self.client.get(url)

        serializer = RecipeDetailSerializer(recipe)
        # in this case we just want to serialize a single object
        self.assertEqual(res.data, serializer.data)

    def test_create_basic_recipe(self):
        """Test creating recipe"""
        payload = {
            'title': 'Test recipe',
            'time_minutes': 30,
            'price': 10.00,
        }
        res = self.client.post(RECIPES_URL, payload)
        # post this payload dictionary to our recipes URL.

        self.assertEqual(res.status_code, status.HTTP_201_CREATED)
        # this is the standard HTTP response code for creating objects
        # in an API.
        recipe = Recipe.objects.get(id=res.data['id'])
        # when you create an object using the Django REST framework, the
        # default behaviour is that it returns a dictionary containing the
        # created object; that is how I know that if we take res.data and
        # retrieve the 'id' key, we get the id of the created object.
        # Next we loop through each of the payload keys and check that each
        # one holds the correct value assigned to our recipe model.
        for key in payload.keys():
            self.assertEqual(payload[key], getattr(recipe, key))
            # assertion for each one of these keys: check that it is equal
            # to the same key on the recipe.
            # payload[key]: gets the value of the key in our payload dict
            # getattr: lets you retrieve an attribute from an object by
            # passing in a variable (instead of recipe.key)
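
    # Added note (not in the original): getattr(recipe, 'title') is
    # equivalent to recipe.title; using getattr lets the loop look the
    # attribute up by name, since writing recipe.key would literally look
    # for an attribute called "key".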

    def test_create_recipe_with_tags(self):
        """Test creating a recipe with tags"""
        tag1 = sample_tag(user=self.user, name='Tag 1')
        tag2 = sample_tag(user=self.user, name='Tag 2')
        payload = {
            'title': 'Test recipe with two tags',
            'tags': [tag1.id, tag2.id],
            'time_minutes': 30,
            'price': 10.00
        }
        res = self.client.post(RECIPES_URL, payload)

        self.assertEqual(res.status_code, status.HTTP_201_CREATED)
        recipe = Recipe.objects.get(id=res.data['id'])
        # retrieve the created recipe
        tags = recipe.tags.all()
        # retrieve the tags that were created with the recipe
        self.assertEqual(tags.count(), 2)
        # because we expect two tags to be assigned.
        self.assertIn(tag1, tags)
        self.assertIn(tag2, tags)
        # check that the tags we created as our sample tags are the same
        # as the tags that are in our queryset.

    def test_create_recipe_with_ingredients(self):
        """Test creating recipe with ingredients"""
        ingredient1 = sample_ingredient(user=self.user, name='Ingredient 1')
        ingredient2 = sample_ingredient(user=self.user, name='Ingredient 2')
        payload = {
            'title': 'Test recipe with ingredients',
            'ingredients': [ingredient1.id, ingredient2.id],
            'time_minutes': 45,
            'price': 15.00
        }

        res = self.client.post(RECIPES_URL, payload)

        self.assertEqual(res.status_code, status.HTTP_201_CREATED)
        recipe = Recipe.objects.get(id=res.data['id'])
        ingredients = recipe.ingredients.all()
        # get the ingredients queryset
        self.assertEqual(ingredients.count(), 2)
        self.assertIn(ingredient1, ingredients)
        self.assertIn(ingredient2, ingredients)

    # test partial update and full update of an object
    # there are two ways in which you can update an object using the API,
    # i.e. two different HTTP methods: PUT and PATCH
    # patch: PATCH is used to update only the fields that are provided in
    # the payload, so the only fields that change are the fields that are
    # provided; any fields omitted from the request are not modified in
    # the object that's being updated.
    def test_partial_update_recipe(self):
        """Test updating a recipe with patch"""
        # make a request to change a field in our recipe.
        recipe = sample_recipe(user=self.user)
        recipe.tags.add(sample_tag(user=self.user))
        # add a tag to the recipe
        new_tag = sample_tag(user=self.user, name='Curry')
        # add a new tag; what we're going to do is swap out the tag that we
        # created above and replace it with this new tag

        payload = {
            'title': 'Partially Updated sample recipe',
            'tags': [new_tag.id]
        }
        # tags will be replaced with this new tag, so the existing tag that
        # we created won't be assigned to it
        url = detail_url(recipe.id)
        # the way that you update an object using Django REST framework
        # viewsets is through the detail URL, that is, the URL of the
        # recipe with the ID of the recipe that we want to update.
        self.client.patch(url, payload)
        # make request

        # We're going to retrieve the updated recipe from the database and
        # then check that the fields that are assigned match what we expect.
        recipe.refresh_from_db()
        # refreshes the details of our recipe from the database
        # typically, when you create a new model and you have a reference
        # to that model, its details won't change unless you call
        # refresh_from_db, even if the values have changed in the database.
        self.assertEqual(recipe.title, payload['title'])
        tags = recipe.tags.all()
        self.assertEqual(len(tags), 1)
        self.assertIn(new_tag, tags)
        # check that the new tag is in the tags that we retrieved

    # test full update
    # put: PUT replaces the object that we're updating with the full object
    # provided in the request; that means if you exclude any fields from
    # the payload, those fields are actually removed from the object that
    # you're updating
    def test_full_update_recipe(self):
        """Test updating a recipe with put"""
        recipe = sample_recipe(user=self.user)
        recipe.tags.add(sample_tag(user=self.user))

        payload = {
            'title': 'Fully Updated sample recipe',
            'time_minutes': 25,
            'price': 5.00
        }
        url = detail_url(recipe.id)
        self.client.put(url, payload)

        recipe.refresh_from_db()
        self.assertEqual(recipe.title, payload['title'])
        self.assertEqual(recipe.time_minutes, payload['time_minutes'])
        self.assertEqual(recipe.price, payload['price'])
        tags = recipe.tags.all()
        self.assertEqual(len(tags), 0)
        # we check that zero tags are now assigned because, as explained
        # above, when we do an HTTP PUT and omit a field, the value of that
        # field is cleared; so our recipe, which did have a sample tag
        # assigned, should no longer have any tags assigned

class RecipeImageUploadTests(TestCase):

    # what happens at the setup of the test
    def setUp(self):
        self.client = APIClient()
        self.user = get_user_model().objects.create_user('user', 'testpass')
        self.client.force_authenticate(self.user)
        # authenticate our user
        self.recipe = sample_recipe(user=self.user)

    # after each test runs, tearDown runs
    def tearDown(self):
        self.recipe.image.delete()
        # make sure that our file system is kept clean after our tests by
        # removing all of the test files that we create:
        # delete the image if it exists on the recipe

    def test_upload_image_to_recipe(self):
        """Test uploading an image to recipe"""
        url = image_upload_url(self.recipe.id)
        # going to use the sample recipe that gets created in setUp
        # NamedTemporaryFile creates a named temporary file on the system
        # at a random location, usually in the system temp folder
        # create a temporary file, write an image to that file, then upload
        # that file through the API like you would with an HTTP POST, and
        # then run some assertions to check that it uploaded correctly
        with tempfile.NamedTemporaryFile(suffix='.jpg') as ntf:
            img = Image.new('RGB', (10, 10))
            # creates a 10x10 black square
            img.save(ntf, format='JPEG')
            ntf.seek(0)
            # it's the way that Python reads files: because we've just
            # saved the file, the file pointer is at the end of it, so if
            # you tried to access it the content would appear blank since
            # everything up to the end has already been read. Use seek to
            # set the pointer back to the beginning of the file.
            res = self.client.post(url, {'image': ntf}, format='multipart')

        # assertions
        # refresh our recipe from the database
        self.recipe.refresh_from_db()
        self.assertEqual(res.status_code, status.HTTP_200_OK)
        # check that the image is in the response; that's the path to the
        # image that should be accessible
        self.assertIn('image', res.data)
        # check that the path exists for the image saved to our model
        self.assertTrue(os.path.exists(self.recipe.image.path))
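
    # Added note (not in the original): format='multipart' makes the test
    # client encode the request as multipart form-data, which is what is
    # needed to send the file object in the request body.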

    def test_upload_image_bad_request(self):
        """Test uploading an invalid image"""
        url = image_upload_url(self.recipe.id)
        res = self.client.post(url, {'image': 'notimage'}, format='multipart')

        self.assertEqual(res.status_code, status.HTTP_400_BAD_REQUEST)

    def test_filter_recipes_by_tags(self):
        """Test returning recipes with specific tags"""
        recipe1 = sample_recipe(user=self.user, title='Thai vegetable curry')
        recipe2 = sample_recipe(user=self.user, title='Aubergine with tahini')
        tag1 = sample_tag(user=self.user, name='Vegan')
        tag2 = sample_tag(user=self.user, name='Vegetarian')
        recipe1.tags.add(tag1)
        recipe2.tags.add(tag2)
        recipe3 = sample_recipe(user=self.user, title='Fish and chips')

        res = self.client.get(
            RECIPES_URL,
            {'tags': '{},{}'.format(tag1.id, tag2.id)}
        )
        # this creates a comma separated list string and assigns it to the
        # 'tags' GET parameter; if our filtering is working, it should only
        # return the first two recipes

        # test the response:
        serializer1 = RecipeSerializer(recipe1)
        serializer2 = RecipeSerializer(recipe2)
        serializer3 = RecipeSerializer(recipe3)
        # serialize the recipes and check whether they exist in the
        # response that is returned
        self.assertIn(serializer1.data, res.data)
        self.assertIn(serializer2.data, res.data)
        self.assertNotIn(serializer3.data, res.data)
        # check the returned result
recipe.refresh_from_db() # refreshes the details in our recipe from", "our recipes URL. self.assertEqual(res.status_code, status.HTTP_201_CREATED) # this is the standard", "filter our recipes by the authenticated user recipes = Recipe.objects.filter(user=self.user)", "ingredients queryset self.assertEqual(ingredients.count(), 2) self.assertIn(ingredient1, ingredients) self.assertIn(ingredient2, ingredients) # test", "# creating path names and also checking if files exist", "'<EMAIL>', 'pass' ) sample_recipe(user=user2) sample_recipe(user=self.user) res = self.client.get(RECIPES_URL) # filter", "payload) self.assertEqual(res.status_code, status.HTTP_201_CREATED) recipe = Recipe.objects.get(id=res.data['id']) # retrieve the created", "from dB if the values have changed # in the", "you're updating def test_full_update_recipe(self): \"\"\"Test updating a recipe with put\"\"\"", "title='Chicken cacciatore') ingredient1 = sample_ingredient(user=self.user, name='Feta cheese') ingredient2 = sample_ingredient(user=self.user,", "response so that's the path to # the image that", "of that won't change # unless you do refresh from", "'time_minutes': 10, 'price': 5.00, } defaults.update(params) return Recipe.objects.create(user=user, **defaults) #", "set # the pointer back to the beginning of the", "going to need the existing recipe ID in order to", "value of that field so now our recipe # that", "file so use this seek function to set # the", "field so now our recipe # that did have a", "APIClient() self.user = get_user_model().objects.create_user( '<EMAIL>', '<PASSWORD>' ) self.client.force_authenticate(self.user) def test_retrieve_recipes(self):", "'testpass') self.client.force_authenticate(self.user) # authenticate our user self.recipe = sample_recipe(user=self.user) #", "to access it then it would # just be blank", "a field in our recipe. recipe = sample_recipe(user=self.user) recipe.tags.add(sample_tag(user=self.user)) #", "the end # of the file so use this seek", "unless you do refresh from dB if the values have", "to need the existing recipe ID in order to upload", "\"\"\"Test creating recipe\"\"\" payload = { 'title': 'Test recipe', 'time_minutes':", "many=True) self.assertEqual(res.status_code, status.HTTP_200_OK) self.assertEqual(res.data, serializer.data) def test_recipes_limited_to_user(self): \"\"\"Test retrieving recipes", "payload['title']) tags = recipe.tags.all() self.assertEqual(len(tags), 1) self.assertIn(new_tag, tags) # check", "= self.client.get(RECIPES_URL) # filter our recipes by the authenticated user", "a new model and you have a # reference to", "that file # through the API like you would with", "from django.contrib.auth import get_user_model from django.test import TestCase from django.urls", "those fields will actually be removed # from the object", "sure they match what we expect. 
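The reverse() names used by the helpers above ('recipe:recipe-list', 'recipe:recipe-detail', 'recipe:recipe-upload-image') are the ones a DRF DefaultRouter generates for a viewset registered inside a namespaced app. The routing module itself is not part of this test file, so the sketch below is only an assumed shape: the 'recipes' prefix and the views.RecipeViewSet name are placeholders for whatever the project actually registers.

# recipe/urls.py -- assumed sketch, not taken from the tests in this module
from django.urls import include, path
from rest_framework.routers import DefaultRouter

from . import views  # hypothetical module providing RecipeViewSet

router = DefaultRouter()
# registering under 'recipes' yields the route names 'recipe-list',
# 'recipe-detail' and, via a custom
# @action(detail=True, url_path='upload-image'), 'recipe-upload-image'
router.register('recipes', views.RecipeViewSet)

app_name = 'recipe'  # provides the 'recipe:' namespace used in reverse()

urlpatterns = [
    path('', include(router.urls)),
]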
class PublicRecipeApiTests(TestCase):
    """Test unauthenticated recipe API access"""

    def setUp(self):
        self.client = APIClient()

    def test_required_auth(self):
        """Test that authentication is required"""
        res = self.client.get(RECIPES_URL)

        self.assertEqual(res.status_code, status.HTTP_401_UNAUTHORIZED)


class PrivateRecipeApiTests(TestCase):
    """Test authenticated recipe API access"""

    def setUp(self):
        self.client = APIClient()
        self.user = get_user_model().objects.create_user(
            '<EMAIL>',
            '<PASSWORD>'
        )
        self.client.force_authenticate(self.user)

    def test_retrieve_recipes(self):
        """Test retrieving list of recipes"""
        sample_recipe(user=self.user)
        sample_recipe(user=self.user)

        # we're going to access them by retrieving all of the recipes
        # from our database
        res = self.client.get(RECIPES_URL)

        recipes = Recipe.objects.all().order_by('-id')
        serializer = RecipeSerializer(recipes, many=True)
        self.assertEqual(res.status_code, status.HTTP_200_OK)
        self.assertEqual(res.data, serializer.data)

    def test_recipes_limited_to_user(self):
        """Test retrieving recipes for user"""
        # recipes should be limited to the authenticated user
        user2 = get_user_model().objects.create_user(
            '<EMAIL>',
            'pass'
        )
        sample_recipe(user=user2)
        sample_recipe(user=self.user)

        res = self.client.get(RECIPES_URL)

        # filter our recipes by the authenticated user
        recipes = Recipe.objects.filter(user=self.user)
        # many=True because we are returning the list view, i.e. we want to
        # simulate the list view in our serializer
        serializer = RecipeSerializer(recipes, many=True)
        self.assertEqual(res.status_code, status.HTTP_200_OK)
        self.assertEqual(len(res.data), 1)
        self.assertEqual(res.data, serializer.data)

    def test_view_recipe_detail(self):
        """Test viewing a recipe detail"""
        recipe = sample_recipe(user=self.user)
        recipe.tags.add(sample_tag(user=self.user))
        recipe.ingredients.add(sample_ingredient(user=self.user))

        url = detail_url(recipe.id)
        res = self.client.get(url)

        # in this case we just want to serialize a single object
        serializer = RecipeDetailSerializer(recipe)
        self.assertEqual(res.data, serializer.data)

    def test_create_basic_recipe(self):
        """Test creating recipe"""
        payload = {
            'title': 'Test recipe',
            'time_minutes': 30,
            'price': 10.00,
        }
        # post this payload dictionary to our recipes URL
        res = self.client.post(RECIPES_URL, payload)

        # 201 is the standard HTTP response code for creating objects in an API
        self.assertEqual(res.status_code, status.HTTP_201_CREATED)
        # When you create an object using Django REST framework, the default
        # behavior is to return a dictionary containing the created object,
        # so res.data['id'] gives us the id of the created object.
        recipe = Recipe.objects.get(id=res.data['id'])
        # Next, loop through each of the payload keys and check that the
        # correct value is assigned to our recipe model.
        # payload[key] gets the value of the key in our payload dict;
        # getattr lets you retrieve an attribute from an object by passing
        # in a variable (instead of recipe.key).
        for key in payload.keys():
            self.assertEqual(payload[key], getattr(recipe, key))

    def test_create_recipe_with_tags(self):
        """Test creating a recipe with tags"""
        tag1 = sample_tag(user=self.user, name='Tag 1')
        tag2 = sample_tag(user=self.user, name='Tag 2')
        payload = {
            'title': 'Test recipe with two tags',
            'tags': [tag1.id, tag2.id],
            'time_minutes': 30,
            'price': 10.00
        }
        res = self.client.post(RECIPES_URL, payload)

        self.assertEqual(res.status_code, status.HTTP_201_CREATED)
        # retrieve the created recipe and the tags that were created with it
        recipe = Recipe.objects.get(id=res.data['id'])
        tags = recipe.tags.all()
        # we expect two tags to be assigned
        self.assertEqual(tags.count(), 2)
        # check that the tags we created as our sample tags are the same
        # as the tags that are in our queryset
        self.assertIn(tag1, tags)
        self.assertIn(tag2, tags)

    def test_create_recipe_with_ingredients(self):
        """Test creating recipe with ingredients"""
        ingredient1 = sample_ingredient(user=self.user, name='Ingredient 1')
        ingredient2 = sample_ingredient(user=self.user, name='Ingredient 2')
        payload = {
            'title': 'Test recipe with ingredients',
            'ingredients': [ingredient1.id, ingredient2.id],
            'time_minutes': 45,
            'price': 15.00
        }

        res = self.client.post(RECIPES_URL, payload)

        self.assertEqual(res.status_code, status.HTTP_201_CREATED)
        recipe = Recipe.objects.get(id=res.data['id'])
        # get the ingredients queryset
        ingredients = recipe.ingredients.all()
        self.assertEqual(ingredients.count(), 2)
        self.assertIn(ingredient1, ingredients)
        self.assertIn(ingredient2, ingredients)

    # Partial update and full update of an object: there are two ways to
    # update an object through the API, using two different HTTP methods,
    # PATCH and PUT.
    # PATCH updates only the fields that are provided in the payload; any
    # fields omitted from the request are not modified in the object that's
    # being updated.
    def test_partial_update_recipe(self):
        """Test updating a recipe with patch"""
        # make a request to change a field in our recipe
        recipe = sample_recipe(user=self.user)
        recipe.tags.add(sample_tag(user=self.user))
        # add a new tag; we're going to swap out the tag created above and
        # replace it with this new tag
        new_tag = sample_tag(user=self.user, name='Curry')

        # tags will be replaced with this new tag, so the existing tag that
        # we created won't be assigned to it
        payload = {'title': 'Partially Updated sample recipe', 'tags': [new_tag.id]}
        # the way you update an object using Django REST framework viewsets
        # is via the detail URL, i.e. the URL of the recipe with the ID of
        # the recipe that we want to update
        url = detail_url(recipe.id)
        self.client.patch(url, payload)  # make the request

        # Retrieve the updated recipe from the database and check that the
        # assigned fields match what we expect.
        # refresh_from_db refreshes the details in our recipe from the
        # database: typically, once you hold a reference to a model
        # instance, its details won't change even if the values have changed
        # in the database, unless you refresh from the DB.
        recipe.refresh_from_db()
        self.assertEqual(recipe.title, payload['title'])
        tags = recipe.tags.all()
        self.assertEqual(len(tags), 1)
        # check that the new tag is in the tags that we retrieved
        self.assertIn(new_tag, tags)

    # Full update: PUT replaces the object being updated with the full
    # object provided in the request. That means if you exclude any fields
    # from the payload, those fields will actually be removed from the
    # object that you're updating.
    def test_full_update_recipe(self):
        """Test updating a recipe with put"""
        recipe = sample_recipe(user=self.user)
        recipe.tags.add(sample_tag(user=self.user))
        payload = {
            'title': 'Fully Updated sample recipe',
            'time_minutes': 25,
            'price': 5.00
        }
        url = detail_url(recipe.id)
        self.client.put(url, payload)

        recipe.refresh_from_db()
        self.assertEqual(recipe.title, payload['title'])
        self.assertEqual(recipe.time_minutes, payload['time_minutes'])
        self.assertEqual(recipe.price, payload['price'])
        tags = recipe.tags.all()
        # the assigned tags should now be zero: because we did an HTTP PUT
        # and omitted the tags field, that clears the value of the field,
        # so the recipe that did have a sample tag assigned should no longer
        # have any tags assigned
        self.assertEqual(len(tags), 0)
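The create tests post tags and ingredients as lists of primary keys, while test_view_recipe_detail expects RecipeDetailSerializer output for a single object; that only works if RecipeSerializer exposes the related objects as primary-key fields and the detail serializer swaps them for nested representations. The serializers module is imported but not shown here, so the following is a minimal sketch under those assumptions; the exact field list, the TagSerializer/IngredientSerializer names and the RecipeImageSerializer (referenced again after the image tests below) are illustrative, not confirmed by the tests.

# recipe/serializers.py -- assumed sketch
from rest_framework import serializers

from core.models import Recipe, Tag, Ingredient


class TagSerializer(serializers.ModelSerializer):
    """Serializer for tag objects"""

    class Meta:
        model = Tag
        fields = ('id', 'name')
        read_only_fields = ('id',)


class IngredientSerializer(serializers.ModelSerializer):
    """Serializer for ingredient objects"""

    class Meta:
        model = Ingredient
        fields = ('id', 'name')
        read_only_fields = ('id',)


class RecipeSerializer(serializers.ModelSerializer):
    """Serialize a recipe; tags and ingredients are writable PK lists"""
    tags = serializers.PrimaryKeyRelatedField(
        many=True, queryset=Tag.objects.all()
    )
    ingredients = serializers.PrimaryKeyRelatedField(
        many=True, queryset=Ingredient.objects.all()
    )

    class Meta:
        model = Recipe
        fields = ('id', 'title', 'ingredients', 'tags', 'time_minutes', 'price')
        read_only_fields = ('id',)


class RecipeDetailSerializer(RecipeSerializer):
    """Serialize a recipe detail with nested tags and ingredients"""
    tags = TagSerializer(many=True, read_only=True)
    ingredients = IngredientSerializer(many=True, read_only=True)


class RecipeImageSerializer(serializers.ModelSerializer):
    """Serializer for uploading images to recipes"""

    class Meta:
        model = Recipe
        fields = ('id', 'image')
        read_only_fields = ('id',)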
class RecipeImageUploadTests(TestCase):

    def setUp(self):
        # what happens at the setup of each test
        self.client = APIClient()
        self.user = get_user_model().objects.create_user('user', 'testpass')
        # authenticate our user
        self.client.force_authenticate(self.user)
        self.recipe = sample_recipe(user=self.user)

    def tearDown(self):
        # tearDown runs after each test; delete the image if it exists in
        # the recipe so that our file system is kept clean after our tests,
        # removing all of the test files that we create
        self.recipe.image.delete()

    def test_upload_image_to_recipe(self):
        """Test uploading an image to recipe"""
        # use the sample recipe created in setUp; you need the existing
        # recipe ID to generate the upload image URL
        url = image_upload_url(self.recipe.id)

        # NamedTemporaryFile creates a named temporary file on the system
        # at a random location, usually in the /tmp folder. We write an
        # image to that file, upload it through the API as you would with
        # an HTTP POST, and then run some assertions to check that it
        # uploaded correctly.
        with tempfile.NamedTemporaryFile(suffix='.jpg') as ntf:
            img = Image.new('RGB', (10, 10))  # creates a black square
            img.save(ntf, format='JPEG')
            # Because of the way Python reads files, after saving, the file
            # pointer sits at the end of the file; reading it now would
            # return nothing. Use seek(0) to set the pointer back to the
            # beginning of the file before uploading.
            ntf.seek(0)
            res = self.client.post(url, {'image': ntf}, format='multipart')

        # assertions: refresh our recipe from the database
        self.recipe.refresh_from_db()
        self.assertEqual(res.status_code, status.HTTP_200_OK)
        # check that the image is in the response, i.e. the path to the
        # image that should be accessible
        self.assertIn('image', res.data)
        # check that the path exists for the image saved to our model
        self.assertTrue(os.path.exists(self.recipe.image.path))

    def test_upload_image_bad_request(self):
        """Test uploading an invalid image"""
        url = image_upload_url(self.recipe.id)
        res = self.client.post(url, {'image': 'notimage'}, format='multipart')

        self.assertEqual(res.status_code, status.HTTP_400_BAD_REQUEST)

    def test_filter_recipes_by_tags(self):
        """Test returning recipes with specific tags"""
        recipe1 = sample_recipe(user=self.user, title='Thai vegetable curry')
        recipe2 = sample_recipe(user=self.user, title='Aubergine with tahini')
        tag1 = sample_tag(user=self.user, name='Vegan')
        tag2 = sample_tag(user=self.user, name='Vegetarian')
        recipe1.tags.add(tag1)
        recipe2.tags.add(tag2)
        recipe3 = sample_recipe(user=self.user, title='Fish and chips')

        # this creates a comma separated list string and assigns it to the
        # tags GET parameter; if our filtering is working, only the first
        # two recipes should be returned
        res = self.client.get(
            RECIPES_URL,
            {'tags': '{},{}'.format(tag1.id, tag2.id)}
        )

        # serialize the recipes and check whether they exist in the
        # response returned
        serializer1 = RecipeSerializer(recipe1)
        serializer2 = RecipeSerializer(recipe2)
        serializer3 = RecipeSerializer(recipe3)
        self.assertIn(serializer1.data, res.data)
        self.assertIn(serializer2.data, res.data)
        self.assertNotIn(serializer3.data, res.data)

    def test_filter_recipes_by_ingredients(self):
        """Test returning recipes with specific ingredients"""
        recipe1 = sample_recipe(user=self.user, title='Posh beans on toast')
        recipe2 = sample_recipe(user=self.user, title='Chicken cacciatore')
        ingredient1 = sample_ingredient(user=self.user, name='Feta cheese')
        ingredient2 = sample_ingredient(user=self.user, name='Chicken')
        recipe1.ingredients.add(ingredient1)
        recipe2.ingredients.add(ingredient2)
        recipe3 = sample_recipe(user=self.user, title='Steak and mushrooms')

        # test the API filter
        res = self.client.get(
            RECIPES_URL,
            {'ingredients': '{},{}'.format(ingredient1.id, ingredient2.id)}
        )

        serializer1 = RecipeSerializer(recipe1)
        serializer2 = RecipeSerializer(recipe2)
        serializer3 = RecipeSerializer(recipe3)
        self.assertIn(serializer1.data, res.data)
        self.assertIn(serializer2.data, res.data)
        self.assertNotIn(serializer3.data, res.data)
for key in payload.keys(): self.assertEqual(payload[key],", "= { 'title': 'Test recipe', 'time_minutes': 30, 'price': 10.00, }", "RecipeSerializer(recipe2) serializer3 = RecipeSerializer(recipe3) # serialize the recipes and we're", "returning recipes with specific ingredients\"\"\" recipe1 = sample_recipe(user=self.user, title='Posh beans", "# fields that are provided and any fields that are", "in a list of the # arguments you want to", "model the details of that won't change # unless you", "= Recipe.objects.get(id=res.data['id']) # When you create an object using the", "1') ingredient2 = sample_ingredient(user=self.user, name='Ingredient 2') payload = { 'title':", "it would # just be blank because you've already read", "object self.assertEqual(res.data, serializer.data) def test_create_basic_recipe(self): \"\"\"Test creating recipe\"\"\" payload =", "self.assertEqual(len(tags), 1) self.assertIn(new_tag, tags) # check that the tag new", "retrieve the tags that were created with the recipe self.assertEqual(tags.count(),", "are omitted from # the request will not be modified", "return a sample recipe\"\"\" defaults = { 'title': 'Sample recipe',", "router will create # for our viewset because we're going", "named temporary file on the system at a random #", "tags are # the same as the tags that are", "RECIPES_URL, {'tags': '{},{}'.format(tag1.id, tag2.id)} ) # this will create a", "in our payload object # getattr: that allows you to", "with specific tags\"\"\" recipe1 = sample_recipe(user=self.user, title='Thai vegetable curry') recipe2", "Tag, Ingredient from ..serializers import RecipeSerializer, RecipeDetailSerializer import tempfile #", "from rest_framework.test import APIClient # use that for making our", "is we're going to loop through each # one of", "recipes\"\"\" sample_recipe(user=self.user) sample_recipe(user=self.user) # we're going to access them by", "recipe.tags.all() self.assertEqual(len(tags), 0) # we will check that the tags", "full update # put: it will replace the object that", "checking if files exist on the system from PIL import", "self.assertIn(new_tag, tags) # check that the tag new tag is", "in the request that means if you exclude # any", "def test_filter_recipes_by_ingredients(self): \"\"\"Test returning recipes with specific ingredients\"\"\" recipe1 =", "exclude # any fields in the payload those fields will", "payload[key]: This will actually get the value of the #", "as ntf: img = Image.new('RGB', (10, 10)) # creates black", "going to check if # they exist in the responses", "self.assertEqual(tags.count(), 2) # because we expect two tags to be", "tags that we retrieved # test full update # put:", "1) self.assertIn(new_tag, tags) # check that the tag new tag", "self.assertIn(serializer1.data, res.data) self.assertIn(serializer2.data, res.data) self.assertNotIn(serializer3.data, res.data) # check the return", "# the created object This is how I know that", "def sample_ingredient(user, name='Cinnamon'): \"\"\"Create and return a sample ingredient\"\"\" return", "payload so the only fields that it will change are", "to the end # of the file so use this", "separated list string and assign # it to the tags", "the # API there's two different HTTP methods: put, patch", "name='Ingredient 2') payload = { 'title': 'Test recipe with ingredients',", "variable. 
(instead of recipe.key) def test_create_recipe_with_tags(self): \"\"\"Test creating a recipe", "# arguments you want to add # here we have", "\"\"\"Test retrieving list of recipes\"\"\" sample_recipe(user=self.user) sample_recipe(user=self.user) # we're going", "patch # patch: Patch is used to update the fields" ]
# Assignment 1 Day 8
# write a decorator function for taking input for you
# any kind of function you want to build
def getInput(calculate_arg_fuc):
    def wrap_function():
        print("Enter two numbers ")
        a = int(input("Enter first number = "))
        b = int(input("Enter second number = "))
        calculate_arg_fuc(a, b)
    return wrap_function

@getInput
def addition(num1, num2):
    print("Addition = ", num1 + num2)

@getInput
def subtraction(num1, num2):
    print("Subtraction = ", num1 - num2)

@getInput
def multiplication(num1, num2):
    print("Multiplication = ", num1 * num2)

@getInput
def division(num1, num2):
    print("Division = ", num1 / num2)

addition()
subtraction()
multiplication()
division()

# Assignment 2 day 8
# you need to develop a python program to open a file in read only mode and
# try writing something to it and handle the subsequent error using Exception Handling
try:
    f = open("abc.txt", "r")
    f.write("Heyy, i am prajval")
    f.close()
except:
    print("File is in read only mode...")
[ "2D coordinate frame. Parameters ---------- axes_order : tuple of int", "of this frame. \"\"\" def __init__(self, axes_order=(0, 1), unit=(u.pix, u.pix),", "in self.frames: fargs = [args[i] for i in frame.axes_order] print(frame,", "self.__class__.__name__ else: self._name = name if reference_position is not None:", "raise ValueError(\"Number of units does not match number of axes.\")", "def coordinates(self, *args): args = [args[i] for i in self.axes_order]", "'distance' in _axes_names: _axes_names.remove('distance') if axes_names is None: axes_names =", "super(CoordinateFrame, self).__init__() def __repr__(self): fmt = '<{0}(name=\"{1}\", unit={2}, axes_names={3}, axes_order={4}'.format(", "number of axes.\") else: axes_names = tuple([\"\"] * naxes) self._axes_names", "str Reference position. unit : str or units.Unit instance or", "The number of axes intheis frame.\"\"\" return self._naxes @property def", "current frame is used to compute the mapping inputs: outputs.", "= [] for frame in frames: axes_order.extend(frame.axes_order) for frame in", ": list of astropy.units.Unit Unit for each axis. axes_names :", "a SkyCoord object. Parameters ---------- args : float inputs to", "not match number of axes.\") else: axes_names = tuple([\"\"] *", ": str Reference position - one of `STANDARD_REFERENCE_POSITION` unit :", "def unit(self): \"\"\"The unit of this frame.\"\"\" return self._unit @property", "= (unit,) if len(unit) != naxes: raise ValueError(\"Number of units", "A custom name of this frame.\"\"\" self._name = val @property", "units.Unit instance Spectral unit. axes_names : str Spectral axis name.", "import coordinates as coord from astropy.extern import six from .", "reference_frame(self): return self._reference_frame @property def reference_position(self): try: return self._reference_position except", "!= naxes: raise ValueError(\"Number of axes names does not match", "is not None: fmt += ', reference_position=\"{0}\"'.format(self.reference_position) if self.reference_frame is", "= list(reference_frame.representation_component_names.values()) if 'distance' in _axes_names: _axes_names.remove('distance') if axes_names is", "None: axes_order = tuple(range(naxes)) if unit is None: unit =", "= list(range(naxes)) unit = list(range(naxes)) axes_names = list(range(naxes)) axes_order =", "self._reference_frame @property def reference_position(self): try: return self._reference_position except AttributeError: return", "unit(self): \"\"\"The unit of this frame.\"\"\" return self._unit @property def", "in this frame. name : str Name of this frame.", "!= len(axes_order): raise ValueError(\"Incorrect numbering of axes, \" \"axes_order should", "frame.axes_order) coo.append(frame.coordinates(*fargs)) return coo class Frame2D(CoordinateFrame): \"\"\" A 2D coordinate", "= [args[i] for i in self.axes_order] coo = tuple([arg *", "---------- args : float inputs to wcs.input_frame \"\"\" # Reorder", "super(CompositeFrame, self).__init__(naxes, axes_type=axes_type, axes_order=axes_order, unit=unit, axes_names=axes_names, name=name) @property def frames(self):", "= None super(CoordinateFrame, self).__init__() def __repr__(self): fmt = '<{0}(name=\"{1}\", unit={2},", "axtype axes_names[ind] = n unit[ind] = un if len(np.unique(axes_order)) !=", "# Licensed under a 3-clause BSD style license - see", "axes_names : str Spectral axis name. 
name : str Name", "---------- frames : list List of frames (TimeFrame, CelestialFrame, SpectralFrame,", "unit = (unit,) if len(unit) != naxes: raise ValueError(\"Number of", "[args[i] for i in frame.axes_order] print(frame, fargs, frame.axes_order) coo.append(frame.coordinates(*fargs)) return", "fmt += \", reference_frame={0}\".format(self.reference_frame) fmt += \")>\" return fmt def", "corresponds to this axis. unit : list of astropy.units.Unit Unit", "if isinstance(axes_names, six.string_types): axes_names = (axes_names,) else: axes_names = tuple(axes_names)", "is not None: return self._name else: return self.__class__.__name__ @property def", "self.__class__.__name__, self.name, self.unit, self.axes_names, self.axes_order) if self.reference_position is not None:", ": astropy.coordinates.builtin_frames A reference frame. reference_position : str Reference position.", "i in frame.axes_order] print(frame, fargs, frame.axes_order) coo.append(frame.coordinates(*fargs)) return coo class", "axes_names = (axes_names,) else: axes_names = tuple(axes_names) if len(axes_names) !=", "Units on axes. axes_names : list Names of the axes", "float inputs to wcs.input_frame \"\"\" # Reorder axes if necesary.", "un, n in zip(frame.axes_order, frame.axes_type, frame.unit, frame.axes_names): axes_type[ind] = axtype", "coo.append(frame.coordinates(*fargs)) return coo class Frame2D(CoordinateFrame): \"\"\" A 2D coordinate frame.", "* self.unit[0] class CompositeFrame(CoordinateFrame): \"\"\" Represents one or more frames.", "used with output_frame to convert to world coordinate objects). reference_position", "axis. reference_frame : astropy.coordinates.builtin_frames A reference frame. reference_position : str", "or int A dimension in the input data that corresponds", "instance Spectral unit. axes_names : str Spectral axis name. name", "reference_frame=reference_frame, unit=unit, name=name, reference_position=reference_position) def coordinates(self, *args): if np.isscalar(args): return", "if reference_frame.name.upper() in STANDARD_REFERENCE_FRAMES: _axes_names = list(reference_frame.representation_component_names.values()) if 'distance' in", "frame.\"\"\" return self._naxes @property def unit(self): \"\"\"The unit of this", "axes_type=\"SPECTRAL\", axes_order=axes_order, axes_names=axes_names, reference_frame=reference_frame, unit=unit, name=name, reference_position=reference_position) def coordinates(self, *args):", "fmt = '<{0}(name=\"{1}\", unit={2}, axes_names={3}, axes_order={4}'.format( self.__class__.__name__, self.name, self.unit, self.axes_names,", "'CoordinateFrame'] STANDARD_REFERENCE_FRAMES = [frame.upper() for frame in coord.builtin_frames.__all__] STANDARD_REFERENCE_POSITION =", "Name of this frame. \"\"\" def __init__(self, axes_order=None, reference_frame=None, unit=None,", "else: self._unit = tuple([u.Unit(au) for au in unit]) if axes_names", "Name of this frame. \"\"\" def __init__(self, naxes, axes_type, axes_order,", "list(reference_frame.representation_component_names.values()) if 'distance' in _axes_names: _axes_names.remove('distance') if axes_names is None:", "astropy.extern import six from . 
import utils as gwutils __all__", "\", reference_frame={0}\".format(self.reference_frame) fmt += \")>\" return fmt def __str__(self): if", "\"\"\" Create world coordinates object\"\"\" raise NotImplementedError(\"Subclasses may implement this\")", "= _axes_names naxes = len(_axes_names) _unit = list(reference_frame.representation_component_units.values()) if unit", "if unit is None and _unit: unit = _unit if", "axes intheis frame.\"\"\" return self._naxes @property def unit(self): \"\"\"The unit", "= len(_axes_names) _unit = list(reference_frame.representation_component_units.values()) if unit is None and", "unit[ind] = un if len(np.unique(axes_order)) != len(axes_order): raise ValueError(\"Incorrect numbering", "name. name : str Name for this frame. \"\"\" def", "naxes, axes_type, axes_order, reference_frame=None, reference_position=None, unit=None, axes_names=None, name=None): self._naxes =", "---------- axes_order : tuple or int A dimension in the", "(TimeFrame, CelestialFrame, SpectralFrame, CoordinateFrame). name : str Name for this", "gwutils __all__ = ['Frame2D', 'CelestialFrame', 'SpectralFrame', 'CompositeFrame', 'CoordinateFrame'] STANDARD_REFERENCE_FRAMES =", "self._axes_type = (axes_type,) else: self._axes_type = tuple(axes_type) self._reference_frame = reference_frame", "self).__init__(naxes, axes_type=axes_type, axes_order=axes_order, unit=unit, axes_names=axes_names, name=name) @property def frames(self): return", "mapping inputs: outputs. \"\"\" sep = self._separable(start_frame) inputs = []", "Spectral axis name. name : str Name for this frame.", "unit = tuple(unit) else: unit = (unit,) if len(unit) !=", "naxes : int Number of axes. axes_type : str One", "\"\"\" Create a SkyCoord object. Parameters ---------- args : float", "str Name for this frame. 
\"\"\" def __init__(self, frames, name=None):", "None: self._reference_position = reference_position else: self._reference_position = None super(CoordinateFrame, self).__init__()", "unit]) if axes_names is not None: if isinstance(axes_names, six.string_types): axes_names", "axes_names=axes_names, name=name) @property def frames(self): return self._frames def __repr__(self): return", "WCS pipeline The transform between start_frame and the current frame", "ValueError(\"Incorrect numbering of axes, \" \"axes_order should contain unique numbers,", "return args[0] * self.unit[0] class CompositeFrame(CoordinateFrame): \"\"\" Represents one or", "else: return args[0] * self.unit[0] class CompositeFrame(CoordinateFrame): \"\"\" Represents one", "fmt += ', reference_position=\"{0}\"'.format(self.reference_position) if self.reference_frame is not None: fmt", "axes_names=axes_names, name=name) def coordinates(self, *args): \"\"\" Create a SkyCoord object.", "axes_names = tuple([\"\"] * naxes) self._axes_names = axes_names if name", "self._naxes = naxes self._axes_order = tuple(axes_order) if isinstance(axes_type, six.string_types): self._axes_type", "---------- axes_order : tuple of int A dimension in the", "in unit]) if axes_names is not None: if isinstance(axes_names, six.string_types):", "tuple of indices which map inputs to axes.\"\"\" return self._axes_order", "= axes_names if name is None: self._name = self.__class__.__name__ else:", "axes_names = list(range(naxes)) axes_order = [] for frame in frames:", "reference_position=None, unit=None, axes_names=None, name=None): self._naxes = naxes self._axes_order = tuple(axes_order)", "except: raise class SpectralFrame(CoordinateFrame): \"\"\" Represents Spectral Frame Parameters ----------", "Names of axes in the frame.\"\"\" return self._axes_names @property def", "input_axes(self, start_frame=None): \"\"\" Computes which axes in `start_frame` contribute to", "_axes_names.remove('distance') if axes_names is None: axes_names = _axes_names naxes =", "is None: axes_order = tuple(range(naxes)) if unit is None: unit", "axis. axes_names : list Names of the axes in this", "2 if reference_frame is not None: if reference_frame.name.upper() in STANDARD_REFERENCE_FRAMES:", "try: return self._reference_position except AttributeError: return None def input_axes(self, start_frame=None):", "import units as u from astropy import utils as astutil", "each axis in the current frame. Parameters ---------- start_frame :", "if reference_frame is not None: if reference_frame.name.upper() in STANDARD_REFERENCE_FRAMES: _axes_names", "frames: axes_order.extend(frame.axes_order) for frame in frames: for ind, axtype, un,", "transform between start_frame and the current frame is used to", "= tuple([\"\"] * naxes) self._axes_names = axes_names if name is", "if necesary. try: return coord.SkyCoord(*args, unit=self.unit, frame=self._reference_frame) except: raise class", "SpectralFrame, CoordinateFrame). name : str Name for this frame. \"\"\"", "def name(self, val): \"\"\" A custom name of this frame.\"\"\"", "absolute_import, division, unicode_literals, print_function import numpy as np from astropy", "not match number of axes.\") else: self._unit = tuple([u.Unit(au) for", "axis in the current frame. Parameters ---------- start_frame : ~gwcs.coordinate_frames.CoordinateFrame", "to each axis in the current frame. 
Parameters ---------- start_frame", "str Reference position - one of `STANDARD_REFERENCE_POSITION` unit : list", "return self._axes_order @property def reference_frame(self): return self._reference_frame @property def reference_position(self):", "name of this frame.\"\"\" self._name = val @property def naxes(self):", "= ['SPATIAL'] * naxes super(CelestialFrame, self).__init__(naxes=naxes, axes_type=axes_type, axes_order=axes_order, reference_frame=reference_frame, unit=unit,", "list of astropy.units.Unit Unit for each axis. axes_names : list", "\"\"\" Names of axes in the frame.\"\"\" return self._axes_names @property", "return self._axes_names @property def axes_order(self): \"\"\" A tuple of indices", "naxes) self._axes_names = axes_names if name is None: self._name =", "return self._reference_frame @property def reference_position(self): try: return self._reference_position except AttributeError:", "import six from . import utils as gwutils __all__ =", "repr(self.frames) def coordinates(self, *args): coo = [] for frame in", "str One of [\"SPATIAL\", \"SPECTRAL\", \"TIME\"] axes_order : tuple of", "= (axes_type,) else: self._axes_type = tuple(axes_type) self._reference_frame = reference_frame if", "map inputs to axes.\"\"\" return self._axes_order @property def reference_frame(self): return", "axes_type = list(range(naxes)) unit = list(range(naxes)) axes_names = list(range(naxes)) axes_order", "this axis. reference_frame : astropy.coordinates.builtin_frames A reference frame. reference_position :", ": ~gwcs.coordinate_frames.CoordinateFrame A frame in the WCS pipeline The transform", "if isinstance(axes_type, six.string_types): self._axes_type = (axes_type,) else: self._axes_type = tuple(axes_type)", "astropy.coordinates.builtin_frames A reference frame. reference_position : str Reference position. unit", "axes_order = [] for frame in frames: axes_order.extend(frame.axes_order) for frame", "this frame.\"\"\" return self._name @name.setter def name(self, val): \"\"\" A", "print_function import numpy as np from astropy import units as", "self._unit @property def axes_names(self): \"\"\" Names of axes in the", "axes_type(self): \"\"\" Type of this frame : 'SPATIAL', 'SPECTRAL', 'TIME'.", "1), unit=(u.pix, u.pix), axes_names=('x', 'y'), name=None): super(Frame2D, self).__init__(2, [\"SPATIAL\", \"SPATIAL\"],", "\"\"\" Defines coordinate frames and ties them to data axes.", "A 2D coordinate frame. Parameters ---------- axes_order : tuple of", "does not match number of axes.\") else: self._unit = tuple([u.Unit(au)", "isinstance(axes_type, six.string_types): self._axes_type = (axes_type,) else: self._axes_type = tuple(axes_type) self._reference_frame", "tuple of int A dimension in the input data that", "\"\"\" A 2D coordinate frame. 
Parameters ---------- axes_order : tuple", "* naxes) axes_type = ['SPATIAL'] * naxes super(CelestialFrame, self).__init__(naxes=naxes, axes_type=axes_type,", "axes_names=('x', 'y'), name=None): super(Frame2D, self).__init__(2, [\"SPATIAL\", \"SPATIAL\"], axes_order, name=name, axes_names=axes_names,", "frames : list List of frames (TimeFrame, CelestialFrame, SpectralFrame, CoordinateFrame).", "axes_names=None, name=None): naxes = 2 if reference_frame is not None:", "frame (usually used with output_frame to convert to world coordinate", "* naxes super(CelestialFrame, self).__init__(naxes=naxes, axes_type=axes_type, axes_order=axes_order, reference_frame=reference_frame, unit=unit, axes_names=axes_names, name=name)", "axes_type, axes_order, reference_frame=None, reference_position=None, unit=None, axes_names=None, name=None): self._naxes = naxes", "to convert to world coordinate objects). reference_position : str Reference", "AttributeError: return None def input_axes(self, start_frame=None): \"\"\" Computes which axes", "those Units on axes. axes_names : list Names of the", "if np.isscalar(args): return args * self.unit[0] else: return args[0] *", "in coord.builtin_frames.__all__] STANDARD_REFERENCE_POSITION = [\"GEOCENTER\", \"BARYCENTER\", \"HELIOCENTER\", \"TOPOCENTER\", \"LSR\", \"LSRK\",", "ValueError(\"Number of units does not match number of axes.\") else:", ": tuple of int A dimension in the input data", "A reference frame. reference_position : str Reference position. unit :", "self.axes_order: inputs.append(list(sep[ax].nonzero()[0])) return inputs @property def axes_type(self): \"\"\" Type of", "on axes. axes_names : list Names of the axes in", "if axes_order is None: axes_order = tuple(range(naxes)) if unit is", "with output_frame to convert to world coordinate objects). reference_position :", "else: return self.__class__.__name__ @property def name(self): \"\"\" A custom name", "args = [args[i] for i in self.axes_order] coo = tuple([arg", "one or more frames. Parameters ---------- frames : list List", "return repr(self.frames) def coordinates(self, *args): coo = [] for frame", "the input data that corresponds to this axis. unit :", "name=name) @property def frames(self): return self._frames def __repr__(self): return repr(self.frames)", "and _unit: unit = _unit if axes_order is None: axes_order", "self._unit = tuple([u.Unit(au) for au in unit]) if axes_names is", "axes_order=axes_order, reference_frame=reference_frame, unit=unit, axes_names=axes_names, name=name) def coordinates(self, *args): \"\"\" Create", "axes_names[ind] = n unit[ind] = un if len(np.unique(axes_order)) != len(axes_order):", ": float inputs to wcs.input_frame \"\"\" # Reorder axes if", "raise ValueError(\"Incorrect numbering of axes, \" \"axes_order should contain unique", "unique numbers, \" \"got {}.\".format(axes_order)) super(CompositeFrame, self).__init__(naxes, axes_type=axes_type, axes_order=axes_order, unit=unit,", "A dimension in the input data that corresponds to this", "of units does not match number of axes.\") else: self._unit", "start_frame : ~gwcs.coordinate_frames.CoordinateFrame A frame in the WCS pipeline The", "super(CelestialFrame, self).__init__(naxes=naxes, axes_type=axes_type, axes_order=axes_order, reference_frame=reference_frame, unit=unit, axes_names=axes_names, name=name) def coordinates(self,", "Defines coordinate frames and ties them to data axes. 
\"\"\"", "if len(unit) != naxes: raise ValueError(\"Number of units does not", "return None def input_axes(self, start_frame=None): \"\"\" Computes which axes in", "self._name = val @property def naxes(self): \"\"\" The number of", "= [] for ax in self.axes_order: inputs.append(list(sep[ax].nonzero()[0])) return inputs @property", "reference_position=None): super(SpectralFrame, self).__init__(naxes=1, axes_type=\"SPECTRAL\", axes_order=axes_order, axes_names=axes_names, reference_frame=reference_frame, unit=unit, name=name, reference_position=reference_position)", "frames. Parameters ---------- frames : list List of frames (TimeFrame,", "between start_frame and the current frame is used to compute", "name=None, reference_position=None): super(SpectralFrame, self).__init__(naxes=1, axes_type=\"SPECTRAL\", axes_order=axes_order, axes_names=axes_names, reference_frame=reference_frame, unit=unit, name=name,", "reference_position is not None: self._reference_position = reference_position else: self._reference_position =", "= tuple(range(naxes)) if unit is None: unit = tuple([u.degree] *", ": str Name of this frame. \"\"\" def __init__(self, axes_order=(0,", "naxes self._axes_order = tuple(axes_order) if isinstance(axes_type, six.string_types): self._axes_type = (axes_type,)", "Licensed under a 3-clause BSD style license - see LICENSE.rst", "with output_frame to convert to world coordinate objects). unit :", "frame. Parameters ---------- axes_order : tuple of int A dimension", "tuple([arg * un for arg, un in zip(args, self.unit)]) return", "Base class for CoordinateFrames. Parameters ---------- naxes : int Number", "u.pix), axes_names=('x', 'y'), name=None): super(Frame2D, self).__init__(2, [\"SPATIAL\", \"SPATIAL\"], axes_order, name=name,", "def coordinates(self, *args): \"\"\" Create world coordinates object\"\"\" raise NotImplementedError(\"Subclasses", "data axes. \"\"\" from __future__ import absolute_import, division, unicode_literals, print_function", "self.axes_names, self.axes_order) if self.reference_position is not None: fmt += ',", "as u from astropy import utils as astutil from astropy", "axes_order : tuple or int A dimension in the input", "axes_order=(0, 1), unit=(u.pix, u.pix), axes_names=('x', 'y'), name=None): super(Frame2D, self).__init__(2, [\"SPATIAL\",", "of `STANDARD_REFERENCE_POSITION` unit : list of astropy.units.Unit Unit for each", "Create a SkyCoord object. Parameters ---------- args : float inputs", "\" \"axes_order should contain unique numbers, \" \"got {}.\".format(axes_order)) super(CompositeFrame,", ": str Reference position. unit : str or units.Unit instance", "from astropy import utils as astutil from astropy import coordinates", "to wcs.input_frame \"\"\" # Reorder axes if necesary. try: return", "return self._name else: return self.__class__.__name__ @property def name(self): \"\"\" A", "to axes.\"\"\" return self._axes_order @property def reference_frame(self): return self._reference_frame @property", "`start_frame` contribute to each axis in the current frame. 
Parameters", "axes_order is None: axes_order = tuple(range(naxes)) if unit is None:", "= list(range(naxes)) axes_names = list(range(naxes)) axes_order = [] for frame", "frame.unit, frame.axes_names): axes_type[ind] = axtype axes_names[ind] = n unit[ind] =", "self._name = self.__class__.__name__ else: self._name = name if reference_position is", "= _unit if axes_order is None: axes_order = tuple(range(naxes)) if", "position - one of `STANDARD_REFERENCE_POSITION` unit : list of astropy.units.Unit", "@property def name(self): \"\"\" A custom name of this frame.\"\"\"", "---------- start_frame : ~gwcs.coordinate_frames.CoordinateFrame A frame in the WCS pipeline", "import numpy as np from astropy import units as u", "naxes) axes_type = ['SPATIAL'] * naxes super(CelestialFrame, self).__init__(naxes=naxes, axes_type=axes_type, axes_order=axes_order,", "frame in frames: axes_order.extend(frame.axes_order) for frame in frames: for ind,", "= tuple(axes_order) if isinstance(axes_type, six.string_types): self._axes_type = (axes_type,) else: self._axes_type", "self._name is not None: return self._name else: return self.__class__.__name__ @property", "tuple(range(naxes)) if unit is None: unit = tuple([u.degree] * naxes)", "int A dimension in the input data that corresponds to", "reference_position : str Reference position - one of `STANDARD_REFERENCE_POSITION` unit", "unit : list of astropy.units.Unit Unit for each axis. axes_names", "frame : 'SPATIAL', 'SPECTRAL', 'TIME'. \"\"\" return self._axes_type def coordinates(self,", "not None: return self._name else: return self.__class__.__name__ @property def name(self):", "of int A dimension in the input data that corresponds", "to this axis. reference_frame : astropy.coordinates.builtin_frames A reference frame. reference_position", "__repr__(self): return repr(self.frames) def coordinates(self, *args): coo = [] for", "axes_order : tuple of int A dimension in the input", "if self.reference_frame is not None: fmt += \", reference_frame={0}\".format(self.reference_frame) fmt", "import absolute_import, division, unicode_literals, print_function import numpy as np from", "axes.\") else: self._unit = tuple([u.Unit(au) for au in unit]) if", "under a 3-clause BSD style license - see LICENSE.rst \"\"\"", "to this axis. reference_frame : astropy.coordinates.builtin_frames Reference frame (usually used", "necesary. try: return coord.SkyCoord(*args, unit=self.unit, frame=self._reference_frame) except: raise class SpectralFrame(CoordinateFrame):", "\"SPATIAL\"], axes_order, name=name, axes_names=axes_names, unit=unit) def coordinates(self, *args): args =", "None: return self._name else: return self.__class__.__name__ @property def name(self): \"\"\"", "def coordinates(self, *args): if np.isscalar(args): return args * self.unit[0] else:", "coo = tuple([arg * un for arg, un in zip(args,", "def __init__(self, frames, name=None): self._frames = frames[:] naxes = sum([frame._naxes", "len(axes_names) != naxes: raise ValueError(\"Number of axes names does not", "object. 
Parameters ---------- args : float inputs to wcs.input_frame \"\"\"", "unit=self.unit, frame=self._reference_frame) except: raise class SpectralFrame(CoordinateFrame): \"\"\" Represents Spectral Frame", "[] for ax in self.axes_order: inputs.append(list(sep[ax].nonzero()[0])) return inputs @property def", "coordinates(self, *args): if np.isscalar(args): return args * self.unit[0] else: return", "unit=None, axes_names=None, name=None): self._naxes = naxes self._axes_order = tuple(axes_order) if", "np from astropy import units as u from astropy import", "as np from astropy import units as u from astropy", "objects). reference_position : str Reference position - one of `STANDARD_REFERENCE_POSITION`", "reference_position=reference_position) def coordinates(self, *args): if np.isscalar(args): return args * self.unit[0]", "this frame. \"\"\" def __init__(self, axes_order=(0,), reference_frame=None, unit=None, axes_names=None, name=None,", "axes_names=None, name=None, reference_position=None): super(SpectralFrame, self).__init__(naxes=1, axes_type=\"SPECTRAL\", axes_order=axes_order, axes_names=axes_names, reference_frame=reference_frame, unit=unit,", "coo class Frame2D(CoordinateFrame): \"\"\" A 2D coordinate frame. Parameters ----------", "A tuple of indices which map inputs to axes.\"\"\" return", "to convert to world coordinate objects). unit : str or", "division, unicode_literals, print_function import numpy as np from astropy import", "list Names of the axes in this frame. name :", "units does not match number of axes.\") else: self._unit =", "the WCS pipeline The transform between start_frame and the current", "is not None: if reference_frame.name.upper() in STANDARD_REFERENCE_FRAMES: _axes_names = list(reference_frame.representation_component_names.values())", "return coo class Frame2D(CoordinateFrame): \"\"\" A 2D coordinate frame. Parameters", "name(self, val): \"\"\" A custom name of this frame.\"\"\" self._name", "Represents one or more frames. Parameters ---------- frames : list", "is used to compute the mapping inputs: outputs. \"\"\" sep", "object\"\"\" raise NotImplementedError(\"Subclasses may implement this\") class CelestialFrame(CoordinateFrame): \"\"\" Celestial", "if unit is None: unit = tuple([u.degree] * naxes) axes_type", "!= naxes: raise ValueError(\"Number of units does not match number", "does not match number of axes.\") else: axes_names = tuple([\"\"]", "and ties them to data axes. 
\"\"\" from __future__ import", "naxes super(CelestialFrame, self).__init__(naxes=naxes, axes_type=axes_type, axes_order=axes_order, reference_frame=reference_frame, unit=unit, axes_names=axes_names, name=name) def", "return inputs @property def axes_type(self): \"\"\" Type of this frame", "val @property def naxes(self): \"\"\" The number of axes intheis", "None: fmt += ', reference_position=\"{0}\"'.format(self.reference_position) if self.reference_frame is not None:", "(usually used with output_frame to convert to world coordinate objects).", "= tuple([arg * un for arg, un in zip(args, self.unit)])", "which map inputs to axes.\"\"\" return self._axes_order @property def reference_frame(self):", "self._name else: return self.__class__.__name__ @property def name(self): \"\"\" A custom", "+= \")>\" return fmt def __str__(self): if self._name is not", "A frame in the WCS pipeline The transform between start_frame", "utils as gwutils __all__ = ['Frame2D', 'CelestialFrame', 'SpectralFrame', 'CompositeFrame', 'CoordinateFrame']", "*args): if np.isscalar(args): return args * self.unit[0] else: return args[0]", "= name if reference_position is not None: self._reference_position = reference_position", "[frame.upper() for frame in coord.builtin_frames.__all__] STANDARD_REFERENCE_POSITION = [\"GEOCENTER\", \"BARYCENTER\", \"HELIOCENTER\",", ": list Names of the axes in this frame. name", "str Name for this frame. \"\"\" def __init__(self, axes_order=(0,), reference_frame=None,", "axes_order=axes_order, unit=unit, axes_names=axes_names, name=name) @property def frames(self): return self._frames def", "in zip(frame.axes_order, frame.axes_type, frame.unit, frame.axes_names): axes_type[ind] = axtype axes_names[ind] =", "* naxes) self._axes_names = axes_names if name is None: self._name", "for ind, axtype, un, n in zip(frame.axes_order, frame.axes_type, frame.unit, frame.axes_names):", "tuple or int A dimension in the input data that", "return args * self.unit[0] else: return args[0] * self.unit[0] class", "unit={2}, axes_names={3}, axes_order={4}'.format( self.__class__.__name__, self.name, self.unit, self.axes_names, self.axes_order) if self.reference_position", "def __init__(self, naxes, axes_type, axes_order, reference_frame=None, reference_position=None, unit=None, axes_names=None, name=None):", "* un for arg, un in zip(args, self.unit)]) return coo", "def name(self): \"\"\" A custom name of this frame.\"\"\" return", "reference_position else: self._reference_position = None super(CoordinateFrame, self).__init__() def __repr__(self): fmt", "self._frames = frames[:] naxes = sum([frame._naxes for frame in self._frames])", "for frame in coord.builtin_frames.__all__] STANDARD_REFERENCE_POSITION = [\"GEOCENTER\", \"BARYCENTER\", \"HELIOCENTER\", \"TOPOCENTER\",", "= tuple(axes_names) if len(axes_names) != naxes: raise ValueError(\"Number of axes", "sep = self._separable(start_frame) inputs = [] for ax in self.axes_order:", "reference_frame=None, unit=None, axes_names=None, name=None): naxes = 2 if reference_frame is", "naxes(self): \"\"\" The number of axes intheis frame.\"\"\" return self._naxes", "frame=self._reference_frame) except: raise class SpectralFrame(CoordinateFrame): \"\"\" Represents Spectral Frame Parameters", "coordinate frames and ties them to data axes. \"\"\" from", "self.__class__.__name__ @property def name(self): \"\"\" A custom name of this", "to this axis. 
unit : list of astropy.units.Unit Unit for", "Parameters ---------- frames : list List of frames (TimeFrame, CelestialFrame,", "@name.setter def name(self, val): \"\"\" A custom name of this", "frames: for ind, axtype, un, n in zip(frame.axes_order, frame.axes_type, frame.unit,", "\"SPECTRAL\", \"TIME\"] axes_order : tuple of int A dimension in", "None: if astutil.isiterable(unit): unit = tuple(unit) else: unit = (unit,)", "- one of `STANDARD_REFERENCE_POSITION` unit : list of astropy.units.Unit Unit", "output_frame to convert to world coordinate objects). reference_position : str", "six.string_types): self._axes_type = (axes_type,) else: self._axes_type = tuple(axes_type) self._reference_frame =", "or units.Unit instance or iterable of those Units on axes.", "of those Units on axes. axes_names : list Names of", "= n unit[ind] = un if len(np.unique(axes_order)) != len(axes_order): raise", "\"\"\" def __init__(self, axes_order=(0, 1), unit=(u.pix, u.pix), axes_names=('x', 'y'), name=None):", "unit=unit) def coordinates(self, *args): args = [args[i] for i in", "unit=None, axes_names=None, name=None): naxes = 2 if reference_frame is not", "args * self.unit[0] else: return args[0] * self.unit[0] class CompositeFrame(CoordinateFrame):", "`STANDARD_REFERENCE_POSITION` unit : list of astropy.units.Unit Unit for each axis.", "self.frames: fargs = [args[i] for i in frame.axes_order] print(frame, fargs,", "reference_frame if unit is not None: if astutil.isiterable(unit): unit =", "_axes_names: _axes_names.remove('distance') if axes_names is None: axes_names = _axes_names naxes", ": str Spectral axis name. name : str Name for", "them to data axes. \"\"\" from __future__ import absolute_import, division,", ": str Name of this frame. \"\"\" def __init__(self, axes_order=None,", "len(_axes_names) _unit = list(reference_frame.representation_component_units.values()) if unit is None and _unit:", "if reference_position is not None: self._reference_position = reference_position else: self._reference_position", "more frames. Parameters ---------- frames : list List of frames", "not None: fmt += \", reference_frame={0}\".format(self.reference_frame) fmt += \")>\" return", "un if len(np.unique(axes_order)) != len(axes_order): raise ValueError(\"Incorrect numbering of axes,", "name=name, axes_names=axes_names, unit=unit) def coordinates(self, *args): args = [args[i] for", "def __init__(self, axes_order=(0,), reference_frame=None, unit=None, axes_names=None, name=None, reference_position=None): super(SpectralFrame, self).__init__(naxes=1,", "unit : str or units.Unit instance or iterable of those", "fargs, frame.axes_order) coo.append(frame.coordinates(*fargs)) return coo class Frame2D(CoordinateFrame): \"\"\" A 2D", "coordinates(self, *args): \"\"\" Create a SkyCoord object. Parameters ---------- args", "inputs @property def axes_type(self): \"\"\" Type of this frame :", "axes_type : str One of [\"SPATIAL\", \"SPECTRAL\", \"TIME\"] axes_order :", "coord.builtin_frames.__all__] STANDARD_REFERENCE_POSITION = [\"GEOCENTER\", \"BARYCENTER\", \"HELIOCENTER\", \"TOPOCENTER\", \"LSR\", \"LSRK\", \"LSRD\",", "frames(self): return self._frames def __repr__(self): return repr(self.frames) def coordinates(self, *args):", "to data axes. 
\"\"\" from __future__ import absolute_import, division, unicode_literals,", "= 2 if reference_frame is not None: if reference_frame.name.upper() in", "_unit: unit = _unit if axes_order is None: axes_order =", "*args): args = [args[i] for i in self.axes_order] coo =", "reference_frame : astropy.coordinates.builtin_frames Reference frame (usually used with output_frame to", "this frame. name : str Name of this frame. \"\"\"", "name : str Name of this frame. \"\"\" def __init__(self,", "frame.axes_type, frame.unit, frame.axes_names): axes_type[ind] = axtype axes_names[ind] = n unit[ind]", "axes.\") else: axes_names = tuple([\"\"] * naxes) self._axes_names = axes_names", "ax in self.axes_order: inputs.append(list(sep[ax].nonzero()[0])) return inputs @property def axes_type(self): \"\"\"", ": str Name for this frame. \"\"\" def __init__(self, axes_order=(0,),", "'SPATIAL', 'SPECTRAL', 'TIME'. \"\"\" return self._axes_type def coordinates(self, *args): \"\"\"", "ties them to data axes. \"\"\" from __future__ import absolute_import,", "to world coordinate objects). reference_position : str Reference position -", "may implement this\") class CelestialFrame(CoordinateFrame): \"\"\" Celestial Frame Representation Parameters", "to compute the mapping inputs: outputs. \"\"\" sep = self._separable(start_frame)", "or more frames. Parameters ---------- frames : list List of", "self._axes_order @property def reference_frame(self): return self._reference_frame @property def reference_position(self): try:", "(unit,) if len(unit) != naxes: raise ValueError(\"Number of units does", "in frames: for ind, axtype, un, n in zip(frame.axes_order, frame.axes_type,", "', reference_position=\"{0}\"'.format(self.reference_position) if self.reference_frame is not None: fmt += \",", "axes_type[ind] = axtype axes_names[ind] = n unit[ind] = un if", "in the input data that corresponds to this axis. reference_frame", "'y'), name=None): super(Frame2D, self).__init__(2, [\"SPATIAL\", \"SPATIAL\"], axes_order, name=name, axes_names=axes_names, unit=unit)", "raise ValueError(\"Number of axes names does not match number of", "= frames[:] naxes = sum([frame._naxes for frame in self._frames]) axes_type", "LICENSE.rst \"\"\" Defines coordinate frames and ties them to data", "for au in unit]) if axes_names is not None: if", "not None: fmt += ', reference_position=\"{0}\"'.format(self.reference_position) if self.reference_frame is not", "Name for this frame. \"\"\" def __init__(self, frames, name=None): self._frames", "try: return coord.SkyCoord(*args, unit=self.unit, frame=self._reference_frame) except: raise class SpectralFrame(CoordinateFrame): \"\"\"", "self.unit[0] class CompositeFrame(CoordinateFrame): \"\"\" Represents one or more frames. Parameters", "in self._frames]) axes_type = list(range(naxes)) unit = list(range(naxes)) axes_names =", "unit is None: unit = tuple([u.degree] * naxes) axes_type =", "pipeline The transform between start_frame and the current frame is", "current frame. Parameters ---------- start_frame : ~gwcs.coordinate_frames.CoordinateFrame A frame in", "of the axes in this frame. 
name : str Name", "'CelestialFrame', 'SpectralFrame', 'CompositeFrame', 'CoordinateFrame'] STANDARD_REFERENCE_FRAMES = [frame.upper() for frame in", "of axes names does not match number of axes.\") else:", "reference_frame=reference_frame, unit=unit, axes_names=axes_names, name=name) def coordinates(self, *args): \"\"\" Create a", "A custom name of this frame.\"\"\" return self._name @name.setter def", "\"LOCAL_GROUP_CENTER\"] class CoordinateFrame(object): \"\"\" Base class for CoordinateFrames. Parameters ----------", "\"\"\" def __init__(self, naxes, axes_type, axes_order, reference_frame=None, reference_position=None, unit=None, axes_names=None,", "for i in frame.axes_order] print(frame, fargs, frame.axes_order) coo.append(frame.coordinates(*fargs)) return coo", "args : float inputs to wcs.input_frame \"\"\" # Reorder axes", ". import utils as gwutils __all__ = ['Frame2D', 'CelestialFrame', 'SpectralFrame',", "self.name, self.unit, self.axes_names, self.axes_order) if self.reference_position is not None: fmt", "def naxes(self): \"\"\" The number of axes intheis frame.\"\"\" return", "number of axes intheis frame.\"\"\" return self._naxes @property def unit(self):", "self._naxes @property def unit(self): \"\"\"The unit of this frame.\"\"\" return", "coordinate frame. Parameters ---------- axes_order : tuple of int A", "contribute to each axis in the current frame. Parameters ----------", "def axes_names(self): \"\"\" Names of axes in the frame.\"\"\" return", "coordinates(self, *args): \"\"\" Create world coordinates object\"\"\" raise NotImplementedError(\"Subclasses may", "\"TIME\"] axes_order : tuple of int A dimension in the", ": str or units.Unit instance Spectral unit. axes_names : str", "axes.\"\"\" return self._axes_order @property def reference_frame(self): return self._reference_frame @property def", "def reference_position(self): try: return self._reference_position except AttributeError: return None def", "self.unit[0] else: return args[0] * self.unit[0] class CompositeFrame(CoordinateFrame): \"\"\" Represents", "Parameters ---------- naxes : int Number of axes. axes_type :", "style license - see LICENSE.rst \"\"\" Defines coordinate frames and", "= [] for frame in self.frames: fargs = [args[i] for", "{}.\".format(axes_order)) super(CompositeFrame, self).__init__(naxes, axes_type=axes_type, axes_order=axes_order, unit=unit, axes_names=axes_names, name=name) @property def", "in frames: axes_order.extend(frame.axes_order) for frame in frames: for ind, axtype,", "Reorder axes if necesary. try: return coord.SkyCoord(*args, unit=self.unit, frame=self._reference_frame) except:", "coordinates object\"\"\" raise NotImplementedError(\"Subclasses may implement this\") class CelestialFrame(CoordinateFrame): \"\"\"", "CoordinateFrame). name : str Name for this frame. \"\"\" def", "__repr__(self): fmt = '<{0}(name=\"{1}\", unit={2}, axes_names={3}, axes_order={4}'.format( self.__class__.__name__, self.name, self.unit,", "= naxes self._axes_order = tuple(axes_order) if isinstance(axes_type, six.string_types): self._axes_type =", "for each axis. axes_names : list Names of the axes", "world coordinate objects). unit : str or units.Unit instance Spectral", "number of axes.\") else: self._unit = tuple([u.Unit(au) for au in", "of axes.\") else: axes_names = tuple([\"\"] * naxes) self._axes_names =", "used to compute the mapping inputs: outputs. 
\"\"\" sep =", "None and _unit: unit = _unit if axes_order is None:", "len(axes_order): raise ValueError(\"Incorrect numbering of axes, \" \"axes_order should contain", "numpy as np from astropy import units as u from", "axes_names if name is None: self._name = self.__class__.__name__ else: self._name", "for frame in self.frames: fargs = [args[i] for i in", ": astropy.coordinates.builtin_frames Reference frame (usually used with output_frame to convert", "self).__init__(naxes=1, axes_type=\"SPECTRAL\", axes_order=axes_order, axes_names=axes_names, reference_frame=reference_frame, unit=unit, name=name, reference_position=reference_position) def coordinates(self,", "def __str__(self): if self._name is not None: return self._name else:", "in the frame.\"\"\" return self._axes_names @property def axes_order(self): \"\"\" A", "for frame in frames: for ind, axtype, un, n in", "license - see LICENSE.rst \"\"\" Defines coordinate frames and ties", "Name of this frame. \"\"\" def __init__(self, axes_order=(0, 1), unit=(u.pix,", "self.reference_position is not None: fmt += ', reference_position=\"{0}\"'.format(self.reference_position) if self.reference_frame", "= ['Frame2D', 'CelestialFrame', 'SpectralFrame', 'CompositeFrame', 'CoordinateFrame'] STANDARD_REFERENCE_FRAMES = [frame.upper() for", "is not None: fmt += \", reference_frame={0}\".format(self.reference_frame) fmt += \")>\"", "coordinates(self, *args): coo = [] for frame in self.frames: fargs", "in the input data that corresponds to this axis. unit", "the frame.\"\"\" return self._axes_names @property def axes_order(self): \"\"\" A tuple", "import utils as astutil from astropy import coordinates as coord", "STANDARD_REFERENCE_POSITION = [\"GEOCENTER\", \"BARYCENTER\", \"HELIOCENTER\", \"TOPOCENTER\", \"LSR\", \"LSRK\", \"LSRD\", \"GALACTIC_CENTER\",", "Names of the axes in this frame. name : str", "return self._axes_type def coordinates(self, *args): \"\"\" Create world coordinates object\"\"\"", "for this frame. \"\"\" def __init__(self, frames, name=None): self._frames =", "= un if len(np.unique(axes_order)) != len(axes_order): raise ValueError(\"Incorrect numbering of", "and the current frame is used to compute the mapping", "SpectralFrame(CoordinateFrame): \"\"\" Represents Spectral Frame Parameters ---------- axes_order : tuple", "= (axes_names,) else: axes_names = tuple(axes_names) if len(axes_names) != naxes:", "coordinates(self, *args): args = [args[i] for i in self.axes_order] coo", "return self._name @name.setter def name(self, val): \"\"\" A custom name", "axis. 
reference_frame : astropy.coordinates.builtin_frames Reference frame (usually used with output_frame", "= self._separable(start_frame) inputs = [] for ax in self.axes_order: inputs.append(list(sep[ax].nonzero()[0]))", "if axes_names is None: axes_names = _axes_names naxes = len(_axes_names)", "\" \"got {}.\".format(axes_order)) super(CompositeFrame, self).__init__(naxes, axes_type=axes_type, axes_order=axes_order, unit=unit, axes_names=axes_names, name=name)", "_unit = list(reference_frame.representation_component_units.values()) if unit is None and _unit: unit", "implement this\") class CelestialFrame(CoordinateFrame): \"\"\" Celestial Frame Representation Parameters ----------", "return self._frames def __repr__(self): return repr(self.frames) def coordinates(self, *args): coo", "Frame Representation Parameters ---------- axes_order : tuple of int A", "else: self._name = name if reference_position is not None: self._reference_position", "used with output_frame to convert to world coordinate objects). unit", "else: axes_names = tuple([\"\"] * naxes) self._axes_names = axes_names if", "astutil.isiterable(unit): unit = tuple(unit) else: unit = (unit,) if len(unit)", "STANDARD_REFERENCE_FRAMES: _axes_names = list(reference_frame.representation_component_names.values()) if 'distance' in _axes_names: _axes_names.remove('distance') if", "Represents Spectral Frame Parameters ---------- axes_order : tuple or int", "unit : str or units.Unit instance Spectral unit. axes_names :", "frame. Parameters ---------- start_frame : ~gwcs.coordinate_frames.CoordinateFrame A frame in the", ": str Name of this frame. \"\"\" def __init__(self, naxes,", "if self._name is not None: return self._name else: return self.__class__.__name__", "of [\"SPATIAL\", \"SPECTRAL\", \"TIME\"] axes_order : tuple of int A", "@property def naxes(self): \"\"\" The number of axes intheis frame.\"\"\"", "for this frame. \"\"\" def __init__(self, axes_order=(0,), reference_frame=None, unit=None, axes_names=None,", "instance or iterable of those Units on axes. axes_names :", "axes. axes_names : list Names of the axes in this", "frame. \"\"\" def __init__(self, naxes, axes_type, axes_order, reference_frame=None, reference_position=None, unit=None,", "\"\"\" A tuple of indices which map inputs to axes.\"\"\"", "frame. \"\"\" def __init__(self, axes_order=None, reference_frame=None, unit=None, axes_names=None, name=None): naxes", "that corresponds to this axis. reference_frame : astropy.coordinates.builtin_frames Reference frame", "name : str Name for this frame. \"\"\" def __init__(self,", "axes_names = tuple(axes_names) if len(axes_names) != naxes: raise ValueError(\"Number of", "axes if necesary. try: return coord.SkyCoord(*args, unit=self.unit, frame=self._reference_frame) except: raise", "sum([frame._naxes for frame in self._frames]) axes_type = list(range(naxes)) unit =", "Frame2D(CoordinateFrame): \"\"\" A 2D coordinate frame. 
Parameters ---------- axes_order :", "def __init__(self, axes_order=(0, 1), unit=(u.pix, u.pix), axes_names=('x', 'y'), name=None): super(Frame2D,", "is None and _unit: unit = _unit if axes_order is", "axes, \" \"axes_order should contain unique numbers, \" \"got {}.\".format(axes_order))", "for ax in self.axes_order: inputs.append(list(sep[ax].nonzero()[0])) return inputs @property def axes_type(self):", "axes_type=axes_type, axes_order=axes_order, unit=unit, axes_names=axes_names, name=name) @property def frames(self): return self._frames", "fmt += \")>\" return fmt def __str__(self): if self._name is", "\"BARYCENTER\", \"HELIOCENTER\", \"TOPOCENTER\", \"LSR\", \"LSRK\", \"LSRD\", \"GALACTIC_CENTER\", \"LOCAL_GROUP_CENTER\"] class CoordinateFrame(object):", "of astropy.units.Unit Unit for each axis. axes_names : list Names", "au in unit]) if axes_names is not None: if isinstance(axes_names,", "tuple(axes_names) if len(axes_names) != naxes: raise ValueError(\"Number of axes names", "len(np.unique(axes_order)) != len(axes_order): raise ValueError(\"Incorrect numbering of axes, \" \"axes_order", "\"\"\"The unit of this frame.\"\"\" return self._unit @property def axes_names(self):", "as astutil from astropy import coordinates as coord from astropy.extern", "inputs: outputs. \"\"\" sep = self._separable(start_frame) inputs = [] for", "wcs.input_frame \"\"\" # Reorder axes if necesary. try: return coord.SkyCoord(*args,", "astropy.coordinates.builtin_frames Reference frame (usually used with output_frame to convert to", "outputs. \"\"\" sep = self._separable(start_frame) inputs = [] for ax", "str or units.Unit instance or iterable of those Units on", ": str Name for this frame. \"\"\" def __init__(self, frames,", ": list List of frames (TimeFrame, CelestialFrame, SpectralFrame, CoordinateFrame). name", "axis. unit : list of astropy.units.Unit Unit for each axis.", "this frame. \"\"\" def __init__(self, axes_order=(0, 1), unit=(u.pix, u.pix), axes_names=('x',", "= tuple([u.Unit(au) for au in unit]) if axes_names is not", "def coordinates(self, *args): coo = [] for frame in self.frames:", "[\"SPATIAL\", \"SPECTRAL\", \"TIME\"] axes_order : tuple of int A dimension", "frame.\"\"\" self._name = val @property def naxes(self): \"\"\" The number", "i in self.axes_order] coo = tuple([arg * un for arg,", "str Name of this frame. \"\"\" def __init__(self, axes_order=None, reference_frame=None,", "unit = _unit if axes_order is None: axes_order = tuple(range(naxes))", "astropy import units as u from astropy import utils as", "\"\"\" A custom name of this frame.\"\"\" return self._name @name.setter", "frame. \"\"\" def __init__(self, frames, name=None): self._frames = frames[:] naxes", "Number of axes. axes_type : str One of [\"SPATIAL\", \"SPECTRAL\",", "as coord from astropy.extern import six from . import utils", "---------- naxes : int Number of axes. axes_type : str", "six from . import utils as gwutils __all__ = ['Frame2D',", "input data that corresponds to this axis. reference_frame : astropy.coordinates.builtin_frames", "the axes in this frame. name : str Name of", "(axes_names,) else: axes_names = tuple(axes_names) if len(axes_names) != naxes: raise", "= tuple([u.degree] * naxes) axes_type = ['SPATIAL'] * naxes super(CelestialFrame,", "coordinate objects). 
unit : str or units.Unit instance Spectral unit.", "return coord.SkyCoord(*args, unit=self.unit, frame=self._reference_frame) except: raise class SpectralFrame(CoordinateFrame): \"\"\" Represents", "CelestialFrame(CoordinateFrame): \"\"\" Celestial Frame Representation Parameters ---------- axes_order : tuple", "name=None): super(Frame2D, self).__init__(2, [\"SPATIAL\", \"SPATIAL\"], axes_order, name=name, axes_names=axes_names, unit=unit) def", ": str One of [\"SPATIAL\", \"SPECTRAL\", \"TIME\"] axes_order : tuple", "\"\"\" from __future__ import absolute_import, division, unicode_literals, print_function import numpy", "name=name, reference_position=reference_position) def coordinates(self, *args): if np.isscalar(args): return args *", "return fmt def __str__(self): if self._name is not None: return", "ind, axtype, un, n in zip(frame.axes_order, frame.axes_type, frame.unit, frame.axes_names): axes_type[ind]", "frame is used to compute the mapping inputs: outputs. \"\"\"", "def __repr__(self): return repr(self.frames) def coordinates(self, *args): coo = []", "axes. \"\"\" from __future__ import absolute_import, division, unicode_literals, print_function import", "tuple([\"\"] * naxes) self._axes_names = axes_names if name is None:", "name=name) def coordinates(self, *args): \"\"\" Create a SkyCoord object. Parameters", "axes_names=axes_names, reference_frame=reference_frame, unit=unit, name=name, reference_position=reference_position) def coordinates(self, *args): if np.isscalar(args):", "in self.axes_order] coo = tuple([arg * un for arg, un", "world coordinates object\"\"\" raise NotImplementedError(\"Subclasses may implement this\") class CelestialFrame(CoordinateFrame):", "reference_frame=None, reference_position=None, unit=None, axes_names=None, name=None): self._naxes = naxes self._axes_order =", "in STANDARD_REFERENCE_FRAMES: _axes_names = list(reference_frame.representation_component_names.values()) if 'distance' in _axes_names: _axes_names.remove('distance')", "inputs.append(list(sep[ax].nonzero()[0])) return inputs @property def axes_type(self): \"\"\" Type of this", "if axes_names is not None: if isinstance(axes_names, six.string_types): axes_names =", "tuple([u.degree] * naxes) axes_type = ['SPATIAL'] * naxes super(CelestialFrame, self).__init__(naxes=naxes,", "is not None: self._reference_position = reference_position else: self._reference_position = None", "tuple(axes_order) if isinstance(axes_type, six.string_types): self._axes_type = (axes_type,) else: self._axes_type =", "_axes_names naxes = len(_axes_names) _unit = list(reference_frame.representation_component_units.values()) if unit is", "should contain unique numbers, \" \"got {}.\".format(axes_order)) super(CompositeFrame, self).__init__(naxes, axes_type=axes_type,", "__init__(self, axes_order=(0,), reference_frame=None, unit=None, axes_names=None, name=None, reference_position=None): super(SpectralFrame, self).__init__(naxes=1, axes_type=\"SPECTRAL\",", "print(frame, fargs, frame.axes_order) coo.append(frame.coordinates(*fargs)) return coo class Frame2D(CoordinateFrame): \"\"\" A", "The transform between start_frame and the current frame is used", "self.axes_order] coo = tuple([arg * un for arg, un in", "self.axes_order) if self.reference_position is not None: fmt += ', reference_position=\"{0}\"'.format(self.reference_position)", "Parameters ---------- axes_order : tuple or int A dimension in", "\"LSRD\", \"GALACTIC_CENTER\", \"LOCAL_GROUP_CENTER\"] class CoordinateFrame(object): \"\"\" Base class for 
CoordinateFrames.", "= tuple(axes_type) self._reference_frame = reference_frame if unit is not None:", "that corresponds to this axis. reference_frame : astropy.coordinates.builtin_frames A reference", "objects). unit : str or units.Unit instance Spectral unit. axes_names", "['Frame2D', 'CelestialFrame', 'SpectralFrame', 'CompositeFrame', 'CoordinateFrame'] STANDARD_REFERENCE_FRAMES = [frame.upper() for frame", "frame in frames: for ind, axtype, un, n in zip(frame.axes_order,", "this frame. \"\"\" def __init__(self, naxes, axes_type, axes_order, reference_frame=None, reference_position=None,", "unit=unit, name=name, reference_position=reference_position) def coordinates(self, *args): if np.isscalar(args): return args", "self._reference_position = None super(CoordinateFrame, self).__init__() def __repr__(self): fmt = '<{0}(name=\"{1}\",", "from astropy import coordinates as coord from astropy.extern import six", "frame. \"\"\" def __init__(self, axes_order=(0, 1), unit=(u.pix, u.pix), axes_names=('x', 'y'),", "axes_names={3}, axes_order={4}'.format( self.__class__.__name__, self.name, self.unit, self.axes_names, self.axes_order) if self.reference_position is", "~gwcs.coordinate_frames.CoordinateFrame A frame in the WCS pipeline The transform between", "None: axes_names = _axes_names naxes = len(_axes_names) _unit = list(reference_frame.representation_component_units.values())", "convert to world coordinate objects). reference_position : str Reference position", "from . import utils as gwutils __all__ = ['Frame2D', 'CelestialFrame',", "list(reference_frame.representation_component_units.values()) if unit is None and _unit: unit = _unit", "for frame in frames: axes_order.extend(frame.axes_order) for frame in frames: for", "of this frame.\"\"\" self._name = val @property def naxes(self): \"\"\"", "world coordinate objects). reference_position : str Reference position - one", "self._axes_names = axes_names if name is None: self._name = self.__class__.__name__", "match number of axes.\") else: axes_names = tuple([\"\"] * naxes)", "frame. name : str Name of this frame. \"\"\" def", "\"\"\" def __init__(self, axes_order=None, reference_frame=None, unit=None, axes_names=None, name=None): naxes =", "inputs to axes.\"\"\" return self._axes_order @property def reference_frame(self): return self._reference_frame", "or iterable of those Units on axes. axes_names : list", "@property def unit(self): \"\"\"The unit of this frame.\"\"\" return self._unit", "return self._naxes @property def unit(self): \"\"\"The unit of this frame.\"\"\"", "frame. \"\"\" def __init__(self, axes_order=(0,), reference_frame=None, unit=None, axes_names=None, name=None, reference_position=None):", "Type of this frame : 'SPATIAL', 'SPECTRAL', 'TIME'. \"\"\" return", "val): \"\"\" A custom name of this frame.\"\"\" self._name =", "Create world coordinates object\"\"\" raise NotImplementedError(\"Subclasses may implement this\") class", "raise class SpectralFrame(CoordinateFrame): \"\"\" Represents Spectral Frame Parameters ---------- axes_order", "List of frames (TimeFrame, CelestialFrame, SpectralFrame, CoordinateFrame). 
name : str", "self._reference_frame = reference_frame if unit is not None: if astutil.isiterable(unit):", "axes_order=None, reference_frame=None, unit=None, axes_names=None, name=None): naxes = 2 if reference_frame", "reference_frame=None, unit=None, axes_names=None, name=None, reference_position=None): super(SpectralFrame, self).__init__(naxes=1, axes_type=\"SPECTRAL\", axes_order=axes_order, axes_names=axes_names,", "\"\"\" Computes which axes in `start_frame` contribute to each axis", "\"LSRK\", \"LSRD\", \"GALACTIC_CENTER\", \"LOCAL_GROUP_CENTER\"] class CoordinateFrame(object): \"\"\" Base class for", "Reference position - one of `STANDARD_REFERENCE_POSITION` unit : list of", "except AttributeError: return None def input_axes(self, start_frame=None): \"\"\" Computes which", "reference_frame : astropy.coordinates.builtin_frames A reference frame. reference_position : str Reference", "self._frames]) axes_type = list(range(naxes)) unit = list(range(naxes)) axes_names = list(range(naxes))", "def __init__(self, axes_order=None, reference_frame=None, unit=None, axes_names=None, name=None): naxes = 2", "@property def reference_position(self): try: return self._reference_position except AttributeError: return None", "custom name of this frame.\"\"\" return self._name @name.setter def name(self,", "list(range(naxes)) unit = list(range(naxes)) axes_names = list(range(naxes)) axes_order = []", "in frame.axes_order] print(frame, fargs, frame.axes_order) coo.append(frame.coordinates(*fargs)) return coo class Frame2D(CoordinateFrame):", "axes_order, reference_frame=None, reference_position=None, unit=None, axes_names=None, name=None): self._naxes = naxes self._axes_order", "astutil from astropy import coordinates as coord from astropy.extern import", "self._separable(start_frame) inputs = [] for ax in self.axes_order: inputs.append(list(sep[ax].nonzero()[0])) return", "isinstance(axes_names, six.string_types): axes_names = (axes_names,) else: axes_names = tuple(axes_names) if", "this frame.\"\"\" return self._unit @property def axes_names(self): \"\"\" Names of", "inputs to wcs.input_frame \"\"\" # Reorder axes if necesary. try:", "name(self): \"\"\" A custom name of this frame.\"\"\" return self._name", "this frame.\"\"\" self._name = val @property def naxes(self): \"\"\" The", ": str or units.Unit instance or iterable of those Units", "unit of this frame.\"\"\" return self._unit @property def axes_names(self): \"\"\"", "tuple(unit) else: unit = (unit,) if len(unit) != naxes: raise", "= tuple(unit) else: unit = (unit,) if len(unit) != naxes:", "from astropy.extern import six from . import utils as gwutils", "output_frame to convert to world coordinate objects). unit : str", "frames and ties them to data axes. \"\"\" from __future__", "axes_names : list Names of the axes in this frame.", "of axes. axes_type : str One of [\"SPATIAL\", \"SPECTRAL\", \"TIME\"]", "def reference_frame(self): return self._reference_frame @property def reference_position(self): try: return self._reference_position", "\"\"\" Base class for CoordinateFrames. Parameters ---------- naxes : int", "\"\"\" Type of this frame : 'SPATIAL', 'SPECTRAL', 'TIME'. \"\"\"", "'SpectralFrame', 'CompositeFrame', 'CoordinateFrame'] STANDARD_REFERENCE_FRAMES = [frame.upper() for frame in coord.builtin_frames.__all__]", "axes names does not match number of axes.\") else: axes_names", "this axis. 
reference_frame : astropy.coordinates.builtin_frames Reference frame (usually used with", "frame.\"\"\" return self._unit @property def axes_names(self): \"\"\" Names of axes", "else: unit = (unit,) if len(unit) != naxes: raise ValueError(\"Number", "\"\"\" Represents Spectral Frame Parameters ---------- axes_order : tuple or", "self).__init__(naxes=naxes, axes_type=axes_type, axes_order=axes_order, reference_frame=reference_frame, unit=unit, axes_names=axes_names, name=name) def coordinates(self, *args):", "start_frame=None): \"\"\" Computes which axes in `start_frame` contribute to each", "unit = tuple([u.degree] * naxes) axes_type = ['SPATIAL'] * naxes", "unit=unit, axes_names=axes_names, name=name) @property def frames(self): return self._frames def __repr__(self):", "match number of axes.\") else: self._unit = tuple([u.Unit(au) for au", "frame in coord.builtin_frames.__all__] STANDARD_REFERENCE_POSITION = [\"GEOCENTER\", \"BARYCENTER\", \"HELIOCENTER\", \"TOPOCENTER\", \"LSR\",", "corresponds to this axis. reference_frame : astropy.coordinates.builtin_frames Reference frame (usually", "\"axes_order should contain unique numbers, \" \"got {}.\".format(axes_order)) super(CompositeFrame, self).__init__(naxes,", "self).__init__() def __repr__(self): fmt = '<{0}(name=\"{1}\", unit={2}, axes_names={3}, axes_order={4}'.format( self.__class__.__name__,", "Spectral Frame Parameters ---------- axes_order : tuple or int A", "def __repr__(self): fmt = '<{0}(name=\"{1}\", unit={2}, axes_names={3}, axes_order={4}'.format( self.__class__.__name__, self.name,", "def input_axes(self, start_frame=None): \"\"\" Computes which axes in `start_frame` contribute", "Unit for each axis. axes_names : list Names of the", "in self.axes_order: inputs.append(list(sep[ax].nonzero()[0])) return inputs @property def axes_type(self): \"\"\" Type", "def coordinates(self, *args): \"\"\" Create a SkyCoord object. Parameters ----------", "name is None: self._name = self.__class__.__name__ else: self._name = name", "iterable of those Units on axes. axes_names : list Names", "of this frame.\"\"\" return self._name @name.setter def name(self, val): \"\"\"", "else: self._axes_type = tuple(axes_type) self._reference_frame = reference_frame if unit is", "\"LSR\", \"LSRK\", \"LSRD\", \"GALACTIC_CENTER\", \"LOCAL_GROUP_CENTER\"] class CoordinateFrame(object): \"\"\" Base class", "unit = list(range(naxes)) axes_names = list(range(naxes)) axes_order = [] for", "fargs = [args[i] for i in frame.axes_order] print(frame, fargs, frame.axes_order)", "of axes in the frame.\"\"\" return self._axes_names @property def axes_order(self):", "one of `STANDARD_REFERENCE_POSITION` unit : list of astropy.units.Unit Unit for", "this axis. unit : list of astropy.units.Unit Unit for each", "= list(range(naxes)) axes_order = [] for frame in frames: axes_order.extend(frame.axes_order)", "reference_frame={0}\".format(self.reference_frame) fmt += \")>\" return fmt def __str__(self): if self._name", "@property def axes_names(self): \"\"\" Names of axes in the frame.\"\"\"", "'TIME'. \"\"\" return self._axes_type def coordinates(self, *args): \"\"\" Create world", "class CompositeFrame(CoordinateFrame): \"\"\" Represents one or more frames. 
Parameters ----------", "- see LICENSE.rst \"\"\" Defines coordinate frames and ties them", "= reference_position else: self._reference_position = None super(CoordinateFrame, self).__init__() def __repr__(self):", "Parameters ---------- args : float inputs to wcs.input_frame \"\"\" #", "self._axes_type = tuple(axes_type) self._reference_frame = reference_frame if unit is not", "None super(CoordinateFrame, self).__init__() def __repr__(self): fmt = '<{0}(name=\"{1}\", unit={2}, axes_names={3},", "self._axes_names @property def axes_order(self): \"\"\" A tuple of indices which", "numbering of axes, \" \"axes_order should contain unique numbers, \"", "__str__(self): if self._name is not None: return self._name else: return", "frame in self.frames: fargs = [args[i] for i in frame.axes_order]", "axes_order={4}'.format( self.__class__.__name__, self.name, self.unit, self.axes_names, self.axes_order) if self.reference_position is not", "= [\"GEOCENTER\", \"BARYCENTER\", \"HELIOCENTER\", \"TOPOCENTER\", \"LSR\", \"LSRK\", \"LSRD\", \"GALACTIC_CENTER\", \"LOCAL_GROUP_CENTER\"]", "@property def axes_order(self): \"\"\" A tuple of indices which map", "name=None): naxes = 2 if reference_frame is not None: if", "class Frame2D(CoordinateFrame): \"\"\" A 2D coordinate frame. Parameters ---------- axes_order", "unit is not None: if astutil.isiterable(unit): unit = tuple(unit) else:", "frame. reference_position : str Reference position. unit : str or", "def axes_type(self): \"\"\" Type of this frame : 'SPATIAL', 'SPECTRAL',", "self).__init__(2, [\"SPATIAL\", \"SPATIAL\"], axes_order, name=name, axes_names=axes_names, unit=unit) def coordinates(self, *args):", "= sum([frame._naxes for frame in self._frames]) axes_type = list(range(naxes)) unit", "frame.axes_names): axes_type[ind] = axtype axes_names[ind] = n unit[ind] = un", "SkyCoord object. Parameters ---------- args : float inputs to wcs.input_frame", "this frame. \"\"\" def __init__(self, axes_order=None, reference_frame=None, unit=None, axes_names=None, name=None):", "in the current frame. Parameters ---------- start_frame : ~gwcs.coordinate_frames.CoordinateFrame A", "\"GALACTIC_CENTER\", \"LOCAL_GROUP_CENTER\"] class CoordinateFrame(object): \"\"\" Base class for CoordinateFrames. Parameters", "One of [\"SPATIAL\", \"SPECTRAL\", \"TIME\"] axes_order : tuple of int", "\"\"\" Represents one or more frames. Parameters ---------- frames :", "\"\"\" Celestial Frame Representation Parameters ---------- axes_order : tuple of", "int Number of axes. 
axes_type : str One of [\"SPATIAL\",", "__all__ = ['Frame2D', 'CelestialFrame', 'SpectralFrame', 'CompositeFrame', 'CoordinateFrame'] STANDARD_REFERENCE_FRAMES = [frame.upper()", "__future__ import absolute_import, division, unicode_literals, print_function import numpy as np", "self._name @name.setter def name(self, val): \"\"\" A custom name of", "in the WCS pipeline The transform between start_frame and the", "len(unit) != naxes: raise ValueError(\"Number of units does not match", "\"got {}.\".format(axes_order)) super(CompositeFrame, self).__init__(naxes, axes_type=axes_type, axes_order=axes_order, unit=unit, axes_names=axes_names, name=name) @property", "class SpectralFrame(CoordinateFrame): \"\"\" Represents Spectral Frame Parameters ---------- axes_order :", "reference_position=\"{0}\"'.format(self.reference_position) if self.reference_frame is not None: fmt += \", reference_frame={0}\".format(self.reference_frame)", "naxes = sum([frame._naxes for frame in self._frames]) axes_type = list(range(naxes))", "+= \", reference_frame={0}\".format(self.reference_frame) fmt += \")>\" return fmt def __str__(self):", "frame.axes_order] print(frame, fargs, frame.axes_order) coo.append(frame.coordinates(*fargs)) return coo class Frame2D(CoordinateFrame): \"\"\"", "@property def axes_type(self): \"\"\" Type of this frame : 'SPATIAL',", "STANDARD_REFERENCE_FRAMES = [frame.upper() for frame in coord.builtin_frames.__all__] STANDARD_REFERENCE_POSITION = [\"GEOCENTER\",", "CoordinateFrames. Parameters ---------- naxes : int Number of axes. axes_type", "axes. axes_type : str One of [\"SPATIAL\", \"SPECTRAL\", \"TIME\"] axes_order", "class for CoordinateFrames. Parameters ---------- naxes : int Number of", "unicode_literals, print_function import numpy as np from astropy import units", "n in zip(frame.axes_order, frame.axes_type, frame.unit, frame.axes_names): axes_type[ind] = axtype axes_names[ind]", "['SPATIAL'] * naxes super(CelestialFrame, self).__init__(naxes=naxes, axes_type=axes_type, axes_order=axes_order, reference_frame=reference_frame, unit=unit, axes_names=axes_names,", "@property def reference_frame(self): return self._reference_frame @property def reference_position(self): try: return", "of this frame. \"\"\" def __init__(self, naxes, axes_type, axes_order, reference_frame=None,", "a 3-clause BSD style license - see LICENSE.rst \"\"\" Defines", "from astropy import units as u from astropy import utils", "axes_names is not None: if isinstance(axes_names, six.string_types): axes_names = (axes_names,)", "else: axes_names = tuple(axes_names) if len(axes_names) != naxes: raise ValueError(\"Number", "[] for frame in self.frames: fargs = [args[i] for i", "return self._unit @property def axes_names(self): \"\"\" Names of axes in", "corresponds to this axis. reference_frame : astropy.coordinates.builtin_frames A reference frame.", "is None: self._name = self.__class__.__name__ else: self._name = name if", "convert to world coordinate objects). unit : str or units.Unit", "of axes intheis frame.\"\"\" return self._naxes @property def unit(self): \"\"\"The", "list List of frames (TimeFrame, CelestialFrame, SpectralFrame, CoordinateFrame). name :", "# Reorder axes if necesary. try: return coord.SkyCoord(*args, unit=self.unit, frame=self._reference_frame)", "str Name of this frame. \"\"\" def __init__(self, axes_order=(0, 1),", "axes_type = ['SPATIAL'] * naxes super(CelestialFrame, self).__init__(naxes=naxes, axes_type=axes_type, axes_order=axes_order, reference_frame=reference_frame,", "this frame. 
\"\"\" def __init__(self, frames, name=None): self._frames = frames[:]", "if self.reference_position is not None: fmt += ', reference_position=\"{0}\"'.format(self.reference_position) if", "the mapping inputs: outputs. \"\"\" sep = self._separable(start_frame) inputs =", "is None: unit = tuple([u.degree] * naxes) axes_type = ['SPATIAL']", "axes_order.extend(frame.axes_order) for frame in frames: for ind, axtype, un, n", "units as u from astropy import utils as astutil from", "axes in `start_frame` contribute to each axis in the current", "Name for this frame. \"\"\" def __init__(self, axes_order=(0,), reference_frame=None, unit=None,", "units.Unit instance or iterable of those Units on axes. axes_names", "CelestialFrame, SpectralFrame, CoordinateFrame). name : str Name for this frame.", "if 'distance' in _axes_names: _axes_names.remove('distance') if axes_names is None: axes_names", "inputs = [] for ax in self.axes_order: inputs.append(list(sep[ax].nonzero()[0])) return inputs", "[] for frame in frames: axes_order.extend(frame.axes_order) for frame in frames:", "@property def frames(self): return self._frames def __repr__(self): return repr(self.frames) def", "args[0] * self.unit[0] class CompositeFrame(CoordinateFrame): \"\"\" Represents one or more", "or units.Unit instance Spectral unit. axes_names : str Spectral axis", "Celestial Frame Representation Parameters ---------- axes_order : tuple of int", "BSD style license - see LICENSE.rst \"\"\" Defines coordinate frames", "of this frame.\"\"\" return self._unit @property def axes_names(self): \"\"\" Names", "def frames(self): return self._frames def __repr__(self): return repr(self.frames) def coordinates(self,", "axes_order, name=name, axes_names=axes_names, unit=unit) def coordinates(self, *args): args = [args[i]", "frames (TimeFrame, CelestialFrame, SpectralFrame, CoordinateFrame). name : str Name for", "from __future__ import absolute_import, division, unicode_literals, print_function import numpy as", "Parameters ---------- start_frame : ~gwcs.coordinate_frames.CoordinateFrame A frame in the WCS", "None: if isinstance(axes_names, six.string_types): axes_names = (axes_names,) else: axes_names =", "Representation Parameters ---------- axes_order : tuple of int A dimension", "each axis. axes_names : list Names of the axes in", "*args): coo = [] for frame in self.frames: fargs =", "is None: axes_names = _axes_names naxes = len(_axes_names) _unit =", "start_frame and the current frame is used to compute the", "3-clause BSD style license - see LICENSE.rst \"\"\" Defines coordinate", "Reference position. unit : str or units.Unit instance or iterable", "= axtype axes_names[ind] = n unit[ind] = un if len(np.unique(axes_order))", "axes_order=(0,), reference_frame=None, unit=None, axes_names=None, name=None, reference_position=None): super(SpectralFrame, self).__init__(naxes=1, axes_type=\"SPECTRAL\", axes_order=axes_order,", "the current frame. Parameters ---------- start_frame : ~gwcs.coordinate_frames.CoordinateFrame A frame", "reference_position : str Reference position. unit : str or units.Unit", "str Name of this frame. \"\"\" def __init__(self, naxes, axes_type,", "coordinate objects). 
reference_position : str Reference position - one of", "if name is None: self._name = self.__class__.__name__ else: self._name =", "name if reference_position is not None: self._reference_position = reference_position else:", "axes in the frame.\"\"\" return self._axes_names @property def axes_order(self): \"\"\"", "of this frame : 'SPATIAL', 'SPECTRAL', 'TIME'. \"\"\" return self._axes_type", "this frame : 'SPATIAL', 'SPECTRAL', 'TIME'. \"\"\" return self._axes_type def", "not None: if astutil.isiterable(unit): unit = tuple(unit) else: unit =", "class CelestialFrame(CoordinateFrame): \"\"\" Celestial Frame Representation Parameters ---------- axes_order :", ": tuple or int A dimension in the input data", "axes_order(self): \"\"\" A tuple of indices which map inputs to", "for frame in self._frames]) axes_type = list(range(naxes)) unit = list(range(naxes))", "if len(np.unique(axes_order)) != len(axes_order): raise ValueError(\"Incorrect numbering of axes, \"", "contain unique numbers, \" \"got {}.\".format(axes_order)) super(CompositeFrame, self).__init__(naxes, axes_type=axes_type, axes_order=axes_order,", "input data that corresponds to this axis. unit : list", "intheis frame.\"\"\" return self._naxes @property def unit(self): \"\"\"The unit of", "list(range(naxes)) axes_order = [] for frame in frames: axes_order.extend(frame.axes_order) for", "[\"GEOCENTER\", \"BARYCENTER\", \"HELIOCENTER\", \"TOPOCENTER\", \"LSR\", \"LSRK\", \"LSRD\", \"GALACTIC_CENTER\", \"LOCAL_GROUP_CENTER\"] class", "else: self._reference_position = None super(CoordinateFrame, self).__init__() def __repr__(self): fmt =", "None: fmt += \", reference_frame={0}\".format(self.reference_frame) fmt += \")>\" return fmt", "for CoordinateFrames. Parameters ---------- naxes : int Number of axes.", "None: if reference_frame.name.upper() in STANDARD_REFERENCE_FRAMES: _axes_names = list(reference_frame.representation_component_names.values()) if 'distance'", "naxes: raise ValueError(\"Number of units does not match number of", "\")>\" return fmt def __str__(self): if self._name is not None:", "custom name of this frame.\"\"\" self._name = val @property def", "n unit[ind] = un if len(np.unique(axes_order)) != len(axes_order): raise ValueError(\"Incorrect", "self._axes_order = tuple(axes_order) if isinstance(axes_type, six.string_types): self._axes_type = (axes_type,) else:", "this\") class CelestialFrame(CoordinateFrame): \"\"\" Celestial Frame Representation Parameters ---------- axes_order", "unit=None, axes_names=None, name=None, reference_position=None): super(SpectralFrame, self).__init__(naxes=1, axes_type=\"SPECTRAL\", axes_order=axes_order, axes_names=axes_names, reference_frame=reference_frame,", "(axes_type,) else: self._axes_type = tuple(axes_type) self._reference_frame = reference_frame if unit", "not None: self._reference_position = reference_position else: self._reference_position = None super(CoordinateFrame,", "= val @property def naxes(self): \"\"\" The number of axes", "frame in self._frames]) axes_type = list(range(naxes)) unit = list(range(naxes)) axes_names", "super(Frame2D, self).__init__(2, [\"SPATIAL\", \"SPATIAL\"], axes_order, name=name, axes_names=axes_names, unit=unit) def coordinates(self,", "= [frame.upper() for frame in coord.builtin_frames.__all__] STANDARD_REFERENCE_POSITION = [\"GEOCENTER\", \"BARYCENTER\",", "\"\"\" return self._axes_type def coordinates(self, *args): \"\"\" Create world coordinates", "if astutil.isiterable(unit): unit = tuple(unit) else: unit = (unit,) if", "'CompositeFrame', 
'CoordinateFrame'] STANDARD_REFERENCE_FRAMES = [frame.upper() for frame in coord.builtin_frames.__all__] STANDARD_REFERENCE_POSITION", "see LICENSE.rst \"\"\" Defines coordinate frames and ties them to", "'<{0}(name=\"{1}\", unit={2}, axes_names={3}, axes_order={4}'.format( self.__class__.__name__, self.name, self.unit, self.axes_names, self.axes_order) if", "coordinates as coord from astropy.extern import six from . import", "np.isscalar(args): return args * self.unit[0] else: return args[0] * self.unit[0]", "CoordinateFrame(object): \"\"\" Base class for CoordinateFrames. Parameters ---------- naxes :", "as gwutils __all__ = ['Frame2D', 'CelestialFrame', 'SpectralFrame', 'CompositeFrame', 'CoordinateFrame'] STANDARD_REFERENCE_FRAMES", "Parameters ---------- axes_order : tuple of int A dimension in", "= self.__class__.__name__ else: self._name = name if reference_position is not", "coord from astropy.extern import six from . import utils as", "numbers, \" \"got {}.\".format(axes_order)) super(CompositeFrame, self).__init__(naxes, axes_type=axes_type, axes_order=axes_order, unit=unit, axes_names=axes_names,", "coo = [] for frame in self.frames: fargs = [args[i]", "[\"SPATIAL\", \"SPATIAL\"], axes_order, name=name, axes_names=axes_names, unit=unit) def coordinates(self, *args): args", "data that corresponds to this axis. unit : list of", "*args): \"\"\" Create a SkyCoord object. Parameters ---------- args :", "naxes: raise ValueError(\"Number of axes names does not match number", "indices which map inputs to axes.\"\"\" return self._axes_order @property def", "self._name = name if reference_position is not None: self._reference_position =", "* self.unit[0] else: return args[0] * self.unit[0] class CompositeFrame(CoordinateFrame): \"\"\"", "six.string_types): axes_names = (axes_names,) else: axes_names = tuple(axes_names) if len(axes_names)", "is not None: if astutil.isiterable(unit): unit = tuple(unit) else: unit", "frames[:] naxes = sum([frame._naxes for frame in self._frames]) axes_type =", "'SPECTRAL', 'TIME'. \"\"\" return self._axes_type def coordinates(self, *args): \"\"\" Create", "naxes = len(_axes_names) _unit = list(reference_frame.representation_component_units.values()) if unit is None", "in _axes_names: _axes_names.remove('distance') if axes_names is None: axes_names = _axes_names", "\"\"\" A custom name of this frame.\"\"\" self._name = val", "is not None: if isinstance(axes_names, six.string_types): axes_names = (axes_names,) else:", "class CoordinateFrame(object): \"\"\" Base class for CoordinateFrames. Parameters ---------- naxes", "import utils as gwutils __all__ = ['Frame2D', 'CelestialFrame', 'SpectralFrame', 'CompositeFrame',", "axes_names=None, name=None): self._naxes = naxes self._axes_order = tuple(axes_order) if isinstance(axes_type,", "of indices which map inputs to axes.\"\"\" return self._axes_order @property", "of this frame. \"\"\" def __init__(self, axes_order=None, reference_frame=None, unit=None, axes_names=None,", "axtype, un, n in zip(frame.axes_order, frame.axes_type, frame.unit, frame.axes_names): axes_type[ind] =", "frame in the WCS pipeline The transform between start_frame and", "in `start_frame` contribute to each axis in the current frame.", "_axes_names = list(reference_frame.representation_component_names.values()) if 'distance' in _axes_names: _axes_names.remove('distance') if axes_names", ": int Number of axes. 
axes_type : str One of", "Computes which axes in `start_frame` contribute to each axis in", "[args[i] for i in self.axes_order] coo = tuple([arg * un", "frame.\"\"\" return self._name @name.setter def name(self, val): \"\"\" A custom", "raise NotImplementedError(\"Subclasses may implement this\") class CelestialFrame(CoordinateFrame): \"\"\" Celestial Frame", "self._axes_type def coordinates(self, *args): \"\"\" Create world coordinates object\"\"\" raise", "str or units.Unit instance Spectral unit. axes_names : str Spectral", "CompositeFrame(CoordinateFrame): \"\"\" Represents one or more frames. Parameters ---------- frames", "zip(frame.axes_order, frame.axes_type, frame.unit, frame.axes_names): axes_type[ind] = axtype axes_names[ind] = n", "data that corresponds to this axis. reference_frame : astropy.coordinates.builtin_frames A", "if len(axes_names) != naxes: raise ValueError(\"Number of axes names does", "str Spectral axis name. name : str Name for this", "__init__(self, axes_order=(0, 1), unit=(u.pix, u.pix), axes_names=('x', 'y'), name=None): super(Frame2D, self).__init__(2,", "+= ', reference_position=\"{0}\"'.format(self.reference_position) if self.reference_frame is not None: fmt +=", "which axes in `start_frame` contribute to each axis in the", "unit=unit, axes_names=axes_names, name=name) def coordinates(self, *args): \"\"\" Create a SkyCoord", "self._frames def __repr__(self): return repr(self.frames) def coordinates(self, *args): coo =", "_unit if axes_order is None: axes_order = tuple(range(naxes)) if unit", "*args): \"\"\" Create world coordinates object\"\"\" raise NotImplementedError(\"Subclasses may implement", "\"\"\" # Reorder axes if necesary. try: return coord.SkyCoord(*args, unit=self.unit,", "that corresponds to this axis. unit : list of astropy.units.Unit", "axes_order = tuple(range(naxes)) if unit is None: unit = tuple([u.degree]", "not None: if isinstance(axes_names, six.string_types): axes_names = (axes_names,) else: axes_names", "frame.\"\"\" return self._axes_names @property def axes_order(self): \"\"\" A tuple of", "name of this frame.\"\"\" return self._name @name.setter def name(self, val):", "\"\"\" def __init__(self, axes_order=(0,), reference_frame=None, unit=None, axes_names=None, name=None, reference_position=None): super(SpectralFrame,", "axes in this frame. name : str Name of this", "the input data that corresponds to this axis. reference_frame :", "self.reference_frame is not None: fmt += \", reference_frame={0}\".format(self.reference_frame) fmt +=", "position. 
unit : str or units.Unit instance or iterable of", "for i in self.axes_order] coo = tuple([arg * un for", "u from astropy import utils as astutil from astropy import", "None: self._name = self.__class__.__name__ else: self._name = name if reference_position", "reference_position(self): try: return self._reference_position except AttributeError: return None def input_axes(self,", "not None: if reference_frame.name.upper() in STANDARD_REFERENCE_FRAMES: _axes_names = list(reference_frame.representation_component_names.values()) if", "None: unit = tuple([u.degree] * naxes) axes_type = ['SPATIAL'] *", "axes_names=axes_names, unit=unit) def coordinates(self, *args): args = [args[i] for i", "the current frame is used to compute the mapping inputs:", "tuple([u.Unit(au) for au in unit]) if axes_names is not None:", "__init__(self, axes_order=None, reference_frame=None, unit=None, axes_names=None, name=None): naxes = 2 if", "= reference_frame if unit is not None: if astutil.isiterable(unit): unit", "= list(reference_frame.representation_component_units.values()) if unit is None and _unit: unit =", "NotImplementedError(\"Subclasses may implement this\") class CelestialFrame(CoordinateFrame): \"\"\" Celestial Frame Representation", "reference frame. reference_position : str Reference position. unit : str", "unit is None and _unit: unit = _unit if axes_order", "__init__(self, frames, name=None): self._frames = frames[:] naxes = sum([frame._naxes for", "naxes = 2 if reference_frame is not None: if reference_frame.name.upper()", "\"\"\" The number of axes intheis frame.\"\"\" return self._naxes @property", "def axes_order(self): \"\"\" A tuple of indices which map inputs", "coord.SkyCoord(*args, unit=self.unit, frame=self._reference_frame) except: raise class SpectralFrame(CoordinateFrame): \"\"\" Represents Spectral", "tuple(axes_type) self._reference_frame = reference_frame if unit is not None: if", "astropy import utils as astutil from astropy import coordinates as", "= '<{0}(name=\"{1}\", unit={2}, axes_names={3}, axes_order={4}'.format( self.__class__.__name__, self.name, self.unit, self.axes_names, self.axes_order)", "axes_names = _axes_names naxes = len(_axes_names) _unit = list(reference_frame.representation_component_units.values()) if", "if unit is not None: if astutil.isiterable(unit): unit = tuple(unit)", "self.unit, self.axes_names, self.axes_order) if self.reference_position is not None: fmt +=", "axis name. name : str Name for this frame. \"\"\"", "\"\"\" def __init__(self, frames, name=None): self._frames = frames[:] naxes =", "astropy import coordinates as coord from astropy.extern import six from", "list(range(naxes)) axes_names = list(range(naxes)) axes_order = [] for frame in", "Frame Parameters ---------- axes_order : tuple or int A dimension", "frames, name=None): self._frames = frames[:] naxes = sum([frame._naxes for frame", "astropy.units.Unit Unit for each axis. axes_names : list Names of", "ValueError(\"Number of axes names does not match number of axes.\")", "compute the mapping inputs: outputs. \"\"\" sep = self._separable(start_frame) inputs", "of axes, \" \"axes_order should contain unique numbers, \" \"got", "dimension in the input data that corresponds to this axis.", "name=None): self._naxes = naxes self._axes_order = tuple(axes_order) if isinstance(axes_type, six.string_types):", ": 'SPATIAL', 'SPECTRAL', 'TIME'. 
\"\"\" return self._axes_type def coordinates(self, *args):", "name=None): self._frames = frames[:] naxes = sum([frame._naxes for frame in", "\"HELIOCENTER\", \"TOPOCENTER\", \"LSR\", \"LSRK\", \"LSRD\", \"GALACTIC_CENTER\", \"LOCAL_GROUP_CENTER\"] class CoordinateFrame(object): \"\"\"", "reference_frame is not None: if reference_frame.name.upper() in STANDARD_REFERENCE_FRAMES: _axes_names =", "\"\"\" sep = self._separable(start_frame) inputs = [] for ax in", "self._reference_position = reference_position else: self._reference_position = None super(CoordinateFrame, self).__init__() def", "axes_names is None: axes_names = _axes_names naxes = len(_axes_names) _unit", "self._reference_position except AttributeError: return None def input_axes(self, start_frame=None): \"\"\" Computes", "to world coordinate objects). unit : str or units.Unit instance", "data that corresponds to this axis. reference_frame : astropy.coordinates.builtin_frames Reference", "super(SpectralFrame, self).__init__(naxes=1, axes_type=\"SPECTRAL\", axes_order=axes_order, axes_names=axes_names, reference_frame=reference_frame, unit=unit, name=name, reference_position=reference_position) def", "__init__(self, naxes, axes_type, axes_order, reference_frame=None, reference_position=None, unit=None, axes_names=None, name=None): self._naxes", "reference_frame.name.upper() in STANDARD_REFERENCE_FRAMES: _axes_names = list(reference_frame.representation_component_names.values()) if 'distance' in _axes_names:", "None def input_axes(self, start_frame=None): \"\"\" Computes which axes in `start_frame`", "= [args[i] for i in frame.axes_order] print(frame, fargs, frame.axes_order) coo.append(frame.coordinates(*fargs))", "Spectral unit. axes_names : str Spectral axis name. name :", "of frames (TimeFrame, CelestialFrame, SpectralFrame, CoordinateFrame). name : str Name", "return self._reference_position except AttributeError: return None def input_axes(self, start_frame=None): \"\"\"", "unit=(u.pix, u.pix), axes_names=('x', 'y'), name=None): super(Frame2D, self).__init__(2, [\"SPATIAL\", \"SPATIAL\"], axes_order,", "\"TOPOCENTER\", \"LSR\", \"LSRK\", \"LSRD\", \"GALACTIC_CENTER\", \"LOCAL_GROUP_CENTER\"] class CoordinateFrame(object): \"\"\" Base", "Reference frame (usually used with output_frame to convert to world", "axes_names(self): \"\"\" Names of axes in the frame.\"\"\" return self._axes_names", "axes_order=axes_order, axes_names=axes_names, reference_frame=reference_frame, unit=unit, name=name, reference_position=reference_position) def coordinates(self, *args): if", "of axes.\") else: self._unit = tuple([u.Unit(au) for au in unit])", "axes_type=axes_type, axes_order=axes_order, reference_frame=reference_frame, unit=unit, axes_names=axes_names, name=name) def coordinates(self, *args): \"\"\"", "unit. axes_names : str Spectral axis name. name : str", "return self.__class__.__name__ @property def name(self): \"\"\" A custom name of", "names does not match number of axes.\") else: axes_names =", "utils as astutil from astropy import coordinates as coord from", "fmt def __str__(self): if self._name is not None: return self._name" ]
[ "the locator item in item list (you can unfold it", "Attached item will show up under the locator item in", "def isRotationConstraint(self): \"\"\" Tests if this is rotation constraint. Returns", "constraint offset vector. Returns ------- modo.Vector3 \"\"\" x = self._item.channel('offset.X').get()", "class represents Transform Constraint channel modifier. Parameters ---------- modoItem :", "'rot' SCALE = 'scl' class CMTransformConstraint(object): \"\"\" This class represents", "show up under the locator item in item list (you", "= itemSelection.getOfTypeModo(\"cmTransformConstraint\")[0] cnsItem.name = name ChannelModifierUtils.attachModifierToItem(cnsItem, hostItem) return CMTransformConstraint(cnsItem) @property", "assembly pass an item that is not a group. This", "item list). Attached modifiers are getting deleted together with locator", "------- str One of TransformConstraintOperation constants. \"\"\" return self._item.channel('operation').get() @property", "to which the constraint will be added. Passing this item", "new offset for the constraint. Parameters ---------- offsetVec : modo.Vector3", "of TransformConstraintOperation constants. \"\"\" return self._item.channel('operation').get() @property def inputChannel(self): return", "to locator type item. Attached item will show up under", "@offset.setter def offset(self, offsetVec): \"\"\" Sets new offset for the", "type item. Attached item will show up under the locator", "constraint to the scene. Parameters ---------- assemblyItem : modo.Item This", "this is rotation constraint. Returns ------- bool \"\"\" return self.operation", "outputChannel(self): return self._item.channel('matrixOutput') @property def isRotationConstraint(self): \"\"\" Tests if this", "Tests if this is rotation constraint. Returns ------- bool \"\"\"", "that is not a group. This doesn't throw an error", "run('modifier.create \"cmTransformConstraint:rot\" item:{%s} insert:false' % assemblyItem.id) cnsItem = itemSelection.getOfTypeModo(\"cmTransformConstraint\")[0] cnsItem.name", "self._item # -------- Private methods def __init__(self, modoItem): if modoItem.type", "getting deleted together with locator they are attached to. Parameters", "'chanMods') class TransformConstraintOperation(object): POSITION = 'pos' ROTATION = 'rot' SCALE", "item you want to attach modifier to. \"\"\" item.ItemUtils.addForwardGraphConnections(modifierModoItem, hostModoItem,", "to. \"\"\" item.ItemUtils.addForwardGraphConnections(modifierModoItem, hostModoItem, 'chanMods') class TransformConstraintOperation(object): POSITION = 'pos'", "== self.Operation.ROTATION @property def offset(self): \"\"\" Gets the constraint offset", "you don't want to add constraints to any assembly pass", "modo.Item Constraint can be attached to an item such that", "to an item such that it'll be under this item", "= TransformConstraintOperation @classmethod def new(cls, assemblyItem, hostItem, name='TransformConstraint'): \"\"\" Adds", "modifiers are getting deleted together with locator they are attached", "item will show up under the locator item in item", "-------- Private methods def __init__(self, modoItem): if modoItem.type != 'cmTransformConstraint':", "POSITION = 'pos' ROTATION = 'rot' SCALE = 'scl' class", "modo import select import item from run import run class", "Parameters ---------- assemblyItem : modo.Item This is assembly item to", "= 'rot' SCALE = 'scl' class CMTransformConstraint(object): \"\"\" This class", "locator they are attached to. 
import lx
import modo

import select
import item
from run import run


class ChannelModifierUtils(object):

    @classmethod
    def attachModifierToItem(cls, modifierModoItem, hostModoItem):
        """ Allows for attaching a modifier to a locator type item.

        The attached item will show up under the locator item in the item
        list (you can unfold it with the little plus icon next to the item
        name). Attached modifiers are deleted together with the locator
        they are attached to.

        Parameters
        ----------
        modifierModoItem : modo.Item
            Modifier item that you want to attach.

        hostModoItem : modo.Item
            Locator type item you want to attach the modifier to.
        """
        item.ItemUtils.addForwardGraphConnections(modifierModoItem, hostModoItem, 'chanMods')


class TransformConstraintOperation(object):
    POSITION = 'pos'
    ROTATION = 'rot'
    SCALE = 'scl'


class CMTransformConstraint(object):
    """ This class represents a Transform Constraint channel modifier.

    Parameters
    ----------
    modoItem : modo.Item
        The constraint modo item.
    """

    Operation = TransformConstraintOperation

    @classmethod
    def new(cls, assemblyItem, hostItem, name='TransformConstraint'):
        """ Adds a new transform constraint to the scene.

        Parameters
        ----------
        assemblyItem : modo.Item
            The assembly item to which the constraint will be added.
            Passing this item is mandatory. However, if you don't want to
            add the constraint to any assembly, pass an item that is not a
            group; this doesn't throw an error and doesn't add the
            constraint to any group either.

        hostItem : modo.Item
            The constraint can be attached to an item such that it'll be
            under this item in the item list. It'll also get deleted when
            the host item is deleted.

        name : str
            Name for the new constraint item.

        Returns
        -------
        CMTransformConstraint
        """
        itemSelection = select.ItemSelection()
        itemSelection.clear()

        # The ":rot" suffix selects the rotation operation for the new constraint.
        run('modifier.create "cmTransformConstraint:rot" item:{%s} insert:false' % assemblyItem.id)

        cnsItem = itemSelection.getOfTypeModo("cmTransformConstraint")[0]
        cnsItem.name = name
        ChannelModifierUtils.attachModifierToItem(cnsItem, hostItem)
        return CMTransformConstraint(cnsItem)

    @property
    def operation(self):
        """ Gets the type of the constraint.

        Returns
        -------
        str
            One of the TransformConstraintOperation constants.
        """
        return self._item.channel('operation').get()

    @property
    def inputChannel(self):
        return self._item.channel('matrixInput')

    @property
    def outputChannel(self):
        return self._item.channel('matrixOutput')

    @property
    def isRotationConstraint(self):
        """ Tests whether this is a rotation constraint.

        Returns
        -------
        bool
        """
        return self.operation == self.Operation.ROTATION

    @property
    def offset(self):
        """ Gets the constraint offset vector.

        Returns
        -------
        modo.Vector3
        """
        x = self._item.channel('offset.X').get()
        y = self._item.channel('offset.Y').get()
        z = self._item.channel('offset.Z').get()
        return modo.Vector3(x, y, z)

    @offset.setter
    def offset(self, offsetVec):
        """ Sets a new offset for the constraint.

        Parameters
        ----------
        offsetVec : modo.Vector3
        """
        self._item.channel('offset.X').set(offsetVec[0], 0.0, key=False, action=lx.symbol.s_ACTIONLAYER_SETUP)
        self._item.channel('offset.Y').set(offsetVec[1], 0.0, key=False, action=lx.symbol.s_ACTIONLAYER_SETUP)
        self._item.channel('offset.Z').set(offsetVec[2], 0.0, key=False, action=lx.symbol.s_ACTIONLAYER_SETUP)

    @property
    def modoItem(self):
        return self._item

    # -------- Private methods

    def __init__(self, modoItem):
        if modoItem.type != 'cmTransformConstraint':
            raise TypeError
        self._item = modoItem
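
# ---------------------------------------------------------------------------
# Usage sketch (not part of the original module): a minimal example of
# creating and configuring a transform constraint, assuming it runs inside
# MODO where the lx/modo modules are available. The item names 'Rig' and
# 'Head' are hypothetical, and the modo.Scene()/scene.item() lookups are
# illustrative assumptions rather than calls made by this module.
# ---------------------------------------------------------------------------
def _exampleCreateRotationConstraint():
    scene = modo.Scene()
    rigAssembly = scene.item('Rig')    # assembly group item (hypothetical name)
    headLocator = scene.item('Head')   # locator hosting the constraint (hypothetical name)

    # Create the constraint, attach it to the host locator and rename it.
    cns = CMTransformConstraint.new(rigAssembly, headLocator, name='HeadRotConstraint')

    # Write an offset into the setup action layer via the property setter.
    cns.offset = modo.Vector3(0.0, 0.1, 0.0)

    if cns.isRotationConstraint:
        lx.out('Created rotation constraint: %s' % cns.modoItem.name)
    return cns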