""" Python Character Mapping Codec cp861 generated from 'VENDORS/MICSFT/PC/CP861.TXT' with gencodec.py.

"""#"
import codecs
### Codec APIs
class Codec(codecs.Codec):
    """Stateless cp861 codec delegating to the C-level charmap machinery."""

    def encode(self, input, errors='strict'):
        # charmap_encode returns a (bytes, length_consumed) tuple.
        return codecs.charmap_encode(input, errors, encoding_map)

    def decode(self, input, errors='strict'):
        # charmap_decode returns a (text, length_consumed) tuple.
        return codecs.charmap_decode(input, errors, decoding_table)
class IncrementalEncoder(codecs.IncrementalEncoder):
    def encode(self, input, final=False):
        """Encode *input*; the codec is stateless, so *final* is irrelevant."""
        encoded, _consumed = codecs.charmap_encode(input, self.errors, encoding_map)
        return encoded
class IncrementalDecoder(codecs.IncrementalDecoder):
    def decode(self, input, final=False):
        """Decode *input*; the codec is stateless, so *final* is irrelevant."""
        decoded, _consumed = codecs.charmap_decode(input, self.errors, decoding_table)
        return decoded
class StreamWriter(Codec, codecs.StreamWriter):
    """Stream writer: Codec.encode combined with codecs.StreamWriter."""
class StreamReader(Codec, codecs.StreamReader):
    """Stream reader: Codec.decode combined with codecs.StreamReader."""
### encodings module API
def getregentry():
    """Return the codecs.CodecInfo registry entry for the cp861 encoding."""
    codec = Codec()
    return codecs.CodecInfo(
        name='cp861',
        encode=codec.encode,
        decode=codec.decode,
        incrementalencoder=IncrementalEncoder,
        incrementaldecoder=IncrementalDecoder,
        streamreader=StreamReader,
        streamwriter=StreamWriter,
    )
### Decoding Map

# Start from the 1:1 identity mapping for all 256 byte values, then
# override the upper half (0x80-0xFF) with the CP861 (MS-DOS Icelandic)
# Unicode code points. Keys are byte values, values are code points.
decoding_map = codecs.make_identity_dict(range(256))
decoding_map.update({
    0x0080: 0x00c7,  # LATIN CAPITAL LETTER C WITH CEDILLA
    0x0081: 0x00fc,  # LATIN SMALL LETTER U WITH DIAERESIS
    0x0082: 0x00e9,  # LATIN SMALL LETTER E WITH ACUTE
    0x0083: 0x00e2,  # LATIN SMALL LETTER A WITH CIRCUMFLEX
    0x0084: 0x00e4,  # LATIN SMALL LETTER A WITH DIAERESIS
    0x0085: 0x00e0,  # LATIN SMALL LETTER A WITH GRAVE
    0x0086: 0x00e5,  # LATIN SMALL LETTER A WITH RING ABOVE
    0x0087: 0x00e7,  # LATIN SMALL LETTER C WITH CEDILLA
    0x0088: 0x00ea,  # LATIN SMALL LETTER E WITH CIRCUMFLEX
    0x0089: 0x00eb,  # LATIN SMALL LETTER E WITH DIAERESIS
    0x008a: 0x00e8,  # LATIN SMALL LETTER E WITH GRAVE
    0x008b: 0x00d0,  # LATIN CAPITAL LETTER ETH
    0x008c: 0x00f0,  # LATIN SMALL LETTER ETH
    0x008d: 0x00de,  # LATIN CAPITAL LETTER THORN
    0x008e: 0x00c4,  # LATIN CAPITAL LETTER A WITH DIAERESIS
    0x008f: 0x00c5,  # LATIN CAPITAL LETTER A WITH RING ABOVE
    0x0090: 0x00c9,  # LATIN CAPITAL LETTER E WITH ACUTE
    0x0091: 0x00e6,  # LATIN SMALL LIGATURE AE
    0x0092: 0x00c6,  # LATIN CAPITAL LIGATURE AE
    0x0093: 0x00f4,  # LATIN SMALL LETTER O WITH CIRCUMFLEX
    0x0094: 0x00f6,  # LATIN SMALL LETTER O WITH DIAERESIS
    0x0095: 0x00fe,  # LATIN SMALL LETTER THORN
    0x0096: 0x00fb,  # LATIN SMALL LETTER U WITH CIRCUMFLEX
    0x0097: 0x00dd,  # LATIN CAPITAL LETTER Y WITH ACUTE
    0x0098: 0x00fd,  # LATIN SMALL LETTER Y WITH ACUTE
    0x0099: 0x00d6,  # LATIN CAPITAL LETTER O WITH DIAERESIS
    0x009a: 0x00dc,  # LATIN CAPITAL LETTER U WITH DIAERESIS
    0x009b: 0x00f8,  # LATIN SMALL LETTER O WITH STROKE
    0x009c: 0x00a3,  # POUND SIGN
    0x009d: 0x00d8,  # LATIN CAPITAL LETTER O WITH STROKE
    0x009e: 0x20a7,  # PESETA SIGN
    0x009f: 0x0192,  # LATIN SMALL LETTER F WITH HOOK
    0x00a0: 0x00e1,  # LATIN SMALL LETTER A WITH ACUTE
    0x00a1: 0x00ed,  # LATIN SMALL LETTER I WITH ACUTE
    0x00a2: 0x00f3,  # LATIN SMALL LETTER O WITH ACUTE
    0x00a3: 0x00fa,  # LATIN SMALL LETTER U WITH ACUTE
    0x00a4: 0x00c1,  # LATIN CAPITAL LETTER A WITH ACUTE
    0x00a5: 0x00cd,  # LATIN CAPITAL LETTER I WITH ACUTE
    0x00a6: 0x00d3,  # LATIN CAPITAL LETTER O WITH ACUTE
    0x00a7: 0x00da,  # LATIN CAPITAL LETTER U WITH ACUTE
    0x00a8: 0x00bf,  # INVERTED QUESTION MARK
    0x00a9: 0x2310,  # REVERSED NOT SIGN
    0x00aa: 0x00ac,  # NOT SIGN
    0x00ab: 0x00bd,  # VULGAR FRACTION ONE HALF
    0x00ac: 0x00bc,  # VULGAR FRACTION ONE QUARTER
    0x00ad: 0x00a1,  # INVERTED EXCLAMATION MARK
    0x00ae: 0x00ab,  # LEFT-POINTING DOUBLE ANGLE QUOTATION MARK
    0x00af: 0x00bb,  # RIGHT-POINTING DOUBLE ANGLE QUOTATION MARK
    0x00b0: 0x2591,  # LIGHT SHADE
    0x00b1: 0x2592,  # MEDIUM SHADE
    0x00b2: 0x2593,  # DARK SHADE
    0x00b3: 0x2502,  # BOX DRAWINGS LIGHT VERTICAL
    0x00b4: 0x2524,  # BOX DRAWINGS LIGHT VERTICAL AND LEFT
    0x00b5: 0x2561,  # BOX DRAWINGS VERTICAL SINGLE AND LEFT DOUBLE
    0x00b6: 0x2562,  # BOX DRAWINGS VERTICAL DOUBLE AND LEFT SINGLE
    0x00b7: 0x2556,  # BOX DRAWINGS DOWN DOUBLE AND LEFT SINGLE
    0x00b8: 0x2555,  # BOX DRAWINGS DOWN SINGLE AND LEFT DOUBLE
    0x00b9: 0x2563,  # BOX DRAWINGS DOUBLE VERTICAL AND LEFT
    0x00ba: 0x2551,  # BOX DRAWINGS DOUBLE VERTICAL
    0x00bb: 0x2557,  # BOX DRAWINGS DOUBLE DOWN AND LEFT
    0x00bc: 0x255d,  # BOX DRAWINGS DOUBLE UP AND LEFT
    0x00bd: 0x255c,  # BOX DRAWINGS UP DOUBLE AND LEFT SINGLE
    0x00be: 0x255b,  # BOX DRAWINGS UP SINGLE AND LEFT DOUBLE
    0x00bf: 0x2510,  # BOX DRAWINGS LIGHT DOWN AND LEFT
    0x00c0: 0x2514,  # BOX DRAWINGS LIGHT UP AND RIGHT
    0x00c1: 0x2534,  # BOX DRAWINGS LIGHT UP AND HORIZONTAL
    0x00c2: 0x252c,  # BOX DRAWINGS LIGHT DOWN AND HORIZONTAL
    0x00c3: 0x251c,  # BOX DRAWINGS LIGHT VERTICAL AND RIGHT
    0x00c4: 0x2500,  # BOX DRAWINGS LIGHT HORIZONTAL
    0x00c5: 0x253c,  # BOX DRAWINGS LIGHT VERTICAL AND HORIZONTAL
    0x00c6: 0x255e,  # BOX DRAWINGS VERTICAL SINGLE AND RIGHT DOUBLE
    0x00c7: 0x255f,  # BOX DRAWINGS VERTICAL DOUBLE AND RIGHT SINGLE
    0x00c8: 0x255a,  # BOX DRAWINGS DOUBLE UP AND RIGHT
    0x00c9: 0x2554,  # BOX DRAWINGS DOUBLE DOWN AND RIGHT
    0x00ca: 0x2569,  # BOX DRAWINGS DOUBLE UP AND HORIZONTAL
    0x00cb: 0x2566,  # BOX DRAWINGS DOUBLE DOWN AND HORIZONTAL
    0x00cc: 0x2560,  # BOX DRAWINGS DOUBLE VERTICAL AND RIGHT
    0x00cd: 0x2550,  # BOX DRAWINGS DOUBLE HORIZONTAL
    0x00ce: 0x256c,  # BOX DRAWINGS DOUBLE VERTICAL AND HORIZONTAL
    0x00cf: 0x2567,  # BOX DRAWINGS UP SINGLE AND HORIZONTAL DOUBLE
    0x00d0: 0x2568,  # BOX DRAWINGS UP DOUBLE AND HORIZONTAL SINGLE
    0x00d1: 0x2564,  # BOX DRAWINGS DOWN SINGLE AND HORIZONTAL DOUBLE
    0x00d2: 0x2565,  # BOX DRAWINGS DOWN DOUBLE AND HORIZONTAL SINGLE
    0x00d3: 0x2559,  # BOX DRAWINGS UP DOUBLE AND RIGHT SINGLE
    0x00d4: 0x2558,  # BOX DRAWINGS UP SINGLE AND RIGHT DOUBLE
    0x00d5: 0x2552,  # BOX DRAWINGS DOWN SINGLE AND RIGHT DOUBLE
    0x00d6: 0x2553,  # BOX DRAWINGS DOWN DOUBLE AND RIGHT SINGLE
    0x00d7: 0x256b,  # BOX DRAWINGS VERTICAL DOUBLE AND HORIZONTAL SINGLE
    0x00d8: 0x256a,  # BOX DRAWINGS VERTICAL SINGLE AND HORIZONTAL DOUBLE
    0x00d9: 0x2518,  # BOX DRAWINGS LIGHT UP AND LEFT
    0x00da: 0x250c,  # BOX DRAWINGS LIGHT DOWN AND RIGHT
    0x00db: 0x2588,  # FULL BLOCK
    0x00dc: 0x2584,  # LOWER HALF BLOCK
    0x00dd: 0x258c,  # LEFT HALF BLOCK
    0x00de: 0x2590,  # RIGHT HALF BLOCK
    0x00df: 0x2580,  # UPPER HALF BLOCK
    0x00e0: 0x03b1,  # GREEK SMALL LETTER ALPHA
    0x00e1: 0x00df,  # LATIN SMALL LETTER SHARP S
    0x00e2: 0x0393,  # GREEK CAPITAL LETTER GAMMA
    0x00e3: 0x03c0,  # GREEK SMALL LETTER PI
    0x00e4: 0x03a3,  # GREEK CAPITAL LETTER SIGMA
    0x00e5: 0x03c3,  # GREEK SMALL LETTER SIGMA
    0x00e6: 0x00b5,  # MICRO SIGN
    0x00e7: 0x03c4,  # GREEK SMALL LETTER TAU
    0x00e8: 0x03a6,  # GREEK CAPITAL LETTER PHI
    0x00e9: 0x0398,  # GREEK CAPITAL LETTER THETA
    0x00ea: 0x03a9,  # GREEK CAPITAL LETTER OMEGA
    0x00eb: 0x03b4,  # GREEK SMALL LETTER DELTA
    0x00ec: 0x221e,  # INFINITY
    0x00ed: 0x03c6,  # GREEK SMALL LETTER PHI
    0x00ee: 0x03b5,  # GREEK SMALL LETTER EPSILON
    0x00ef: 0x2229,  # INTERSECTION
    0x00f0: 0x2261,  # IDENTICAL TO
    0x00f1: 0x00b1,  # PLUS-MINUS SIGN
    0x00f2: 0x2265,  # GREATER-THAN OR EQUAL TO
    0x00f3: 0x2264,  # LESS-THAN OR EQUAL TO
    0x00f4: 0x2320,  # TOP HALF INTEGRAL
    0x00f5: 0x2321,  # BOTTOM HALF INTEGRAL
    0x00f6: 0x00f7,  # DIVISION SIGN
    0x00f7: 0x2248,  # ALMOST EQUAL TO
    0x00f8: 0x00b0,  # DEGREE SIGN
    0x00f9: 0x2219,  # BULLET OPERATOR
    0x00fa: 0x00b7,  # MIDDLE DOT
    0x00fb: 0x221a,  # SQUARE ROOT
    0x00fc: 0x207f,  # SUPERSCRIPT LATIN SMALL LETTER N
    0x00fd: 0x00b2,  # SUPERSCRIPT TWO
    0x00fe: 0x25a0,  # BLACK SQUARE
    0x00ff: 0x00a0,  # NO-BREAK SPACE
})
### Decoding Table

# 256-character string indexed by byte value: decoding_table[b] is the
# Unicode character for CP861 byte b (consumed by codecs.charmap_decode).
decoding_table = (
    '\x00'  # 0x0000 -> NULL
    '\x01'  # 0x0001 -> START OF HEADING
    '\x02'  # 0x0002 -> START OF TEXT
    '\x03'  # 0x0003 -> END OF TEXT
    '\x04'  # 0x0004 -> END OF TRANSMISSION
    '\x05'  # 0x0005 -> ENQUIRY
    '\x06'  # 0x0006 -> ACKNOWLEDGE
    '\x07'  # 0x0007 -> BELL
    '\x08'  # 0x0008 -> BACKSPACE
    '\t'  # 0x0009 -> HORIZONTAL TABULATION
    '\n'  # 0x000a -> LINE FEED
    '\x0b'  # 0x000b -> VERTICAL TABULATION
    '\x0c'  # 0x000c -> FORM FEED
    '\r'  # 0x000d -> CARRIAGE RETURN
    '\x0e'  # 0x000e -> SHIFT OUT
    '\x0f'  # 0x000f -> SHIFT IN
    '\x10'  # 0x0010 -> DATA LINK ESCAPE
    '\x11'  # 0x0011 -> DEVICE CONTROL ONE
    '\x12'  # 0x0012 -> DEVICE CONTROL TWO
    '\x13'  # 0x0013 -> DEVICE CONTROL THREE
    '\x14'  # 0x0014 -> DEVICE CONTROL FOUR
    '\x15'  # 0x0015 -> NEGATIVE ACKNOWLEDGE
    '\x16'  # 0x0016 -> SYNCHRONOUS IDLE
    '\x17'  # 0x0017 -> END OF TRANSMISSION BLOCK
    '\x18'  # 0x0018 -> CANCEL
    '\x19'  # 0x0019 -> END OF MEDIUM
    '\x1a'  # 0x001a -> SUBSTITUTE
    '\x1b'  # 0x001b -> ESCAPE
    '\x1c'  # 0x001c -> FILE SEPARATOR
    '\x1d'  # 0x001d -> GROUP SEPARATOR
    '\x1e'  # 0x001e -> RECORD SEPARATOR
    '\x1f'  # 0x001f -> UNIT SEPARATOR
    ' '  # 0x0020 -> SPACE
    '!'  # 0x0021 -> EXCLAMATION MARK
    '"'  # 0x0022 -> QUOTATION MARK
    '#'  # 0x0023 -> NUMBER SIGN
    '$'  # 0x0024 -> DOLLAR SIGN
    '%'  # 0x0025 -> PERCENT SIGN
    '&'  # 0x0026 -> AMPERSAND
    "'"  # 0x0027 -> APOSTROPHE
    '('  # 0x0028 -> LEFT PARENTHESIS
    ')'  # 0x0029 -> RIGHT PARENTHESIS
    '*'  # 0x002a -> ASTERISK
    '+'  # 0x002b -> PLUS SIGN
    ','  # 0x002c -> COMMA
    '-'  # 0x002d -> HYPHEN-MINUS
    '.'  # 0x002e -> FULL STOP
    '/'  # 0x002f -> SOLIDUS
    '0'  # 0x0030 -> DIGIT ZERO
    '1'  # 0x0031 -> DIGIT ONE
    '2'  # 0x0032 -> DIGIT TWO
    '3'  # 0x0033 -> DIGIT THREE
    '4'  # 0x0034 -> DIGIT FOUR
    '5'  # 0x0035 -> DIGIT FIVE
    '6'  # 0x0036 -> DIGIT SIX
    '7'  # 0x0037 -> DIGIT SEVEN
    '8'  # 0x0038 -> DIGIT EIGHT
    '9'  # 0x0039 -> DIGIT NINE
    ':'  # 0x003a -> COLON
    ';'  # 0x003b -> SEMICOLON
    '<'  # 0x003c -> LESS-THAN SIGN
    '='  # 0x003d -> EQUALS SIGN
    '>'  # 0x003e -> GREATER-THAN SIGN
    '?'  # 0x003f -> QUESTION MARK
    '@'  # 0x0040 -> COMMERCIAL AT
    'A'  # 0x0041 -> LATIN CAPITAL LETTER A
    'B'  # 0x0042 -> LATIN CAPITAL LETTER B
    'C'  # 0x0043 -> LATIN CAPITAL LETTER C
    'D'  # 0x0044 -> LATIN CAPITAL LETTER D
    'E'  # 0x0045 -> LATIN CAPITAL LETTER E
    'F'  # 0x0046 -> LATIN CAPITAL LETTER F
    'G'  # 0x0047 -> LATIN CAPITAL LETTER G
    'H'  # 0x0048 -> LATIN CAPITAL LETTER H
    'I'  # 0x0049 -> LATIN CAPITAL LETTER I
    'J'  # 0x004a -> LATIN CAPITAL LETTER J
    'K'  # 0x004b -> LATIN CAPITAL LETTER K
    'L'  # 0x004c -> LATIN CAPITAL LETTER L
    'M'  # 0x004d -> LATIN CAPITAL LETTER M
    'N'  # 0x004e -> LATIN CAPITAL LETTER N
    'O'  # 0x004f -> LATIN CAPITAL LETTER O
    'P'  # 0x0050 -> LATIN CAPITAL LETTER P
    'Q'  # 0x0051 -> LATIN CAPITAL LETTER Q
    'R'  # 0x0052 -> LATIN CAPITAL LETTER R
    'S'  # 0x0053 -> LATIN CAPITAL LETTER S
    'T'  # 0x0054 -> LATIN CAPITAL LETTER T
    'U'  # 0x0055 -> LATIN CAPITAL LETTER U
    'V'  # 0x0056 -> LATIN CAPITAL LETTER V
    'W'  # 0x0057 -> LATIN CAPITAL LETTER W
    'X'  # 0x0058 -> LATIN CAPITAL LETTER X
    'Y'  # 0x0059 -> LATIN CAPITAL LETTER Y
    'Z'  # 0x005a -> LATIN CAPITAL LETTER Z
    '['  # 0x005b -> LEFT SQUARE BRACKET
    '\\'  # 0x005c -> REVERSE SOLIDUS
    ']'  # 0x005d -> RIGHT SQUARE BRACKET
    '^'  # 0x005e -> CIRCUMFLEX ACCENT
    '_'  # 0x005f -> LOW LINE
    '`'  # 0x0060 -> GRAVE ACCENT
    'a'  # 0x0061 -> LATIN SMALL LETTER A
    'b'  # 0x0062 -> LATIN SMALL LETTER B
    'c'  # 0x0063 -> LATIN SMALL LETTER C
    'd'  # 0x0064 -> LATIN SMALL LETTER D
    'e'  # 0x0065 -> LATIN SMALL LETTER E
    'f'  # 0x0066 -> LATIN SMALL LETTER F
    'g'  # 0x0067 -> LATIN SMALL LETTER G
    'h'  # 0x0068 -> LATIN SMALL LETTER H
    'i'  # 0x0069 -> LATIN SMALL LETTER I
    'j'  # 0x006a -> LATIN SMALL LETTER J
    'k'  # 0x006b -> LATIN SMALL LETTER K
    'l'  # 0x006c -> LATIN SMALL LETTER L
    'm'  # 0x006d -> LATIN SMALL LETTER M
    'n'  # 0x006e -> LATIN SMALL LETTER N
    'o'  # 0x006f -> LATIN SMALL LETTER O
    'p'  # 0x0070 -> LATIN SMALL LETTER P
    'q'  # 0x0071 -> LATIN SMALL LETTER Q
    'r'  # 0x0072 -> LATIN SMALL LETTER R
    's'  # 0x0073 -> LATIN SMALL LETTER S
    't'  # 0x0074 -> LATIN SMALL LETTER T
    'u'  # 0x0075 -> LATIN SMALL LETTER U
    'v'  # 0x0076 -> LATIN SMALL LETTER V
    'w'  # 0x0077 -> LATIN SMALL LETTER W
    'x'  # 0x0078 -> LATIN SMALL LETTER X
    'y'  # 0x0079 -> LATIN SMALL LETTER Y
    'z'  # 0x007a -> LATIN SMALL LETTER Z
    '{'  # 0x007b -> LEFT CURLY BRACKET
    '|'  # 0x007c -> VERTICAL LINE
    '}'  # 0x007d -> RIGHT CURLY BRACKET
    '~'  # 0x007e -> TILDE
    '\x7f'  # 0x007f -> DELETE
    '\xc7'  # 0x0080 -> LATIN CAPITAL LETTER C WITH CEDILLA
    '\xfc'  # 0x0081 -> LATIN SMALL LETTER U WITH DIAERESIS
    '\xe9'  # 0x0082 -> LATIN SMALL LETTER E WITH ACUTE
    '\xe2'  # 0x0083 -> LATIN SMALL LETTER A WITH CIRCUMFLEX
    '\xe4'  # 0x0084 -> LATIN SMALL LETTER A WITH DIAERESIS
    '\xe0'  # 0x0085 -> LATIN SMALL LETTER A WITH GRAVE
    '\xe5'  # 0x0086 -> LATIN SMALL LETTER A WITH RING ABOVE
    '\xe7'  # 0x0087 -> LATIN SMALL LETTER C WITH CEDILLA
    '\xea'  # 0x0088 -> LATIN SMALL LETTER E WITH CIRCUMFLEX
    '\xeb'  # 0x0089 -> LATIN SMALL LETTER E WITH DIAERESIS
    '\xe8'  # 0x008a -> LATIN SMALL LETTER E WITH GRAVE
    '\xd0'  # 0x008b -> LATIN CAPITAL LETTER ETH
    '\xf0'  # 0x008c -> LATIN SMALL LETTER ETH
    '\xde'  # 0x008d -> LATIN CAPITAL LETTER THORN
    '\xc4'  # 0x008e -> LATIN CAPITAL LETTER A WITH DIAERESIS
    '\xc5'  # 0x008f -> LATIN CAPITAL LETTER A WITH RING ABOVE
    '\xc9'  # 0x0090 -> LATIN CAPITAL LETTER E WITH ACUTE
    '\xe6'  # 0x0091 -> LATIN SMALL LIGATURE AE
    '\xc6'  # 0x0092 -> LATIN CAPITAL LIGATURE AE
    '\xf4'  # 0x0093 -> LATIN SMALL LETTER O WITH CIRCUMFLEX
    '\xf6'  # 0x0094 -> LATIN SMALL LETTER O WITH DIAERESIS
    '\xfe'  # 0x0095 -> LATIN SMALL LETTER THORN
    '\xfb'  # 0x0096 -> LATIN SMALL LETTER U WITH CIRCUMFLEX
    '\xdd'  # 0x0097 -> LATIN CAPITAL LETTER Y WITH ACUTE
    '\xfd'  # 0x0098 -> LATIN SMALL LETTER Y WITH ACUTE
    '\xd6'  # 0x0099 -> LATIN CAPITAL LETTER O WITH DIAERESIS
    '\xdc'  # 0x009a -> LATIN CAPITAL LETTER U WITH DIAERESIS
    '\xf8'  # 0x009b -> LATIN SMALL LETTER O WITH STROKE
    '\xa3'  # 0x009c -> POUND SIGN
    '\xd8'  # 0x009d -> LATIN CAPITAL LETTER O WITH STROKE
    '\u20a7'  # 0x009e -> PESETA SIGN
    '\u0192'  # 0x009f -> LATIN SMALL LETTER F WITH HOOK
    '\xe1'  # 0x00a0 -> LATIN SMALL LETTER A WITH ACUTE
    '\xed'  # 0x00a1 -> LATIN SMALL LETTER I WITH ACUTE
    '\xf3'  # 0x00a2 -> LATIN SMALL LETTER O WITH ACUTE
    '\xfa'  # 0x00a3 -> LATIN SMALL LETTER U WITH ACUTE
    '\xc1'  # 0x00a4 -> LATIN CAPITAL LETTER A WITH ACUTE
    '\xcd'  # 0x00a5 -> LATIN CAPITAL LETTER I WITH ACUTE
    '\xd3'  # 0x00a6 -> LATIN CAPITAL LETTER O WITH ACUTE
    '\xda'  # 0x00a7 -> LATIN CAPITAL LETTER U WITH ACUTE
    '\xbf'  # 0x00a8 -> INVERTED QUESTION MARK
    '\u2310'  # 0x00a9 -> REVERSED NOT SIGN
    '\xac'  # 0x00aa -> NOT SIGN
    '\xbd'  # 0x00ab -> VULGAR FRACTION ONE HALF
    '\xbc'  # 0x00ac -> VULGAR FRACTION ONE QUARTER
    '\xa1'  # 0x00ad -> INVERTED EXCLAMATION MARK
    '\xab'  # 0x00ae -> LEFT-POINTING DOUBLE ANGLE QUOTATION MARK
    '\xbb'  # 0x00af -> RIGHT-POINTING DOUBLE ANGLE QUOTATION MARK
    '\u2591'  # 0x00b0 -> LIGHT SHADE
    '\u2592'  # 0x00b1 -> MEDIUM SHADE
    '\u2593'  # 0x00b2 -> DARK SHADE
    '\u2502'  # 0x00b3 -> BOX DRAWINGS LIGHT VERTICAL
    '\u2524'  # 0x00b4 -> BOX DRAWINGS LIGHT VERTICAL AND LEFT
    '\u2561'  # 0x00b5 -> BOX DRAWINGS VERTICAL SINGLE AND LEFT DOUBLE
    '\u2562'  # 0x00b6 -> BOX DRAWINGS VERTICAL DOUBLE AND LEFT SINGLE
    '\u2556'  # 0x00b7 -> BOX DRAWINGS DOWN DOUBLE AND LEFT SINGLE
    '\u2555'  # 0x00b8 -> BOX DRAWINGS DOWN SINGLE AND LEFT DOUBLE
    '\u2563'  # 0x00b9 -> BOX DRAWINGS DOUBLE VERTICAL AND LEFT
    '\u2551'  # 0x00ba -> BOX DRAWINGS DOUBLE VERTICAL
    '\u2557'  # 0x00bb -> BOX DRAWINGS DOUBLE DOWN AND LEFT
    '\u255d'  # 0x00bc -> BOX DRAWINGS DOUBLE UP AND LEFT
    '\u255c'  # 0x00bd -> BOX DRAWINGS UP DOUBLE AND LEFT SINGLE
    '\u255b'  # 0x00be -> BOX DRAWINGS UP SINGLE AND LEFT DOUBLE
    '\u2510'  # 0x00bf -> BOX DRAWINGS LIGHT DOWN AND LEFT
    '\u2514'  # 0x00c0 -> BOX DRAWINGS LIGHT UP AND RIGHT
    '\u2534'  # 0x00c1 -> BOX DRAWINGS LIGHT UP AND HORIZONTAL
    '\u252c'  # 0x00c2 -> BOX DRAWINGS LIGHT DOWN AND HORIZONTAL
    '\u251c'  # 0x00c3 -> BOX DRAWINGS LIGHT VERTICAL AND RIGHT
    '\u2500'  # 0x00c4 -> BOX DRAWINGS LIGHT HORIZONTAL
    '\u253c'  # 0x00c5 -> BOX DRAWINGS LIGHT VERTICAL AND HORIZONTAL
    '\u255e'  # 0x00c6 -> BOX DRAWINGS VERTICAL SINGLE AND RIGHT DOUBLE
    '\u255f'  # 0x00c7 -> BOX DRAWINGS VERTICAL DOUBLE AND RIGHT SINGLE
    '\u255a'  # 0x00c8 -> BOX DRAWINGS DOUBLE UP AND RIGHT
    '\u2554'  # 0x00c9 -> BOX DRAWINGS DOUBLE DOWN AND RIGHT
    '\u2569'  # 0x00ca -> BOX DRAWINGS DOUBLE UP AND HORIZONTAL
    '\u2566'  # 0x00cb -> BOX DRAWINGS DOUBLE DOWN AND HORIZONTAL
    '\u2560'  # 0x00cc -> BOX DRAWINGS DOUBLE VERTICAL AND RIGHT
    '\u2550'  # 0x00cd -> BOX DRAWINGS DOUBLE HORIZONTAL
    '\u256c'  # 0x00ce -> BOX DRAWINGS DOUBLE VERTICAL AND HORIZONTAL
    '\u2567'  # 0x00cf -> BOX DRAWINGS UP SINGLE AND HORIZONTAL DOUBLE
    '\u2568'  # 0x00d0 -> BOX DRAWINGS UP DOUBLE AND HORIZONTAL SINGLE
    '\u2564'  # 0x00d1 -> BOX DRAWINGS DOWN SINGLE AND HORIZONTAL DOUBLE
    '\u2565'  # 0x00d2 -> BOX DRAWINGS DOWN DOUBLE AND HORIZONTAL SINGLE
    '\u2559'  # 0x00d3 -> BOX DRAWINGS UP DOUBLE AND RIGHT SINGLE
    '\u2558'  # 0x00d4 -> BOX DRAWINGS UP SINGLE AND RIGHT DOUBLE
    '\u2552'  # 0x00d5 -> BOX DRAWINGS DOWN SINGLE AND RIGHT DOUBLE
    '\u2553'  # 0x00d6 -> BOX DRAWINGS DOWN DOUBLE AND RIGHT SINGLE
    '\u256b'  # 0x00d7 -> BOX DRAWINGS VERTICAL DOUBLE AND HORIZONTAL SINGLE
    '\u256a'  # 0x00d8 -> BOX DRAWINGS VERTICAL SINGLE AND HORIZONTAL DOUBLE
    '\u2518'  # 0x00d9 -> BOX DRAWINGS LIGHT UP AND LEFT
    '\u250c'  # 0x00da -> BOX DRAWINGS LIGHT DOWN AND RIGHT
    '\u2588'  # 0x00db -> FULL BLOCK
    '\u2584'  # 0x00dc -> LOWER HALF BLOCK
    '\u258c'  # 0x00dd -> LEFT HALF BLOCK
    '\u2590'  # 0x00de -> RIGHT HALF BLOCK
    '\u2580'  # 0x00df -> UPPER HALF BLOCK
    '\u03b1'  # 0x00e0 -> GREEK SMALL LETTER ALPHA
    '\xdf'  # 0x00e1 -> LATIN SMALL LETTER SHARP S
    '\u0393'  # 0x00e2 -> GREEK CAPITAL LETTER GAMMA
    '\u03c0'  # 0x00e3 -> GREEK SMALL LETTER PI
    '\u03a3'  # 0x00e4 -> GREEK CAPITAL LETTER SIGMA
    '\u03c3'  # 0x00e5 -> GREEK SMALL LETTER SIGMA
    '\xb5'  # 0x00e6 -> MICRO SIGN
    '\u03c4'  # 0x00e7 -> GREEK SMALL LETTER TAU
    '\u03a6'  # 0x00e8 -> GREEK CAPITAL LETTER PHI
    '\u0398'  # 0x00e9 -> GREEK CAPITAL LETTER THETA
    '\u03a9'  # 0x00ea -> GREEK CAPITAL LETTER OMEGA
    '\u03b4'  # 0x00eb -> GREEK SMALL LETTER DELTA
    '\u221e'  # 0x00ec -> INFINITY
    '\u03c6'  # 0x00ed -> GREEK SMALL LETTER PHI
    '\u03b5'  # 0x00ee -> GREEK SMALL LETTER EPSILON
    '\u2229'  # 0x00ef -> INTERSECTION
    '\u2261'  # 0x00f0 -> IDENTICAL TO
    '\xb1'  # 0x00f1 -> PLUS-MINUS SIGN
    '\u2265'  # 0x00f2 -> GREATER-THAN OR EQUAL TO
    '\u2264'  # 0x00f3 -> LESS-THAN OR EQUAL TO
    '\u2320'  # 0x00f4 -> TOP HALF INTEGRAL
    '\u2321'  # 0x00f5 -> BOTTOM HALF INTEGRAL
    '\xf7'  # 0x00f6 -> DIVISION SIGN
    '\u2248'  # 0x00f7 -> ALMOST EQUAL TO
    '\xb0'  # 0x00f8 -> DEGREE SIGN
    '\u2219'  # 0x00f9 -> BULLET OPERATOR
    '\xb7'  # 0x00fa -> MIDDLE DOT
    '\u221a'  # 0x00fb -> SQUARE ROOT
    '\u207f'  # 0x00fc -> SUPERSCRIPT LATIN SMALL LETTER N
    '\xb2'  # 0x00fd -> SUPERSCRIPT TWO
    '\u25a0'  # 0x00fe -> BLACK SQUARE
    '\xa0'  # 0x00ff -> NO-BREAK SPACE
)
### Encoding Map

# Inverse of the decoding table: maps a Unicode code point to the CP861
# byte value that encodes it (consumed by codecs.charmap_encode).
# Fix: the closing brace line had dataset-join garbage ("| /regulome_web-...")
# fused onto it, which made the module a syntax error; the dict now closes
# cleanly.  The mapping data itself is unchanged.
encoding_map = {
    0x0000: 0x0000,  # NULL
    0x0001: 0x0001,  # START OF HEADING
    0x0002: 0x0002,  # START OF TEXT
    0x0003: 0x0003,  # END OF TEXT
    0x0004: 0x0004,  # END OF TRANSMISSION
    0x0005: 0x0005,  # ENQUIRY
    0x0006: 0x0006,  # ACKNOWLEDGE
    0x0007: 0x0007,  # BELL
    0x0008: 0x0008,  # BACKSPACE
    0x0009: 0x0009,  # HORIZONTAL TABULATION
    0x000a: 0x000a,  # LINE FEED
    0x000b: 0x000b,  # VERTICAL TABULATION
    0x000c: 0x000c,  # FORM FEED
    0x000d: 0x000d,  # CARRIAGE RETURN
    0x000e: 0x000e,  # SHIFT OUT
    0x000f: 0x000f,  # SHIFT IN
    0x0010: 0x0010,  # DATA LINK ESCAPE
    0x0011: 0x0011,  # DEVICE CONTROL ONE
    0x0012: 0x0012,  # DEVICE CONTROL TWO
    0x0013: 0x0013,  # DEVICE CONTROL THREE
    0x0014: 0x0014,  # DEVICE CONTROL FOUR
    0x0015: 0x0015,  # NEGATIVE ACKNOWLEDGE
    0x0016: 0x0016,  # SYNCHRONOUS IDLE
    0x0017: 0x0017,  # END OF TRANSMISSION BLOCK
    0x0018: 0x0018,  # CANCEL
    0x0019: 0x0019,  # END OF MEDIUM
    0x001a: 0x001a,  # SUBSTITUTE
    0x001b: 0x001b,  # ESCAPE
    0x001c: 0x001c,  # FILE SEPARATOR
    0x001d: 0x001d,  # GROUP SEPARATOR
    0x001e: 0x001e,  # RECORD SEPARATOR
    0x001f: 0x001f,  # UNIT SEPARATOR
    0x0020: 0x0020,  # SPACE
    0x0021: 0x0021,  # EXCLAMATION MARK
    0x0022: 0x0022,  # QUOTATION MARK
    0x0023: 0x0023,  # NUMBER SIGN
    0x0024: 0x0024,  # DOLLAR SIGN
    0x0025: 0x0025,  # PERCENT SIGN
    0x0026: 0x0026,  # AMPERSAND
    0x0027: 0x0027,  # APOSTROPHE
    0x0028: 0x0028,  # LEFT PARENTHESIS
    0x0029: 0x0029,  # RIGHT PARENTHESIS
    0x002a: 0x002a,  # ASTERISK
    0x002b: 0x002b,  # PLUS SIGN
    0x002c: 0x002c,  # COMMA
    0x002d: 0x002d,  # HYPHEN-MINUS
    0x002e: 0x002e,  # FULL STOP
    0x002f: 0x002f,  # SOLIDUS
    0x0030: 0x0030,  # DIGIT ZERO
    0x0031: 0x0031,  # DIGIT ONE
    0x0032: 0x0032,  # DIGIT TWO
    0x0033: 0x0033,  # DIGIT THREE
    0x0034: 0x0034,  # DIGIT FOUR
    0x0035: 0x0035,  # DIGIT FIVE
    0x0036: 0x0036,  # DIGIT SIX
    0x0037: 0x0037,  # DIGIT SEVEN
    0x0038: 0x0038,  # DIGIT EIGHT
    0x0039: 0x0039,  # DIGIT NINE
    0x003a: 0x003a,  # COLON
    0x003b: 0x003b,  # SEMICOLON
    0x003c: 0x003c,  # LESS-THAN SIGN
    0x003d: 0x003d,  # EQUALS SIGN
    0x003e: 0x003e,  # GREATER-THAN SIGN
    0x003f: 0x003f,  # QUESTION MARK
    0x0040: 0x0040,  # COMMERCIAL AT
    0x0041: 0x0041,  # LATIN CAPITAL LETTER A
    0x0042: 0x0042,  # LATIN CAPITAL LETTER B
    0x0043: 0x0043,  # LATIN CAPITAL LETTER C
    0x0044: 0x0044,  # LATIN CAPITAL LETTER D
    0x0045: 0x0045,  # LATIN CAPITAL LETTER E
    0x0046: 0x0046,  # LATIN CAPITAL LETTER F
    0x0047: 0x0047,  # LATIN CAPITAL LETTER G
    0x0048: 0x0048,  # LATIN CAPITAL LETTER H
    0x0049: 0x0049,  # LATIN CAPITAL LETTER I
    0x004a: 0x004a,  # LATIN CAPITAL LETTER J
    0x004b: 0x004b,  # LATIN CAPITAL LETTER K
    0x004c: 0x004c,  # LATIN CAPITAL LETTER L
    0x004d: 0x004d,  # LATIN CAPITAL LETTER M
    0x004e: 0x004e,  # LATIN CAPITAL LETTER N
    0x004f: 0x004f,  # LATIN CAPITAL LETTER O
    0x0050: 0x0050,  # LATIN CAPITAL LETTER P
    0x0051: 0x0051,  # LATIN CAPITAL LETTER Q
    0x0052: 0x0052,  # LATIN CAPITAL LETTER R
    0x0053: 0x0053,  # LATIN CAPITAL LETTER S
    0x0054: 0x0054,  # LATIN CAPITAL LETTER T
    0x0055: 0x0055,  # LATIN CAPITAL LETTER U
    0x0056: 0x0056,  # LATIN CAPITAL LETTER V
    0x0057: 0x0057,  # LATIN CAPITAL LETTER W
    0x0058: 0x0058,  # LATIN CAPITAL LETTER X
    0x0059: 0x0059,  # LATIN CAPITAL LETTER Y
    0x005a: 0x005a,  # LATIN CAPITAL LETTER Z
    0x005b: 0x005b,  # LEFT SQUARE BRACKET
    0x005c: 0x005c,  # REVERSE SOLIDUS
    0x005d: 0x005d,  # RIGHT SQUARE BRACKET
    0x005e: 0x005e,  # CIRCUMFLEX ACCENT
    0x005f: 0x005f,  # LOW LINE
    0x0060: 0x0060,  # GRAVE ACCENT
    0x0061: 0x0061,  # LATIN SMALL LETTER A
    0x0062: 0x0062,  # LATIN SMALL LETTER B
    0x0063: 0x0063,  # LATIN SMALL LETTER C
    0x0064: 0x0064,  # LATIN SMALL LETTER D
    0x0065: 0x0065,  # LATIN SMALL LETTER E
    0x0066: 0x0066,  # LATIN SMALL LETTER F
    0x0067: 0x0067,  # LATIN SMALL LETTER G
    0x0068: 0x0068,  # LATIN SMALL LETTER H
    0x0069: 0x0069,  # LATIN SMALL LETTER I
    0x006a: 0x006a,  # LATIN SMALL LETTER J
    0x006b: 0x006b,  # LATIN SMALL LETTER K
    0x006c: 0x006c,  # LATIN SMALL LETTER L
    0x006d: 0x006d,  # LATIN SMALL LETTER M
    0x006e: 0x006e,  # LATIN SMALL LETTER N
    0x006f: 0x006f,  # LATIN SMALL LETTER O
    0x0070: 0x0070,  # LATIN SMALL LETTER P
    0x0071: 0x0071,  # LATIN SMALL LETTER Q
    0x0072: 0x0072,  # LATIN SMALL LETTER R
    0x0073: 0x0073,  # LATIN SMALL LETTER S
    0x0074: 0x0074,  # LATIN SMALL LETTER T
    0x0075: 0x0075,  # LATIN SMALL LETTER U
    0x0076: 0x0076,  # LATIN SMALL LETTER V
    0x0077: 0x0077,  # LATIN SMALL LETTER W
    0x0078: 0x0078,  # LATIN SMALL LETTER X
    0x0079: 0x0079,  # LATIN SMALL LETTER Y
    0x007a: 0x007a,  # LATIN SMALL LETTER Z
    0x007b: 0x007b,  # LEFT CURLY BRACKET
    0x007c: 0x007c,  # VERTICAL LINE
    0x007d: 0x007d,  # RIGHT CURLY BRACKET
    0x007e: 0x007e,  # TILDE
    0x007f: 0x007f,  # DELETE
    0x00a0: 0x00ff,  # NO-BREAK SPACE
    0x00a1: 0x00ad,  # INVERTED EXCLAMATION MARK
    0x00a3: 0x009c,  # POUND SIGN
    0x00ab: 0x00ae,  # LEFT-POINTING DOUBLE ANGLE QUOTATION MARK
    0x00ac: 0x00aa,  # NOT SIGN
    0x00b0: 0x00f8,  # DEGREE SIGN
    0x00b1: 0x00f1,  # PLUS-MINUS SIGN
    0x00b2: 0x00fd,  # SUPERSCRIPT TWO
    0x00b5: 0x00e6,  # MICRO SIGN
    0x00b7: 0x00fa,  # MIDDLE DOT
    0x00bb: 0x00af,  # RIGHT-POINTING DOUBLE ANGLE QUOTATION MARK
    0x00bc: 0x00ac,  # VULGAR FRACTION ONE QUARTER
    0x00bd: 0x00ab,  # VULGAR FRACTION ONE HALF
    0x00bf: 0x00a8,  # INVERTED QUESTION MARK
    0x00c1: 0x00a4,  # LATIN CAPITAL LETTER A WITH ACUTE
    0x00c4: 0x008e,  # LATIN CAPITAL LETTER A WITH DIAERESIS
    0x00c5: 0x008f,  # LATIN CAPITAL LETTER A WITH RING ABOVE
    0x00c6: 0x0092,  # LATIN CAPITAL LIGATURE AE
    0x00c7: 0x0080,  # LATIN CAPITAL LETTER C WITH CEDILLA
    0x00c9: 0x0090,  # LATIN CAPITAL LETTER E WITH ACUTE
    0x00cd: 0x00a5,  # LATIN CAPITAL LETTER I WITH ACUTE
    0x00d0: 0x008b,  # LATIN CAPITAL LETTER ETH
    0x00d3: 0x00a6,  # LATIN CAPITAL LETTER O WITH ACUTE
    0x00d6: 0x0099,  # LATIN CAPITAL LETTER O WITH DIAERESIS
    0x00d8: 0x009d,  # LATIN CAPITAL LETTER O WITH STROKE
    0x00da: 0x00a7,  # LATIN CAPITAL LETTER U WITH ACUTE
    0x00dc: 0x009a,  # LATIN CAPITAL LETTER U WITH DIAERESIS
    0x00dd: 0x0097,  # LATIN CAPITAL LETTER Y WITH ACUTE
    0x00de: 0x008d,  # LATIN CAPITAL LETTER THORN
    0x00df: 0x00e1,  # LATIN SMALL LETTER SHARP S
    0x00e0: 0x0085,  # LATIN SMALL LETTER A WITH GRAVE
    0x00e1: 0x00a0,  # LATIN SMALL LETTER A WITH ACUTE
    0x00e2: 0x0083,  # LATIN SMALL LETTER A WITH CIRCUMFLEX
    0x00e4: 0x0084,  # LATIN SMALL LETTER A WITH DIAERESIS
    0x00e5: 0x0086,  # LATIN SMALL LETTER A WITH RING ABOVE
    0x00e6: 0x0091,  # LATIN SMALL LIGATURE AE
    0x00e7: 0x0087,  # LATIN SMALL LETTER C WITH CEDILLA
    0x00e8: 0x008a,  # LATIN SMALL LETTER E WITH GRAVE
    0x00e9: 0x0082,  # LATIN SMALL LETTER E WITH ACUTE
    0x00ea: 0x0088,  # LATIN SMALL LETTER E WITH CIRCUMFLEX
    0x00eb: 0x0089,  # LATIN SMALL LETTER E WITH DIAERESIS
    0x00ed: 0x00a1,  # LATIN SMALL LETTER I WITH ACUTE
    0x00f0: 0x008c,  # LATIN SMALL LETTER ETH
    0x00f3: 0x00a2,  # LATIN SMALL LETTER O WITH ACUTE
    0x00f4: 0x0093,  # LATIN SMALL LETTER O WITH CIRCUMFLEX
    0x00f6: 0x0094,  # LATIN SMALL LETTER O WITH DIAERESIS
    0x00f7: 0x00f6,  # DIVISION SIGN
    0x00f8: 0x009b,  # LATIN SMALL LETTER O WITH STROKE
    0x00fa: 0x00a3,  # LATIN SMALL LETTER U WITH ACUTE
    0x00fb: 0x0096,  # LATIN SMALL LETTER U WITH CIRCUMFLEX
    0x00fc: 0x0081,  # LATIN SMALL LETTER U WITH DIAERESIS
    0x00fd: 0x0098,  # LATIN SMALL LETTER Y WITH ACUTE
    0x00fe: 0x0095,  # LATIN SMALL LETTER THORN
    0x0192: 0x009f,  # LATIN SMALL LETTER F WITH HOOK
    0x0393: 0x00e2,  # GREEK CAPITAL LETTER GAMMA
    0x0398: 0x00e9,  # GREEK CAPITAL LETTER THETA
    0x03a3: 0x00e4,  # GREEK CAPITAL LETTER SIGMA
    0x03a6: 0x00e8,  # GREEK CAPITAL LETTER PHI
    0x03a9: 0x00ea,  # GREEK CAPITAL LETTER OMEGA
    0x03b1: 0x00e0,  # GREEK SMALL LETTER ALPHA
    0x03b4: 0x00eb,  # GREEK SMALL LETTER DELTA
    0x03b5: 0x00ee,  # GREEK SMALL LETTER EPSILON
    0x03c0: 0x00e3,  # GREEK SMALL LETTER PI
    0x03c3: 0x00e5,  # GREEK SMALL LETTER SIGMA
    0x03c4: 0x00e7,  # GREEK SMALL LETTER TAU
    0x03c6: 0x00ed,  # GREEK SMALL LETTER PHI
    0x207f: 0x00fc,  # SUPERSCRIPT LATIN SMALL LETTER N
    0x20a7: 0x009e,  # PESETA SIGN
    0x2219: 0x00f9,  # BULLET OPERATOR
    0x221a: 0x00fb,  # SQUARE ROOT
    0x221e: 0x00ec,  # INFINITY
    0x2229: 0x00ef,  # INTERSECTION
    0x2248: 0x00f7,  # ALMOST EQUAL TO
    0x2261: 0x00f0,  # IDENTICAL TO
    0x2264: 0x00f3,  # LESS-THAN OR EQUAL TO
    0x2265: 0x00f2,  # GREATER-THAN OR EQUAL TO
    0x2310: 0x00a9,  # REVERSED NOT SIGN
    0x2320: 0x00f4,  # TOP HALF INTEGRAL
    0x2321: 0x00f5,  # BOTTOM HALF INTEGRAL
    0x2500: 0x00c4,  # BOX DRAWINGS LIGHT HORIZONTAL
    0x2502: 0x00b3,  # BOX DRAWINGS LIGHT VERTICAL
    0x250c: 0x00da,  # BOX DRAWINGS LIGHT DOWN AND RIGHT
    0x2510: 0x00bf,  # BOX DRAWINGS LIGHT DOWN AND LEFT
    0x2514: 0x00c0,  # BOX DRAWINGS LIGHT UP AND RIGHT
    0x2518: 0x00d9,  # BOX DRAWINGS LIGHT UP AND LEFT
    0x251c: 0x00c3,  # BOX DRAWINGS LIGHT VERTICAL AND RIGHT
    0x2524: 0x00b4,  # BOX DRAWINGS LIGHT VERTICAL AND LEFT
    0x252c: 0x00c2,  # BOX DRAWINGS LIGHT DOWN AND HORIZONTAL
    0x2534: 0x00c1,  # BOX DRAWINGS LIGHT UP AND HORIZONTAL
    0x253c: 0x00c5,  # BOX DRAWINGS LIGHT VERTICAL AND HORIZONTAL
    0x2550: 0x00cd,  # BOX DRAWINGS DOUBLE HORIZONTAL
    0x2551: 0x00ba,  # BOX DRAWINGS DOUBLE VERTICAL
    0x2552: 0x00d5,  # BOX DRAWINGS DOWN SINGLE AND RIGHT DOUBLE
    0x2553: 0x00d6,  # BOX DRAWINGS DOWN DOUBLE AND RIGHT SINGLE
    0x2554: 0x00c9,  # BOX DRAWINGS DOUBLE DOWN AND RIGHT
    0x2555: 0x00b8,  # BOX DRAWINGS DOWN SINGLE AND LEFT DOUBLE
    0x2556: 0x00b7,  # BOX DRAWINGS DOWN DOUBLE AND LEFT SINGLE
    0x2557: 0x00bb,  # BOX DRAWINGS DOUBLE DOWN AND LEFT
    0x2558: 0x00d4,  # BOX DRAWINGS UP SINGLE AND RIGHT DOUBLE
    0x2559: 0x00d3,  # BOX DRAWINGS UP DOUBLE AND RIGHT SINGLE
    0x255a: 0x00c8,  # BOX DRAWINGS DOUBLE UP AND RIGHT
    0x255b: 0x00be,  # BOX DRAWINGS UP SINGLE AND LEFT DOUBLE
    0x255c: 0x00bd,  # BOX DRAWINGS UP DOUBLE AND LEFT SINGLE
    0x255d: 0x00bc,  # BOX DRAWINGS DOUBLE UP AND LEFT
    0x255e: 0x00c6,  # BOX DRAWINGS VERTICAL SINGLE AND RIGHT DOUBLE
    0x255f: 0x00c7,  # BOX DRAWINGS VERTICAL DOUBLE AND RIGHT SINGLE
    0x2560: 0x00cc,  # BOX DRAWINGS DOUBLE VERTICAL AND RIGHT
    0x2561: 0x00b5,  # BOX DRAWINGS VERTICAL SINGLE AND LEFT DOUBLE
    0x2562: 0x00b6,  # BOX DRAWINGS VERTICAL DOUBLE AND LEFT SINGLE
    0x2563: 0x00b9,  # BOX DRAWINGS DOUBLE VERTICAL AND LEFT
    0x2564: 0x00d1,  # BOX DRAWINGS DOWN SINGLE AND HORIZONTAL DOUBLE
    0x2565: 0x00d2,  # BOX DRAWINGS DOWN DOUBLE AND HORIZONTAL SINGLE
    0x2566: 0x00cb,  # BOX DRAWINGS DOUBLE DOWN AND HORIZONTAL
    0x2567: 0x00cf,  # BOX DRAWINGS UP SINGLE AND HORIZONTAL DOUBLE
    0x2568: 0x00d0,  # BOX DRAWINGS UP DOUBLE AND HORIZONTAL SINGLE
    0x2569: 0x00ca,  # BOX DRAWINGS DOUBLE UP AND HORIZONTAL
    0x256a: 0x00d8,  # BOX DRAWINGS VERTICAL SINGLE AND HORIZONTAL DOUBLE
    0x256b: 0x00d7,  # BOX DRAWINGS VERTICAL DOUBLE AND HORIZONTAL SINGLE
    0x256c: 0x00ce,  # BOX DRAWINGS DOUBLE VERTICAL AND HORIZONTAL
    0x2580: 0x00df,  # UPPER HALF BLOCK
    0x2584: 0x00dc,  # LOWER HALF BLOCK
    0x2588: 0x00db,  # FULL BLOCK
    0x258c: 0x00dd,  # LEFT HALF BLOCK
    0x2590: 0x00de,  # RIGHT HALF BLOCK
    0x2591: 0x00b0,  # LIGHT SHADE
    0x2592: 0x00b1,  # MEDIUM SHADE
    0x2593: 0x00b2,  # DARK SHADE
    0x25a0: 0x00fe,  # BLACK SQUARE
}
"""#"
import codecs
### Codec APIs
class Codec(codecs.Codec):
    """Stateless cp852 codec delegating to the C-level charmap machinery."""

    def encode(self, input, errors='strict'):
        # charmap_encode returns a (bytes, length_consumed) tuple.
        return codecs.charmap_encode(input, errors, encoding_map)

    def decode(self, input, errors='strict'):
        # charmap_decode returns a (text, length_consumed) tuple.
        return codecs.charmap_decode(input, errors, decoding_table)
class IncrementalEncoder(codecs.IncrementalEncoder):
    def encode(self, input, final=False):
        """Encode *input*; the codec is stateless, so *final* is irrelevant."""
        encoded, _consumed = codecs.charmap_encode(input, self.errors, encoding_map)
        return encoded
class IncrementalDecoder(codecs.IncrementalDecoder):
    def decode(self, input, final=False):
        """Decode *input*; the codec is stateless, so *final* is irrelevant."""
        decoded, _consumed = codecs.charmap_decode(input, self.errors, decoding_table)
        return decoded
class StreamWriter(Codec, codecs.StreamWriter):
    """Stream writer: Codec.encode combined with codecs.StreamWriter."""
class StreamReader(Codec, codecs.StreamReader):
    """Stream reader: Codec.decode combined with codecs.StreamReader."""
### encodings module API
def getregentry():
    """Return the CodecInfo record registered for this encoding."""
    codec = Codec()
    return codecs.CodecInfo(
        name='cp852',
        encode=codec.encode,
        decode=codec.decode,
        incrementalencoder=IncrementalEncoder,
        incrementaldecoder=IncrementalDecoder,
        streamreader=StreamReader,
        streamwriter=StreamWriter,
    )
### Decoding Table
#
# decoding_table[byte] is the Unicode character that CP852 byte value
# decodes to.  This is the single authoritative statement of the mapping;
# decoding_map below is derived from it, so the two structures can never
# disagree (previously both were spelled out by hand, duplicating all
# 256 entries).
decoding_table = (
    # 0x00-0x7f decode to themselves (identical to ASCII).
    ''.join(map(chr, range(0x80))) +
    # 0x80-0xff: accented Latin-2 letters, symbols and box-drawing chars.
    '\xc7\xfc\xe9\xe2\xe4\u016f\u0107\xe7'              # 0x80-0x87
    '\u0142\xeb\u0150\u0151\xee\u0179\xc4\u0106'        # 0x88-0x8f
    '\xc9\u0139\u013a\xf4\xf6\u013d\u013e\u015a'        # 0x90-0x97
    '\u015b\xd6\xdc\u0164\u0165\u0141\xd7\u010d'        # 0x98-0x9f
    '\xe1\xed\xf3\xfa\u0104\u0105\u017d\u017e'          # 0xa0-0xa7
    '\u0118\u0119\xac\u017a\u010c\u015f\xab\xbb'        # 0xa8-0xaf
    '\u2591\u2592\u2593\u2502\u2524\xc1\xc2\u011a'      # 0xb0-0xb7
    '\u015e\u2563\u2551\u2557\u255d\u017b\u017c\u2510'  # 0xb8-0xbf
    '\u2514\u2534\u252c\u251c\u2500\u253c\u0102\u0103'  # 0xc0-0xc7
    '\u255a\u2554\u2569\u2566\u2560\u2550\u256c\xa4'    # 0xc8-0xcf
    '\u0111\u0110\u010e\xcb\u010f\u0147\xcd\xce'        # 0xd0-0xd7
    '\u011b\u2518\u250c\u2588\u2584\u0162\u016e\u2580'  # 0xd8-0xdf
    '\xd3\xdf\xd4\u0143\u0144\u0148\u0160\u0161'        # 0xe0-0xe7
    '\u0154\xda\u0155\u0170\xfd\xdd\u0163\xb4'          # 0xe8-0xef
    '\xad\u02dd\u02db\u02c7\u02d8\xa7\xf7\xb8'          # 0xf0-0xf7
    '\xb0\xa8\u02d9\u0171\u0158\u0159\u25a0\xa0'        # 0xf8-0xff
)
### Decoding Map
#
# byte value -> Unicode code point.  Equivalent to the former hand-written
# literal: identity for 0x00-0x7f, the table above for 0x80-0xff.
decoding_map = {byte: ord(char) for byte, char in enumerate(decoding_table)}
### Encoding Map
#
# Unicode code point -> CP852 byte value.  CP852 maps every byte to a
# distinct character, so the encoding map is exactly the inverse of
# decoding_map; building it with codecs.make_encoding_map keeps both
# directions consistent by construction instead of hand-maintaining a
# second 256-entry literal.
encoding_map = codecs.make_encoding_map(decoding_map)
"""#"
import codecs
### Codec APIs
class Codec(codecs.Codec):
    """Stateless CP437 codec backed by the module-level charmap tables."""

    def encode(self, input, errors='strict'):
        """Encode a str to CP437 bytes; returns (bytes, length consumed)."""
        return codecs.charmap_encode(input, errors, encoding_map)

    def decode(self, input, errors='strict'):
        """Decode CP437 bytes to str; returns (str, length consumed)."""
        return codecs.charmap_decode(input, errors, decoding_table)
class IncrementalEncoder(codecs.IncrementalEncoder):
    """Incremental encoder; charmap encoding needs no cross-call state."""

    def encode(self, input, final=False):
        encoded, _consumed = codecs.charmap_encode(input, self.errors, encoding_map)
        return encoded
class IncrementalDecoder(codecs.IncrementalDecoder):
    """Incremental decoder; charmap decoding needs no cross-call state."""

    def decode(self, input, final=False):
        decoded, _consumed = codecs.charmap_decode(input, self.errors, decoding_table)
        return decoded
class StreamWriter(Codec, codecs.StreamWriter):
    """Stream writer combining the stateless Codec with stream handling."""
class StreamReader(Codec, codecs.StreamReader):
    """Stream reader combining the stateless Codec with stream handling."""
### encodings module API
def getregentry():
    """Return the CodecInfo used to register this module as codec 'cp437'."""
    # The codec is stateless, so one shared instance can provide both the
    # encode and decode entry points.
    codec = Codec()
    return codecs.CodecInfo(
        name='cp437',
        encode=codec.encode,
        decode=codec.decode,
        incrementalencoder=IncrementalEncoder,
        incrementaldecoder=IncrementalDecoder,
        streamreader=StreamReader,
        streamwriter=StreamWriter,
    )
### Decoding Map
# Byte -> Unicode code point map for cp437: identity for 0x00-0x7F, with
# every byte in 0x80-0xFF overridden below (accented Latin letters, Greek,
# math symbols and box-drawing characters).
decoding_map = codecs.make_identity_dict(range(256))
decoding_map.update({
    0x0080: 0x00c7, # LATIN CAPITAL LETTER C WITH CEDILLA
    0x0081: 0x00fc, # LATIN SMALL LETTER U WITH DIAERESIS
    0x0082: 0x00e9, # LATIN SMALL LETTER E WITH ACUTE
    0x0083: 0x00e2, # LATIN SMALL LETTER A WITH CIRCUMFLEX
    0x0084: 0x00e4, # LATIN SMALL LETTER A WITH DIAERESIS
    0x0085: 0x00e0, # LATIN SMALL LETTER A WITH GRAVE
    0x0086: 0x00e5, # LATIN SMALL LETTER A WITH RING ABOVE
    0x0087: 0x00e7, # LATIN SMALL LETTER C WITH CEDILLA
    0x0088: 0x00ea, # LATIN SMALL LETTER E WITH CIRCUMFLEX
    0x0089: 0x00eb, # LATIN SMALL LETTER E WITH DIAERESIS
    0x008a: 0x00e8, # LATIN SMALL LETTER E WITH GRAVE
    0x008b: 0x00ef, # LATIN SMALL LETTER I WITH DIAERESIS
    0x008c: 0x00ee, # LATIN SMALL LETTER I WITH CIRCUMFLEX
    0x008d: 0x00ec, # LATIN SMALL LETTER I WITH GRAVE
    0x008e: 0x00c4, # LATIN CAPITAL LETTER A WITH DIAERESIS
    0x008f: 0x00c5, # LATIN CAPITAL LETTER A WITH RING ABOVE
    0x0090: 0x00c9, # LATIN CAPITAL LETTER E WITH ACUTE
    0x0091: 0x00e6, # LATIN SMALL LIGATURE AE
    0x0092: 0x00c6, # LATIN CAPITAL LIGATURE AE
    0x0093: 0x00f4, # LATIN SMALL LETTER O WITH CIRCUMFLEX
    0x0094: 0x00f6, # LATIN SMALL LETTER O WITH DIAERESIS
    0x0095: 0x00f2, # LATIN SMALL LETTER O WITH GRAVE
    0x0096: 0x00fb, # LATIN SMALL LETTER U WITH CIRCUMFLEX
    0x0097: 0x00f9, # LATIN SMALL LETTER U WITH GRAVE
    0x0098: 0x00ff, # LATIN SMALL LETTER Y WITH DIAERESIS
    0x0099: 0x00d6, # LATIN CAPITAL LETTER O WITH DIAERESIS
    0x009a: 0x00dc, # LATIN CAPITAL LETTER U WITH DIAERESIS
    0x009b: 0x00a2, # CENT SIGN
    0x009c: 0x00a3, # POUND SIGN
    0x009d: 0x00a5, # YEN SIGN
    0x009e: 0x20a7, # PESETA SIGN
    0x009f: 0x0192, # LATIN SMALL LETTER F WITH HOOK
    0x00a0: 0x00e1, # LATIN SMALL LETTER A WITH ACUTE
    0x00a1: 0x00ed, # LATIN SMALL LETTER I WITH ACUTE
    0x00a2: 0x00f3, # LATIN SMALL LETTER O WITH ACUTE
    0x00a3: 0x00fa, # LATIN SMALL LETTER U WITH ACUTE
    0x00a4: 0x00f1, # LATIN SMALL LETTER N WITH TILDE
    0x00a5: 0x00d1, # LATIN CAPITAL LETTER N WITH TILDE
    0x00a6: 0x00aa, # FEMININE ORDINAL INDICATOR
    0x00a7: 0x00ba, # MASCULINE ORDINAL INDICATOR
    0x00a8: 0x00bf, # INVERTED QUESTION MARK
    0x00a9: 0x2310, # REVERSED NOT SIGN
    0x00aa: 0x00ac, # NOT SIGN
    0x00ab: 0x00bd, # VULGAR FRACTION ONE HALF
    0x00ac: 0x00bc, # VULGAR FRACTION ONE QUARTER
    0x00ad: 0x00a1, # INVERTED EXCLAMATION MARK
    0x00ae: 0x00ab, # LEFT-POINTING DOUBLE ANGLE QUOTATION MARK
    0x00af: 0x00bb, # RIGHT-POINTING DOUBLE ANGLE QUOTATION MARK
    0x00b0: 0x2591, # LIGHT SHADE
    0x00b1: 0x2592, # MEDIUM SHADE
    0x00b2: 0x2593, # DARK SHADE
    0x00b3: 0x2502, # BOX DRAWINGS LIGHT VERTICAL
    0x00b4: 0x2524, # BOX DRAWINGS LIGHT VERTICAL AND LEFT
    0x00b5: 0x2561, # BOX DRAWINGS VERTICAL SINGLE AND LEFT DOUBLE
    0x00b6: 0x2562, # BOX DRAWINGS VERTICAL DOUBLE AND LEFT SINGLE
    0x00b7: 0x2556, # BOX DRAWINGS DOWN DOUBLE AND LEFT SINGLE
    0x00b8: 0x2555, # BOX DRAWINGS DOWN SINGLE AND LEFT DOUBLE
    0x00b9: 0x2563, # BOX DRAWINGS DOUBLE VERTICAL AND LEFT
    0x00ba: 0x2551, # BOX DRAWINGS DOUBLE VERTICAL
    0x00bb: 0x2557, # BOX DRAWINGS DOUBLE DOWN AND LEFT
    0x00bc: 0x255d, # BOX DRAWINGS DOUBLE UP AND LEFT
    0x00bd: 0x255c, # BOX DRAWINGS UP DOUBLE AND LEFT SINGLE
    0x00be: 0x255b, # BOX DRAWINGS UP SINGLE AND LEFT DOUBLE
    0x00bf: 0x2510, # BOX DRAWINGS LIGHT DOWN AND LEFT
    0x00c0: 0x2514, # BOX DRAWINGS LIGHT UP AND RIGHT
    0x00c1: 0x2534, # BOX DRAWINGS LIGHT UP AND HORIZONTAL
    0x00c2: 0x252c, # BOX DRAWINGS LIGHT DOWN AND HORIZONTAL
    0x00c3: 0x251c, # BOX DRAWINGS LIGHT VERTICAL AND RIGHT
    0x00c4: 0x2500, # BOX DRAWINGS LIGHT HORIZONTAL
    0x00c5: 0x253c, # BOX DRAWINGS LIGHT VERTICAL AND HORIZONTAL
    0x00c6: 0x255e, # BOX DRAWINGS VERTICAL SINGLE AND RIGHT DOUBLE
    0x00c7: 0x255f, # BOX DRAWINGS VERTICAL DOUBLE AND RIGHT SINGLE
    0x00c8: 0x255a, # BOX DRAWINGS DOUBLE UP AND RIGHT
    0x00c9: 0x2554, # BOX DRAWINGS DOUBLE DOWN AND RIGHT
    0x00ca: 0x2569, # BOX DRAWINGS DOUBLE UP AND HORIZONTAL
    0x00cb: 0x2566, # BOX DRAWINGS DOUBLE DOWN AND HORIZONTAL
    0x00cc: 0x2560, # BOX DRAWINGS DOUBLE VERTICAL AND RIGHT
    0x00cd: 0x2550, # BOX DRAWINGS DOUBLE HORIZONTAL
    0x00ce: 0x256c, # BOX DRAWINGS DOUBLE VERTICAL AND HORIZONTAL
    0x00cf: 0x2567, # BOX DRAWINGS UP SINGLE AND HORIZONTAL DOUBLE
    0x00d0: 0x2568, # BOX DRAWINGS UP DOUBLE AND HORIZONTAL SINGLE
    0x00d1: 0x2564, # BOX DRAWINGS DOWN SINGLE AND HORIZONTAL DOUBLE
    0x00d2: 0x2565, # BOX DRAWINGS DOWN DOUBLE AND HORIZONTAL SINGLE
    0x00d3: 0x2559, # BOX DRAWINGS UP DOUBLE AND RIGHT SINGLE
    0x00d4: 0x2558, # BOX DRAWINGS UP SINGLE AND RIGHT DOUBLE
    0x00d5: 0x2552, # BOX DRAWINGS DOWN SINGLE AND RIGHT DOUBLE
    0x00d6: 0x2553, # BOX DRAWINGS DOWN DOUBLE AND RIGHT SINGLE
    0x00d7: 0x256b, # BOX DRAWINGS VERTICAL DOUBLE AND HORIZONTAL SINGLE
    0x00d8: 0x256a, # BOX DRAWINGS VERTICAL SINGLE AND HORIZONTAL DOUBLE
    0x00d9: 0x2518, # BOX DRAWINGS LIGHT UP AND LEFT
    0x00da: 0x250c, # BOX DRAWINGS LIGHT DOWN AND RIGHT
    0x00db: 0x2588, # FULL BLOCK
    0x00dc: 0x2584, # LOWER HALF BLOCK
    0x00dd: 0x258c, # LEFT HALF BLOCK
    0x00de: 0x2590, # RIGHT HALF BLOCK
    0x00df: 0x2580, # UPPER HALF BLOCK
    0x00e0: 0x03b1, # GREEK SMALL LETTER ALPHA
    0x00e1: 0x00df, # LATIN SMALL LETTER SHARP S
    0x00e2: 0x0393, # GREEK CAPITAL LETTER GAMMA
    0x00e3: 0x03c0, # GREEK SMALL LETTER PI
    0x00e4: 0x03a3, # GREEK CAPITAL LETTER SIGMA
    0x00e5: 0x03c3, # GREEK SMALL LETTER SIGMA
    0x00e6: 0x00b5, # MICRO SIGN
    0x00e7: 0x03c4, # GREEK SMALL LETTER TAU
    0x00e8: 0x03a6, # GREEK CAPITAL LETTER PHI
    0x00e9: 0x0398, # GREEK CAPITAL LETTER THETA
    0x00ea: 0x03a9, # GREEK CAPITAL LETTER OMEGA
    0x00eb: 0x03b4, # GREEK SMALL LETTER DELTA
    0x00ec: 0x221e, # INFINITY
    0x00ed: 0x03c6, # GREEK SMALL LETTER PHI
    0x00ee: 0x03b5, # GREEK SMALL LETTER EPSILON
    0x00ef: 0x2229, # INTERSECTION
    0x00f0: 0x2261, # IDENTICAL TO
    0x00f1: 0x00b1, # PLUS-MINUS SIGN
    0x00f2: 0x2265, # GREATER-THAN OR EQUAL TO
    0x00f3: 0x2264, # LESS-THAN OR EQUAL TO
    0x00f4: 0x2320, # TOP HALF INTEGRAL
    0x00f5: 0x2321, # BOTTOM HALF INTEGRAL
    0x00f6: 0x00f7, # DIVISION SIGN
    0x00f7: 0x2248, # ALMOST EQUAL TO
    0x00f8: 0x00b0, # DEGREE SIGN
    0x00f9: 0x2219, # BULLET OPERATOR
    0x00fa: 0x00b7, # MIDDLE DOT
    0x00fb: 0x221a, # SQUARE ROOT
    0x00fc: 0x207f, # SUPERSCRIPT LATIN SMALL LETTER N
    0x00fd: 0x00b2, # SUPERSCRIPT TWO
    0x00fe: 0x25a0, # BLACK SQUARE
    0x00ff: 0x00a0, # NO-BREAK SPACE
})
### Decoding Table
# Derive the 256-entry decoding table from decoding_map instead of keeping a
# second hand-maintained literal of exactly the same byte -> character data
# (the two copies can silently drift apart in a generated file like this).
# decoding_table[b] is the Unicode character that byte b decodes to; it is the
# string form that codecs.charmap_decode accepts.
decoding_table = ''.join(chr(decoding_map[byte]) for byte in range(256))
### Encoding Map
# Build the encode direction by inverting decoding_map with the stdlib helper
# instead of duplicating all 256 entries by hand.  For cp437 every byte decodes
# to a distinct code point, so the inversion is exact and make_encoding_map
# produces the same mapping as the former literal (no ambiguous entries).
encoding_map = codecs.make_encoding_map(decoding_map)
""" Python Character Mapping Codec cp850 generated from 'VENDORS/MICSFT/PC/CP850.TXT' with gencodec.py.

"""#"
import codecs
### Codec APIs
class Codec(codecs.Codec):
    """Stateless cp850 codec built on the module-level charmap tables."""
    def encode(self,input,errors='strict'):
        # str -> (bytes, length consumed) via the module-level encoding_map.
        return codecs.charmap_encode(input,errors,encoding_map)
    def decode(self,input,errors='strict'):
        # bytes -> (str, length consumed) via the module-level decoding_table.
        return codecs.charmap_decode(input,errors,decoding_table)
class IncrementalEncoder(codecs.IncrementalEncoder):
    """Incremental encoder; charmap encoding keeps no state between calls."""
    def encode(self, input, final=False):
        # charmap_encode returns (bytes, consumed); only the bytes are needed.
        return codecs.charmap_encode(input,self.errors,encoding_map)[0]
class IncrementalDecoder(codecs.IncrementalDecoder):
    """Incremental decoder; charmap decoding keeps no state between calls."""
    def decode(self, input, final=False):
        # charmap_decode returns (str, consumed); only the text is needed.
        return codecs.charmap_decode(input,self.errors,decoding_table)[0]
class StreamWriter(Codec,codecs.StreamWriter):
    # Stream API: Codec.encode combined with codecs.StreamWriter buffering.
    pass
class StreamReader(Codec,codecs.StreamReader):
    # Stream API: Codec.decode combined with codecs.StreamReader buffering.
    pass
### encodings module API
def getregentry():
    """Return the codecs.CodecInfo record that registers this module as 'cp850'."""
    # One shared Codec instance supplies both bound methods; the original
    # generated code created a separate instance per method, which is equivalent.
    codec = Codec()
    return codecs.CodecInfo(
        name='cp850',
        encode=codec.encode,
        decode=codec.decode,
        incrementalencoder=IncrementalEncoder,
        incrementaldecoder=IncrementalDecoder,
        streamreader=StreamReader,
        streamwriter=StreamWriter,
    )
### Decoding Map
# Byte -> Unicode code point map for cp850 ("DOS Latin-1"): starts as the
# identity mapping for all 256 byte values, then remaps the high half
# (0x80-0xff) onto accented letters, box-drawing characters and symbols.
decoding_map = codecs.make_identity_dict(range(256))
decoding_map.update({
    0x0080: 0x00c7, # LATIN CAPITAL LETTER C WITH CEDILLA
    0x0081: 0x00fc, # LATIN SMALL LETTER U WITH DIAERESIS
    0x0082: 0x00e9, # LATIN SMALL LETTER E WITH ACUTE
    0x0083: 0x00e2, # LATIN SMALL LETTER A WITH CIRCUMFLEX
    0x0084: 0x00e4, # LATIN SMALL LETTER A WITH DIAERESIS
    0x0085: 0x00e0, # LATIN SMALL LETTER A WITH GRAVE
    0x0086: 0x00e5, # LATIN SMALL LETTER A WITH RING ABOVE
    0x0087: 0x00e7, # LATIN SMALL LETTER C WITH CEDILLA
    0x0088: 0x00ea, # LATIN SMALL LETTER E WITH CIRCUMFLEX
    0x0089: 0x00eb, # LATIN SMALL LETTER E WITH DIAERESIS
    0x008a: 0x00e8, # LATIN SMALL LETTER E WITH GRAVE
    0x008b: 0x00ef, # LATIN SMALL LETTER I WITH DIAERESIS
    0x008c: 0x00ee, # LATIN SMALL LETTER I WITH CIRCUMFLEX
    0x008d: 0x00ec, # LATIN SMALL LETTER I WITH GRAVE
    0x008e: 0x00c4, # LATIN CAPITAL LETTER A WITH DIAERESIS
    0x008f: 0x00c5, # LATIN CAPITAL LETTER A WITH RING ABOVE
    0x0090: 0x00c9, # LATIN CAPITAL LETTER E WITH ACUTE
    0x0091: 0x00e6, # LATIN SMALL LIGATURE AE
    0x0092: 0x00c6, # LATIN CAPITAL LIGATURE AE
    0x0093: 0x00f4, # LATIN SMALL LETTER O WITH CIRCUMFLEX
    0x0094: 0x00f6, # LATIN SMALL LETTER O WITH DIAERESIS
    0x0095: 0x00f2, # LATIN SMALL LETTER O WITH GRAVE
    0x0096: 0x00fb, # LATIN SMALL LETTER U WITH CIRCUMFLEX
    0x0097: 0x00f9, # LATIN SMALL LETTER U WITH GRAVE
    0x0098: 0x00ff, # LATIN SMALL LETTER Y WITH DIAERESIS
    0x0099: 0x00d6, # LATIN CAPITAL LETTER O WITH DIAERESIS
    0x009a: 0x00dc, # LATIN CAPITAL LETTER U WITH DIAERESIS
    0x009b: 0x00f8, # LATIN SMALL LETTER O WITH STROKE
    0x009c: 0x00a3, # POUND SIGN
    0x009d: 0x00d8, # LATIN CAPITAL LETTER O WITH STROKE
    0x009e: 0x00d7, # MULTIPLICATION SIGN
    0x009f: 0x0192, # LATIN SMALL LETTER F WITH HOOK
    0x00a0: 0x00e1, # LATIN SMALL LETTER A WITH ACUTE
    0x00a1: 0x00ed, # LATIN SMALL LETTER I WITH ACUTE
    0x00a2: 0x00f3, # LATIN SMALL LETTER O WITH ACUTE
    0x00a3: 0x00fa, # LATIN SMALL LETTER U WITH ACUTE
    0x00a4: 0x00f1, # LATIN SMALL LETTER N WITH TILDE
    0x00a5: 0x00d1, # LATIN CAPITAL LETTER N WITH TILDE
    0x00a6: 0x00aa, # FEMININE ORDINAL INDICATOR
    0x00a7: 0x00ba, # MASCULINE ORDINAL INDICATOR
    0x00a8: 0x00bf, # INVERTED QUESTION MARK
    0x00a9: 0x00ae, # REGISTERED SIGN
    0x00aa: 0x00ac, # NOT SIGN
    0x00ab: 0x00bd, # VULGAR FRACTION ONE HALF
    0x00ac: 0x00bc, # VULGAR FRACTION ONE QUARTER
    0x00ad: 0x00a1, # INVERTED EXCLAMATION MARK
    0x00ae: 0x00ab, # LEFT-POINTING DOUBLE ANGLE QUOTATION MARK
    0x00af: 0x00bb, # RIGHT-POINTING DOUBLE ANGLE QUOTATION MARK
    0x00b0: 0x2591, # LIGHT SHADE
    0x00b1: 0x2592, # MEDIUM SHADE
    0x00b2: 0x2593, # DARK SHADE
    0x00b3: 0x2502, # BOX DRAWINGS LIGHT VERTICAL
    0x00b4: 0x2524, # BOX DRAWINGS LIGHT VERTICAL AND LEFT
    0x00b5: 0x00c1, # LATIN CAPITAL LETTER A WITH ACUTE
    0x00b6: 0x00c2, # LATIN CAPITAL LETTER A WITH CIRCUMFLEX
    0x00b7: 0x00c0, # LATIN CAPITAL LETTER A WITH GRAVE
    0x00b8: 0x00a9, # COPYRIGHT SIGN
    0x00b9: 0x2563, # BOX DRAWINGS DOUBLE VERTICAL AND LEFT
    0x00ba: 0x2551, # BOX DRAWINGS DOUBLE VERTICAL
    0x00bb: 0x2557, # BOX DRAWINGS DOUBLE DOWN AND LEFT
    0x00bc: 0x255d, # BOX DRAWINGS DOUBLE UP AND LEFT
    0x00bd: 0x00a2, # CENT SIGN
    0x00be: 0x00a5, # YEN SIGN
    0x00bf: 0x2510, # BOX DRAWINGS LIGHT DOWN AND LEFT
    0x00c0: 0x2514, # BOX DRAWINGS LIGHT UP AND RIGHT
    0x00c1: 0x2534, # BOX DRAWINGS LIGHT UP AND HORIZONTAL
    0x00c2: 0x252c, # BOX DRAWINGS LIGHT DOWN AND HORIZONTAL
    0x00c3: 0x251c, # BOX DRAWINGS LIGHT VERTICAL AND RIGHT
    0x00c4: 0x2500, # BOX DRAWINGS LIGHT HORIZONTAL
    0x00c5: 0x253c, # BOX DRAWINGS LIGHT VERTICAL AND HORIZONTAL
    0x00c6: 0x00e3, # LATIN SMALL LETTER A WITH TILDE
    0x00c7: 0x00c3, # LATIN CAPITAL LETTER A WITH TILDE
    0x00c8: 0x255a, # BOX DRAWINGS DOUBLE UP AND RIGHT
    0x00c9: 0x2554, # BOX DRAWINGS DOUBLE DOWN AND RIGHT
    0x00ca: 0x2569, # BOX DRAWINGS DOUBLE UP AND HORIZONTAL
    0x00cb: 0x2566, # BOX DRAWINGS DOUBLE DOWN AND HORIZONTAL
    0x00cc: 0x2560, # BOX DRAWINGS DOUBLE VERTICAL AND RIGHT
    0x00cd: 0x2550, # BOX DRAWINGS DOUBLE HORIZONTAL
    0x00ce: 0x256c, # BOX DRAWINGS DOUBLE VERTICAL AND HORIZONTAL
    0x00cf: 0x00a4, # CURRENCY SIGN
    0x00d0: 0x00f0, # LATIN SMALL LETTER ETH
    0x00d1: 0x00d0, # LATIN CAPITAL LETTER ETH
    0x00d2: 0x00ca, # LATIN CAPITAL LETTER E WITH CIRCUMFLEX
    0x00d3: 0x00cb, # LATIN CAPITAL LETTER E WITH DIAERESIS
    0x00d4: 0x00c8, # LATIN CAPITAL LETTER E WITH GRAVE
    0x00d5: 0x0131, # LATIN SMALL LETTER DOTLESS I
    0x00d6: 0x00cd, # LATIN CAPITAL LETTER I WITH ACUTE
    0x00d7: 0x00ce, # LATIN CAPITAL LETTER I WITH CIRCUMFLEX
    0x00d8: 0x00cf, # LATIN CAPITAL LETTER I WITH DIAERESIS
    0x00d9: 0x2518, # BOX DRAWINGS LIGHT UP AND LEFT
    0x00da: 0x250c, # BOX DRAWINGS LIGHT DOWN AND RIGHT
    0x00db: 0x2588, # FULL BLOCK
    0x00dc: 0x2584, # LOWER HALF BLOCK
    0x00dd: 0x00a6, # BROKEN BAR
    0x00de: 0x00cc, # LATIN CAPITAL LETTER I WITH GRAVE
    0x00df: 0x2580, # UPPER HALF BLOCK
    0x00e0: 0x00d3, # LATIN CAPITAL LETTER O WITH ACUTE
    0x00e1: 0x00df, # LATIN SMALL LETTER SHARP S
    0x00e2: 0x00d4, # LATIN CAPITAL LETTER O WITH CIRCUMFLEX
    0x00e3: 0x00d2, # LATIN CAPITAL LETTER O WITH GRAVE
    0x00e4: 0x00f5, # LATIN SMALL LETTER O WITH TILDE
    0x00e5: 0x00d5, # LATIN CAPITAL LETTER O WITH TILDE
    0x00e6: 0x00b5, # MICRO SIGN
    0x00e7: 0x00fe, # LATIN SMALL LETTER THORN
    0x00e8: 0x00de, # LATIN CAPITAL LETTER THORN
    0x00e9: 0x00da, # LATIN CAPITAL LETTER U WITH ACUTE
    0x00ea: 0x00db, # LATIN CAPITAL LETTER U WITH CIRCUMFLEX
    0x00eb: 0x00d9, # LATIN CAPITAL LETTER U WITH GRAVE
    0x00ec: 0x00fd, # LATIN SMALL LETTER Y WITH ACUTE
    0x00ed: 0x00dd, # LATIN CAPITAL LETTER Y WITH ACUTE
    0x00ee: 0x00af, # MACRON
    0x00ef: 0x00b4, # ACUTE ACCENT
    0x00f0: 0x00ad, # SOFT HYPHEN
    0x00f1: 0x00b1, # PLUS-MINUS SIGN
    0x00f2: 0x2017, # DOUBLE LOW LINE
    0x00f3: 0x00be, # VULGAR FRACTION THREE QUARTERS
    0x00f4: 0x00b6, # PILCROW SIGN
    0x00f5: 0x00a7, # SECTION SIGN
    0x00f6: 0x00f7, # DIVISION SIGN
    0x00f7: 0x00b8, # CEDILLA
    0x00f8: 0x00b0, # DEGREE SIGN
    0x00f9: 0x00a8, # DIAERESIS
    0x00fa: 0x00b7, # MIDDLE DOT
    0x00fb: 0x00b9, # SUPERSCRIPT ONE
    0x00fc: 0x00b3, # SUPERSCRIPT THREE
    0x00fd: 0x00b2, # SUPERSCRIPT TWO
    0x00fe: 0x25a0, # BLACK SQUARE
    0x00ff: 0x00a0, # NO-BREAK SPACE
})
### Decoding Table
# 256-character string indexed by byte value: decoding_table[b] is the
# Unicode character for cp850 byte b. This is the table Codec.decode and
# IncrementalDecoder pass to codecs.charmap_decode (the fast string form
# of the same mapping held in decoding_map above).
decoding_table = (
    '\x00'     #  0x0000 -> NULL
    '\x01'     #  0x0001 -> START OF HEADING
    '\x02'     #  0x0002 -> START OF TEXT
    '\x03'     #  0x0003 -> END OF TEXT
    '\x04'     #  0x0004 -> END OF TRANSMISSION
    '\x05'     #  0x0005 -> ENQUIRY
    '\x06'     #  0x0006 -> ACKNOWLEDGE
    '\x07'     #  0x0007 -> BELL
    '\x08'     #  0x0008 -> BACKSPACE
    '\t'       #  0x0009 -> HORIZONTAL TABULATION
    '\n'       #  0x000a -> LINE FEED
    '\x0b'     #  0x000b -> VERTICAL TABULATION
    '\x0c'     #  0x000c -> FORM FEED
    '\r'       #  0x000d -> CARRIAGE RETURN
    '\x0e'     #  0x000e -> SHIFT OUT
    '\x0f'     #  0x000f -> SHIFT IN
    '\x10'     #  0x0010 -> DATA LINK ESCAPE
    '\x11'     #  0x0011 -> DEVICE CONTROL ONE
    '\x12'     #  0x0012 -> DEVICE CONTROL TWO
    '\x13'     #  0x0013 -> DEVICE CONTROL THREE
    '\x14'     #  0x0014 -> DEVICE CONTROL FOUR
    '\x15'     #  0x0015 -> NEGATIVE ACKNOWLEDGE
    '\x16'     #  0x0016 -> SYNCHRONOUS IDLE
    '\x17'     #  0x0017 -> END OF TRANSMISSION BLOCK
    '\x18'     #  0x0018 -> CANCEL
    '\x19'     #  0x0019 -> END OF MEDIUM
    '\x1a'     #  0x001a -> SUBSTITUTE
    '\x1b'     #  0x001b -> ESCAPE
    '\x1c'     #  0x001c -> FILE SEPARATOR
    '\x1d'     #  0x001d -> GROUP SEPARATOR
    '\x1e'     #  0x001e -> RECORD SEPARATOR
    '\x1f'     #  0x001f -> UNIT SEPARATOR
    ' '        #  0x0020 -> SPACE
    '!'        #  0x0021 -> EXCLAMATION MARK
    '"'        #  0x0022 -> QUOTATION MARK
    '#'        #  0x0023 -> NUMBER SIGN
    '$'        #  0x0024 -> DOLLAR SIGN
    '%'        #  0x0025 -> PERCENT SIGN
    '&'        #  0x0026 -> AMPERSAND
    "'"        #  0x0027 -> APOSTROPHE
    '('        #  0x0028 -> LEFT PARENTHESIS
    ')'        #  0x0029 -> RIGHT PARENTHESIS
    '*'        #  0x002a -> ASTERISK
    '+'        #  0x002b -> PLUS SIGN
    ','        #  0x002c -> COMMA
    '-'        #  0x002d -> HYPHEN-MINUS
    '.'        #  0x002e -> FULL STOP
    '/'        #  0x002f -> SOLIDUS
    '0'        #  0x0030 -> DIGIT ZERO
    '1'        #  0x0031 -> DIGIT ONE
    '2'        #  0x0032 -> DIGIT TWO
    '3'        #  0x0033 -> DIGIT THREE
    '4'        #  0x0034 -> DIGIT FOUR
    '5'        #  0x0035 -> DIGIT FIVE
    '6'        #  0x0036 -> DIGIT SIX
    '7'        #  0x0037 -> DIGIT SEVEN
    '8'        #  0x0038 -> DIGIT EIGHT
    '9'        #  0x0039 -> DIGIT NINE
    ':'        #  0x003a -> COLON
    ';'        #  0x003b -> SEMICOLON
    '<'        #  0x003c -> LESS-THAN SIGN
    '='        #  0x003d -> EQUALS SIGN
    '>'        #  0x003e -> GREATER-THAN SIGN
    '?'        #  0x003f -> QUESTION MARK
    '@'        #  0x0040 -> COMMERCIAL AT
    'A'        #  0x0041 -> LATIN CAPITAL LETTER A
    'B'        #  0x0042 -> LATIN CAPITAL LETTER B
    'C'        #  0x0043 -> LATIN CAPITAL LETTER C
    'D'        #  0x0044 -> LATIN CAPITAL LETTER D
    'E'        #  0x0045 -> LATIN CAPITAL LETTER E
    'F'        #  0x0046 -> LATIN CAPITAL LETTER F
    'G'        #  0x0047 -> LATIN CAPITAL LETTER G
    'H'        #  0x0048 -> LATIN CAPITAL LETTER H
    'I'        #  0x0049 -> LATIN CAPITAL LETTER I
    'J'        #  0x004a -> LATIN CAPITAL LETTER J
    'K'        #  0x004b -> LATIN CAPITAL LETTER K
    'L'        #  0x004c -> LATIN CAPITAL LETTER L
    'M'        #  0x004d -> LATIN CAPITAL LETTER M
    'N'        #  0x004e -> LATIN CAPITAL LETTER N
    'O'        #  0x004f -> LATIN CAPITAL LETTER O
    'P'        #  0x0050 -> LATIN CAPITAL LETTER P
    'Q'        #  0x0051 -> LATIN CAPITAL LETTER Q
    'R'        #  0x0052 -> LATIN CAPITAL LETTER R
    'S'        #  0x0053 -> LATIN CAPITAL LETTER S
    'T'        #  0x0054 -> LATIN CAPITAL LETTER T
    'U'        #  0x0055 -> LATIN CAPITAL LETTER U
    'V'        #  0x0056 -> LATIN CAPITAL LETTER V
    'W'        #  0x0057 -> LATIN CAPITAL LETTER W
    'X'        #  0x0058 -> LATIN CAPITAL LETTER X
    'Y'        #  0x0059 -> LATIN CAPITAL LETTER Y
    'Z'        #  0x005a -> LATIN CAPITAL LETTER Z
    '['        #  0x005b -> LEFT SQUARE BRACKET
    '\\'       #  0x005c -> REVERSE SOLIDUS
    ']'        #  0x005d -> RIGHT SQUARE BRACKET
    '^'        #  0x005e -> CIRCUMFLEX ACCENT
    '_'        #  0x005f -> LOW LINE
    '`'        #  0x0060 -> GRAVE ACCENT
    'a'        #  0x0061 -> LATIN SMALL LETTER A
    'b'        #  0x0062 -> LATIN SMALL LETTER B
    'c'        #  0x0063 -> LATIN SMALL LETTER C
    'd'        #  0x0064 -> LATIN SMALL LETTER D
    'e'        #  0x0065 -> LATIN SMALL LETTER E
    'f'        #  0x0066 -> LATIN SMALL LETTER F
    'g'        #  0x0067 -> LATIN SMALL LETTER G
    'h'        #  0x0068 -> LATIN SMALL LETTER H
    'i'        #  0x0069 -> LATIN SMALL LETTER I
    'j'        #  0x006a -> LATIN SMALL LETTER J
    'k'        #  0x006b -> LATIN SMALL LETTER K
    'l'        #  0x006c -> LATIN SMALL LETTER L
    'm'        #  0x006d -> LATIN SMALL LETTER M
    'n'        #  0x006e -> LATIN SMALL LETTER N
    'o'        #  0x006f -> LATIN SMALL LETTER O
    'p'        #  0x0070 -> LATIN SMALL LETTER P
    'q'        #  0x0071 -> LATIN SMALL LETTER Q
    'r'        #  0x0072 -> LATIN SMALL LETTER R
    's'        #  0x0073 -> LATIN SMALL LETTER S
    't'        #  0x0074 -> LATIN SMALL LETTER T
    'u'        #  0x0075 -> LATIN SMALL LETTER U
    'v'        #  0x0076 -> LATIN SMALL LETTER V
    'w'        #  0x0077 -> LATIN SMALL LETTER W
    'x'        #  0x0078 -> LATIN SMALL LETTER X
    'y'        #  0x0079 -> LATIN SMALL LETTER Y
    'z'        #  0x007a -> LATIN SMALL LETTER Z
    '{'        #  0x007b -> LEFT CURLY BRACKET
    '|'        #  0x007c -> VERTICAL LINE
    '}'        #  0x007d -> RIGHT CURLY BRACKET
    '~'        #  0x007e -> TILDE
    '\x7f'     #  0x007f -> DELETE
    '\xc7'     #  0x0080 -> LATIN CAPITAL LETTER C WITH CEDILLA
    '\xfc'     #  0x0081 -> LATIN SMALL LETTER U WITH DIAERESIS
    '\xe9'     #  0x0082 -> LATIN SMALL LETTER E WITH ACUTE
    '\xe2'     #  0x0083 -> LATIN SMALL LETTER A WITH CIRCUMFLEX
    '\xe4'     #  0x0084 -> LATIN SMALL LETTER A WITH DIAERESIS
    '\xe0'     #  0x0085 -> LATIN SMALL LETTER A WITH GRAVE
    '\xe5'     #  0x0086 -> LATIN SMALL LETTER A WITH RING ABOVE
    '\xe7'     #  0x0087 -> LATIN SMALL LETTER C WITH CEDILLA
    '\xea'     #  0x0088 -> LATIN SMALL LETTER E WITH CIRCUMFLEX
    '\xeb'     #  0x0089 -> LATIN SMALL LETTER E WITH DIAERESIS
    '\xe8'     #  0x008a -> LATIN SMALL LETTER E WITH GRAVE
    '\xef'     #  0x008b -> LATIN SMALL LETTER I WITH DIAERESIS
    '\xee'     #  0x008c -> LATIN SMALL LETTER I WITH CIRCUMFLEX
    '\xec'     #  0x008d -> LATIN SMALL LETTER I WITH GRAVE
    '\xc4'     #  0x008e -> LATIN CAPITAL LETTER A WITH DIAERESIS
    '\xc5'     #  0x008f -> LATIN CAPITAL LETTER A WITH RING ABOVE
    '\xc9'     #  0x0090 -> LATIN CAPITAL LETTER E WITH ACUTE
    '\xe6'     #  0x0091 -> LATIN SMALL LIGATURE AE
    '\xc6'     #  0x0092 -> LATIN CAPITAL LIGATURE AE
    '\xf4'     #  0x0093 -> LATIN SMALL LETTER O WITH CIRCUMFLEX
    '\xf6'     #  0x0094 -> LATIN SMALL LETTER O WITH DIAERESIS
    '\xf2'     #  0x0095 -> LATIN SMALL LETTER O WITH GRAVE
    '\xfb'     #  0x0096 -> LATIN SMALL LETTER U WITH CIRCUMFLEX
    '\xf9'     #  0x0097 -> LATIN SMALL LETTER U WITH GRAVE
    '\xff'     #  0x0098 -> LATIN SMALL LETTER Y WITH DIAERESIS
    '\xd6'     #  0x0099 -> LATIN CAPITAL LETTER O WITH DIAERESIS
    '\xdc'     #  0x009a -> LATIN CAPITAL LETTER U WITH DIAERESIS
    '\xf8'     #  0x009b -> LATIN SMALL LETTER O WITH STROKE
    '\xa3'     #  0x009c -> POUND SIGN
    '\xd8'     #  0x009d -> LATIN CAPITAL LETTER O WITH STROKE
    '\xd7'     #  0x009e -> MULTIPLICATION SIGN
    '\u0192'   #  0x009f -> LATIN SMALL LETTER F WITH HOOK
    '\xe1'     #  0x00a0 -> LATIN SMALL LETTER A WITH ACUTE
    '\xed'     #  0x00a1 -> LATIN SMALL LETTER I WITH ACUTE
    '\xf3'     #  0x00a2 -> LATIN SMALL LETTER O WITH ACUTE
    '\xfa'     #  0x00a3 -> LATIN SMALL LETTER U WITH ACUTE
    '\xf1'     #  0x00a4 -> LATIN SMALL LETTER N WITH TILDE
    '\xd1'     #  0x00a5 -> LATIN CAPITAL LETTER N WITH TILDE
    '\xaa'     #  0x00a6 -> FEMININE ORDINAL INDICATOR
    '\xba'     #  0x00a7 -> MASCULINE ORDINAL INDICATOR
    '\xbf'     #  0x00a8 -> INVERTED QUESTION MARK
    '\xae'     #  0x00a9 -> REGISTERED SIGN
    '\xac'     #  0x00aa -> NOT SIGN
    '\xbd'     #  0x00ab -> VULGAR FRACTION ONE HALF
    '\xbc'     #  0x00ac -> VULGAR FRACTION ONE QUARTER
    '\xa1'     #  0x00ad -> INVERTED EXCLAMATION MARK
    '\xab'     #  0x00ae -> LEFT-POINTING DOUBLE ANGLE QUOTATION MARK
    '\xbb'     #  0x00af -> RIGHT-POINTING DOUBLE ANGLE QUOTATION MARK
    '\u2591'   #  0x00b0 -> LIGHT SHADE
    '\u2592'   #  0x00b1 -> MEDIUM SHADE
    '\u2593'   #  0x00b2 -> DARK SHADE
    '\u2502'   #  0x00b3 -> BOX DRAWINGS LIGHT VERTICAL
    '\u2524'   #  0x00b4 -> BOX DRAWINGS LIGHT VERTICAL AND LEFT
    '\xc1'     #  0x00b5 -> LATIN CAPITAL LETTER A WITH ACUTE
    '\xc2'     #  0x00b6 -> LATIN CAPITAL LETTER A WITH CIRCUMFLEX
    '\xc0'     #  0x00b7 -> LATIN CAPITAL LETTER A WITH GRAVE
    '\xa9'     #  0x00b8 -> COPYRIGHT SIGN
    '\u2563'   #  0x00b9 -> BOX DRAWINGS DOUBLE VERTICAL AND LEFT
    '\u2551'   #  0x00ba -> BOX DRAWINGS DOUBLE VERTICAL
    '\u2557'   #  0x00bb -> BOX DRAWINGS DOUBLE DOWN AND LEFT
    '\u255d'   #  0x00bc -> BOX DRAWINGS DOUBLE UP AND LEFT
    '\xa2'     #  0x00bd -> CENT SIGN
    '\xa5'     #  0x00be -> YEN SIGN
    '\u2510'   #  0x00bf -> BOX DRAWINGS LIGHT DOWN AND LEFT
    '\u2514'   #  0x00c0 -> BOX DRAWINGS LIGHT UP AND RIGHT
    '\u2534'   #  0x00c1 -> BOX DRAWINGS LIGHT UP AND HORIZONTAL
    '\u252c'   #  0x00c2 -> BOX DRAWINGS LIGHT DOWN AND HORIZONTAL
    '\u251c'   #  0x00c3 -> BOX DRAWINGS LIGHT VERTICAL AND RIGHT
    '\u2500'   #  0x00c4 -> BOX DRAWINGS LIGHT HORIZONTAL
    '\u253c'   #  0x00c5 -> BOX DRAWINGS LIGHT VERTICAL AND HORIZONTAL
    '\xe3'     #  0x00c6 -> LATIN SMALL LETTER A WITH TILDE
    '\xc3'     #  0x00c7 -> LATIN CAPITAL LETTER A WITH TILDE
    '\u255a'   #  0x00c8 -> BOX DRAWINGS DOUBLE UP AND RIGHT
    '\u2554'   #  0x00c9 -> BOX DRAWINGS DOUBLE DOWN AND RIGHT
    '\u2569'   #  0x00ca -> BOX DRAWINGS DOUBLE UP AND HORIZONTAL
    '\u2566'   #  0x00cb -> BOX DRAWINGS DOUBLE DOWN AND HORIZONTAL
    '\u2560'   #  0x00cc -> BOX DRAWINGS DOUBLE VERTICAL AND RIGHT
    '\u2550'   #  0x00cd -> BOX DRAWINGS DOUBLE HORIZONTAL
    '\u256c'   #  0x00ce -> BOX DRAWINGS DOUBLE VERTICAL AND HORIZONTAL
    '\xa4'     #  0x00cf -> CURRENCY SIGN
    '\xf0'     #  0x00d0 -> LATIN SMALL LETTER ETH
    '\xd0'     #  0x00d1 -> LATIN CAPITAL LETTER ETH
    '\xca'     #  0x00d2 -> LATIN CAPITAL LETTER E WITH CIRCUMFLEX
    '\xcb'     #  0x00d3 -> LATIN CAPITAL LETTER E WITH DIAERESIS
    '\xc8'     #  0x00d4 -> LATIN CAPITAL LETTER E WITH GRAVE
    '\u0131'   #  0x00d5 -> LATIN SMALL LETTER DOTLESS I
    '\xcd'     #  0x00d6 -> LATIN CAPITAL LETTER I WITH ACUTE
    '\xce'     #  0x00d7 -> LATIN CAPITAL LETTER I WITH CIRCUMFLEX
    '\xcf'     #  0x00d8 -> LATIN CAPITAL LETTER I WITH DIAERESIS
    '\u2518'   #  0x00d9 -> BOX DRAWINGS LIGHT UP AND LEFT
    '\u250c'   #  0x00da -> BOX DRAWINGS LIGHT DOWN AND RIGHT
    '\u2588'   #  0x00db -> FULL BLOCK
    '\u2584'   #  0x00dc -> LOWER HALF BLOCK
    '\xa6'     #  0x00dd -> BROKEN BAR
    '\xcc'     #  0x00de -> LATIN CAPITAL LETTER I WITH GRAVE
    '\u2580'   #  0x00df -> UPPER HALF BLOCK
    '\xd3'     #  0x00e0 -> LATIN CAPITAL LETTER O WITH ACUTE
    '\xdf'     #  0x00e1 -> LATIN SMALL LETTER SHARP S
    '\xd4'     #  0x00e2 -> LATIN CAPITAL LETTER O WITH CIRCUMFLEX
    '\xd2'     #  0x00e3 -> LATIN CAPITAL LETTER O WITH GRAVE
    '\xf5'     #  0x00e4 -> LATIN SMALL LETTER O WITH TILDE
    '\xd5'     #  0x00e5 -> LATIN CAPITAL LETTER O WITH TILDE
    '\xb5'     #  0x00e6 -> MICRO SIGN
    '\xfe'     #  0x00e7 -> LATIN SMALL LETTER THORN
    '\xde'     #  0x00e8 -> LATIN CAPITAL LETTER THORN
    '\xda'     #  0x00e9 -> LATIN CAPITAL LETTER U WITH ACUTE
    '\xdb'     #  0x00ea -> LATIN CAPITAL LETTER U WITH CIRCUMFLEX
    '\xd9'     #  0x00eb -> LATIN CAPITAL LETTER U WITH GRAVE
    '\xfd'     #  0x00ec -> LATIN SMALL LETTER Y WITH ACUTE
    '\xdd'     #  0x00ed -> LATIN CAPITAL LETTER Y WITH ACUTE
    '\xaf'     #  0x00ee -> MACRON
    '\xb4'     #  0x00ef -> ACUTE ACCENT
    '\xad'     #  0x00f0 -> SOFT HYPHEN
    '\xb1'     #  0x00f1 -> PLUS-MINUS SIGN
    '\u2017'   #  0x00f2 -> DOUBLE LOW LINE
    '\xbe'     #  0x00f3 -> VULGAR FRACTION THREE QUARTERS
    '\xb6'     #  0x00f4 -> PILCROW SIGN
    '\xa7'     #  0x00f5 -> SECTION SIGN
    '\xf7'     #  0x00f6 -> DIVISION SIGN
    '\xb8'     #  0x00f7 -> CEDILLA
    '\xb0'     #  0x00f8 -> DEGREE SIGN
    '\xa8'     #  0x00f9 -> DIAERESIS
    '\xb7'     #  0x00fa -> MIDDLE DOT
    '\xb9'     #  0x00fb -> SUPERSCRIPT ONE
    '\xb3'     #  0x00fc -> SUPERSCRIPT THREE
    '\xb2'     #  0x00fd -> SUPERSCRIPT TWO
    '\u25a0'   #  0x00fe -> BLACK SQUARE
    '\xa0'     #  0x00ff -> NO-BREAK SPACE
)
### Encoding Map
# Unicode code point -> cp850 byte map (the exact inverse of decoding_map,
# which is a bijection for this code page). Used by Codec.encode and
# IncrementalEncoder via codecs.charmap_encode.
encoding_map = {
    0x0000: 0x0000, # NULL
    0x0001: 0x0001, # START OF HEADING
    0x0002: 0x0002, # START OF TEXT
    0x0003: 0x0003, # END OF TEXT
    0x0004: 0x0004, # END OF TRANSMISSION
    0x0005: 0x0005, # ENQUIRY
    0x0006: 0x0006, # ACKNOWLEDGE
    0x0007: 0x0007, # BELL
    0x0008: 0x0008, # BACKSPACE
    0x0009: 0x0009, # HORIZONTAL TABULATION
    0x000a: 0x000a, # LINE FEED
    0x000b: 0x000b, # VERTICAL TABULATION
    0x000c: 0x000c, # FORM FEED
    0x000d: 0x000d, # CARRIAGE RETURN
    0x000e: 0x000e, # SHIFT OUT
    0x000f: 0x000f, # SHIFT IN
    0x0010: 0x0010, # DATA LINK ESCAPE
    0x0011: 0x0011, # DEVICE CONTROL ONE
    0x0012: 0x0012, # DEVICE CONTROL TWO
    0x0013: 0x0013, # DEVICE CONTROL THREE
    0x0014: 0x0014, # DEVICE CONTROL FOUR
    0x0015: 0x0015, # NEGATIVE ACKNOWLEDGE
    0x0016: 0x0016, # SYNCHRONOUS IDLE
    0x0017: 0x0017, # END OF TRANSMISSION BLOCK
    0x0018: 0x0018, # CANCEL
    0x0019: 0x0019, # END OF MEDIUM
    0x001a: 0x001a, # SUBSTITUTE
    0x001b: 0x001b, # ESCAPE
    0x001c: 0x001c, # FILE SEPARATOR
    0x001d: 0x001d, # GROUP SEPARATOR
    0x001e: 0x001e, # RECORD SEPARATOR
    0x001f: 0x001f, # UNIT SEPARATOR
    0x0020: 0x0020, # SPACE
    0x0021: 0x0021, # EXCLAMATION MARK
    0x0022: 0x0022, # QUOTATION MARK
    0x0023: 0x0023, # NUMBER SIGN
    0x0024: 0x0024, # DOLLAR SIGN
    0x0025: 0x0025, # PERCENT SIGN
    0x0026: 0x0026, # AMPERSAND
    0x0027: 0x0027, # APOSTROPHE
    0x0028: 0x0028, # LEFT PARENTHESIS
    0x0029: 0x0029, # RIGHT PARENTHESIS
    0x002a: 0x002a, # ASTERISK
    0x002b: 0x002b, # PLUS SIGN
    0x002c: 0x002c, # COMMA
    0x002d: 0x002d, # HYPHEN-MINUS
    0x002e: 0x002e, # FULL STOP
    0x002f: 0x002f, # SOLIDUS
    0x0030: 0x0030, # DIGIT ZERO
    0x0031: 0x0031, # DIGIT ONE
    0x0032: 0x0032, # DIGIT TWO
    0x0033: 0x0033, # DIGIT THREE
    0x0034: 0x0034, # DIGIT FOUR
    0x0035: 0x0035, # DIGIT FIVE
    0x0036: 0x0036, # DIGIT SIX
    0x0037: 0x0037, # DIGIT SEVEN
    0x0038: 0x0038, # DIGIT EIGHT
    0x0039: 0x0039, # DIGIT NINE
    0x003a: 0x003a, # COLON
    0x003b: 0x003b, # SEMICOLON
    0x003c: 0x003c, # LESS-THAN SIGN
    0x003d: 0x003d, # EQUALS SIGN
    0x003e: 0x003e, # GREATER-THAN SIGN
    0x003f: 0x003f, # QUESTION MARK
    0x0040: 0x0040, # COMMERCIAL AT
    0x0041: 0x0041, # LATIN CAPITAL LETTER A
    0x0042: 0x0042, # LATIN CAPITAL LETTER B
    0x0043: 0x0043, # LATIN CAPITAL LETTER C
    0x0044: 0x0044, # LATIN CAPITAL LETTER D
    0x0045: 0x0045, # LATIN CAPITAL LETTER E
    0x0046: 0x0046, # LATIN CAPITAL LETTER F
    0x0047: 0x0047, # LATIN CAPITAL LETTER G
    0x0048: 0x0048, # LATIN CAPITAL LETTER H
    0x0049: 0x0049, # LATIN CAPITAL LETTER I
    0x004a: 0x004a, # LATIN CAPITAL LETTER J
    0x004b: 0x004b, # LATIN CAPITAL LETTER K
    0x004c: 0x004c, # LATIN CAPITAL LETTER L
    0x004d: 0x004d, # LATIN CAPITAL LETTER M
    0x004e: 0x004e, # LATIN CAPITAL LETTER N
    0x004f: 0x004f, # LATIN CAPITAL LETTER O
    0x0050: 0x0050, # LATIN CAPITAL LETTER P
    0x0051: 0x0051, # LATIN CAPITAL LETTER Q
    0x0052: 0x0052, # LATIN CAPITAL LETTER R
    0x0053: 0x0053, # LATIN CAPITAL LETTER S
    0x0054: 0x0054, # LATIN CAPITAL LETTER T
    0x0055: 0x0055, # LATIN CAPITAL LETTER U
    0x0056: 0x0056, # LATIN CAPITAL LETTER V
    0x0057: 0x0057, # LATIN CAPITAL LETTER W
    0x0058: 0x0058, # LATIN CAPITAL LETTER X
    0x0059: 0x0059, # LATIN CAPITAL LETTER Y
    0x005a: 0x005a, # LATIN CAPITAL LETTER Z
    0x005b: 0x005b, # LEFT SQUARE BRACKET
    0x005c: 0x005c, # REVERSE SOLIDUS
    0x005d: 0x005d, # RIGHT SQUARE BRACKET
    0x005e: 0x005e, # CIRCUMFLEX ACCENT
    0x005f: 0x005f, # LOW LINE
    0x0060: 0x0060, # GRAVE ACCENT
    0x0061: 0x0061, # LATIN SMALL LETTER A
    0x0062: 0x0062, # LATIN SMALL LETTER B
    0x0063: 0x0063, # LATIN SMALL LETTER C
    0x0064: 0x0064, # LATIN SMALL LETTER D
    0x0065: 0x0065, # LATIN SMALL LETTER E
    0x0066: 0x0066, # LATIN SMALL LETTER F
    0x0067: 0x0067, # LATIN SMALL LETTER G
    0x0068: 0x0068, # LATIN SMALL LETTER H
    0x0069: 0x0069, # LATIN SMALL LETTER I
    0x006a: 0x006a, # LATIN SMALL LETTER J
    0x006b: 0x006b, # LATIN SMALL LETTER K
    0x006c: 0x006c, # LATIN SMALL LETTER L
    0x006d: 0x006d, # LATIN SMALL LETTER M
    0x006e: 0x006e, # LATIN SMALL LETTER N
    0x006f: 0x006f, # LATIN SMALL LETTER O
    0x0070: 0x0070, # LATIN SMALL LETTER P
    0x0071: 0x0071, # LATIN SMALL LETTER Q
    0x0072: 0x0072, # LATIN SMALL LETTER R
    0x0073: 0x0073, # LATIN SMALL LETTER S
    0x0074: 0x0074, # LATIN SMALL LETTER T
    0x0075: 0x0075, # LATIN SMALL LETTER U
    0x0076: 0x0076, # LATIN SMALL LETTER V
    0x0077: 0x0077, # LATIN SMALL LETTER W
    0x0078: 0x0078, # LATIN SMALL LETTER X
    0x0079: 0x0079, # LATIN SMALL LETTER Y
    0x007a: 0x007a, # LATIN SMALL LETTER Z
    0x007b: 0x007b, # LEFT CURLY BRACKET
    0x007c: 0x007c, # VERTICAL LINE
    0x007d: 0x007d, # RIGHT CURLY BRACKET
    0x007e: 0x007e, # TILDE
    0x007f: 0x007f, # DELETE
    0x00a0: 0x00ff, # NO-BREAK SPACE
    0x00a1: 0x00ad, # INVERTED EXCLAMATION MARK
    0x00a2: 0x00bd, # CENT SIGN
    0x00a3: 0x009c, # POUND SIGN
    0x00a4: 0x00cf, # CURRENCY SIGN
    0x00a5: 0x00be, # YEN SIGN
    0x00a6: 0x00dd, # BROKEN BAR
    0x00a7: 0x00f5, # SECTION SIGN
    0x00a8: 0x00f9, # DIAERESIS
    0x00a9: 0x00b8, # COPYRIGHT SIGN
    0x00aa: 0x00a6, # FEMININE ORDINAL INDICATOR
    0x00ab: 0x00ae, # LEFT-POINTING DOUBLE ANGLE QUOTATION MARK
    0x00ac: 0x00aa, # NOT SIGN
    0x00ad: 0x00f0, # SOFT HYPHEN
    0x00ae: 0x00a9, # REGISTERED SIGN
    0x00af: 0x00ee, # MACRON
    0x00b0: 0x00f8, # DEGREE SIGN
    0x00b1: 0x00f1, # PLUS-MINUS SIGN
    0x00b2: 0x00fd, # SUPERSCRIPT TWO
    0x00b3: 0x00fc, # SUPERSCRIPT THREE
    0x00b4: 0x00ef, # ACUTE ACCENT
    0x00b5: 0x00e6, # MICRO SIGN
    0x00b6: 0x00f4, # PILCROW SIGN
    0x00b7: 0x00fa, # MIDDLE DOT
    0x00b8: 0x00f7, # CEDILLA
    0x00b9: 0x00fb, # SUPERSCRIPT ONE
    0x00ba: 0x00a7, # MASCULINE ORDINAL INDICATOR
    0x00bb: 0x00af, # RIGHT-POINTING DOUBLE ANGLE QUOTATION MARK
    0x00bc: 0x00ac, # VULGAR FRACTION ONE QUARTER
    0x00bd: 0x00ab, # VULGAR FRACTION ONE HALF
    0x00be: 0x00f3, # VULGAR FRACTION THREE QUARTERS
    0x00bf: 0x00a8, # INVERTED QUESTION MARK
    0x00c0: 0x00b7, # LATIN CAPITAL LETTER A WITH GRAVE
    0x00c1: 0x00b5, # LATIN CAPITAL LETTER A WITH ACUTE
    0x00c2: 0x00b6, # LATIN CAPITAL LETTER A WITH CIRCUMFLEX
    0x00c3: 0x00c7, # LATIN CAPITAL LETTER A WITH TILDE
    0x00c4: 0x008e, # LATIN CAPITAL LETTER A WITH DIAERESIS
    0x00c5: 0x008f, # LATIN CAPITAL LETTER A WITH RING ABOVE
    0x00c6: 0x0092, # LATIN CAPITAL LIGATURE AE
    0x00c7: 0x0080, # LATIN CAPITAL LETTER C WITH CEDILLA
    0x00c8: 0x00d4, # LATIN CAPITAL LETTER E WITH GRAVE
    0x00c9: 0x0090, # LATIN CAPITAL LETTER E WITH ACUTE
    0x00ca: 0x00d2, # LATIN CAPITAL LETTER E WITH CIRCUMFLEX
    0x00cb: 0x00d3, # LATIN CAPITAL LETTER E WITH DIAERESIS
    0x00cc: 0x00de, # LATIN CAPITAL LETTER I WITH GRAVE
    0x00cd: 0x00d6, # LATIN CAPITAL LETTER I WITH ACUTE
    0x00ce: 0x00d7, # LATIN CAPITAL LETTER I WITH CIRCUMFLEX
    0x00cf: 0x00d8, # LATIN CAPITAL LETTER I WITH DIAERESIS
    0x00d0: 0x00d1, # LATIN CAPITAL LETTER ETH
    0x00d1: 0x00a5, # LATIN CAPITAL LETTER N WITH TILDE
    0x00d2: 0x00e3, # LATIN CAPITAL LETTER O WITH GRAVE
    0x00d3: 0x00e0, # LATIN CAPITAL LETTER O WITH ACUTE
    0x00d4: 0x00e2, # LATIN CAPITAL LETTER O WITH CIRCUMFLEX
    0x00d5: 0x00e5, # LATIN CAPITAL LETTER O WITH TILDE
    0x00d6: 0x0099, # LATIN CAPITAL LETTER O WITH DIAERESIS
    0x00d7: 0x009e, # MULTIPLICATION SIGN
    0x00d8: 0x009d, # LATIN CAPITAL LETTER O WITH STROKE
    0x00d9: 0x00eb, # LATIN CAPITAL LETTER U WITH GRAVE
    0x00da: 0x00e9, # LATIN CAPITAL LETTER U WITH ACUTE
    0x00db: 0x00ea, # LATIN CAPITAL LETTER U WITH CIRCUMFLEX
    0x00dc: 0x009a, # LATIN CAPITAL LETTER U WITH DIAERESIS
    0x00dd: 0x00ed, # LATIN CAPITAL LETTER Y WITH ACUTE
    0x00de: 0x00e8, # LATIN CAPITAL LETTER THORN
    0x00df: 0x00e1, # LATIN SMALL LETTER SHARP S
    0x00e0: 0x0085, # LATIN SMALL LETTER A WITH GRAVE
    0x00e1: 0x00a0, # LATIN SMALL LETTER A WITH ACUTE
    0x00e2: 0x0083, # LATIN SMALL LETTER A WITH CIRCUMFLEX
    0x00e3: 0x00c6, # LATIN SMALL LETTER A WITH TILDE
    0x00e4: 0x0084, # LATIN SMALL LETTER A WITH DIAERESIS
    0x00e5: 0x0086, # LATIN SMALL LETTER A WITH RING ABOVE
    0x00e6: 0x0091, # LATIN SMALL LIGATURE AE
    0x00e7: 0x0087, # LATIN SMALL LETTER C WITH CEDILLA
    0x00e8: 0x008a, # LATIN SMALL LETTER E WITH GRAVE
    0x00e9: 0x0082, # LATIN SMALL LETTER E WITH ACUTE
    0x00ea: 0x0088, # LATIN SMALL LETTER E WITH CIRCUMFLEX
    0x00eb: 0x0089, # LATIN SMALL LETTER E WITH DIAERESIS
    0x00ec: 0x008d, # LATIN SMALL LETTER I WITH GRAVE
    0x00ed: 0x00a1, # LATIN SMALL LETTER I WITH ACUTE
    0x00ee: 0x008c, # LATIN SMALL LETTER I WITH CIRCUMFLEX
    0x00ef: 0x008b, # LATIN SMALL LETTER I WITH DIAERESIS
    0x00f0: 0x00d0, # LATIN SMALL LETTER ETH
    0x00f1: 0x00a4, # LATIN SMALL LETTER N WITH TILDE
    0x00f2: 0x0095, # LATIN SMALL LETTER O WITH GRAVE
    0x00f3: 0x00a2, # LATIN SMALL LETTER O WITH ACUTE
    0x00f4: 0x0093, # LATIN SMALL LETTER O WITH CIRCUMFLEX
    0x00f5: 0x00e4, # LATIN SMALL LETTER O WITH TILDE
    0x00f6: 0x0094, # LATIN SMALL LETTER O WITH DIAERESIS
    0x00f7: 0x00f6, # DIVISION SIGN
    0x00f8: 0x009b, # LATIN SMALL LETTER O WITH STROKE
    0x00f9: 0x0097, # LATIN SMALL LETTER U WITH GRAVE
    0x00fa: 0x00a3, # LATIN SMALL LETTER U WITH ACUTE
    0x00fb: 0x0096, # LATIN SMALL LETTER U WITH CIRCUMFLEX
    0x00fc: 0x0081, # LATIN SMALL LETTER U WITH DIAERESIS
    0x00fd: 0x00ec, # LATIN SMALL LETTER Y WITH ACUTE
    0x00fe: 0x00e7, # LATIN SMALL LETTER THORN
    0x00ff: 0x0098, # LATIN SMALL LETTER Y WITH DIAERESIS
    0x0131: 0x00d5, # LATIN SMALL LETTER DOTLESS I
    0x0192: 0x009f, # LATIN SMALL LETTER F WITH HOOK
    0x2017: 0x00f2, # DOUBLE LOW LINE
    0x2500: 0x00c4, # BOX DRAWINGS LIGHT HORIZONTAL
    0x2502: 0x00b3, # BOX DRAWINGS LIGHT VERTICAL
    0x250c: 0x00da, # BOX DRAWINGS LIGHT DOWN AND RIGHT
    0x2510: 0x00bf, # BOX DRAWINGS LIGHT DOWN AND LEFT
    0x2514: 0x00c0, # BOX DRAWINGS LIGHT UP AND RIGHT
    0x2518: 0x00d9, # BOX DRAWINGS LIGHT UP AND LEFT
    0x251c: 0x00c3, # BOX DRAWINGS LIGHT VERTICAL AND RIGHT
    0x2524: 0x00b4, # BOX DRAWINGS LIGHT VERTICAL AND LEFT
    0x252c: 0x00c2, # BOX DRAWINGS LIGHT DOWN AND HORIZONTAL
    0x2534: 0x00c1, # BOX DRAWINGS LIGHT UP AND HORIZONTAL
    0x253c: 0x00c5, # BOX DRAWINGS LIGHT VERTICAL AND HORIZONTAL
    0x2550: 0x00cd, # BOX DRAWINGS DOUBLE HORIZONTAL
    0x2551: 0x00ba, # BOX DRAWINGS DOUBLE VERTICAL
    0x2554: 0x00c9, # BOX DRAWINGS DOUBLE DOWN AND RIGHT
    0x2557: 0x00bb, # BOX DRAWINGS DOUBLE DOWN AND LEFT
    0x255a: 0x00c8, # BOX DRAWINGS DOUBLE UP AND RIGHT
    0x255d: 0x00bc, # BOX DRAWINGS DOUBLE UP AND LEFT
    0x2560: 0x00cc, # BOX DRAWINGS DOUBLE VERTICAL AND RIGHT
    0x2563: 0x00b9, # BOX DRAWINGS DOUBLE VERTICAL AND LEFT
    0x2566: 0x00cb, # BOX DRAWINGS DOUBLE DOWN AND HORIZONTAL
    0x2569: 0x00ca, # BOX DRAWINGS DOUBLE UP AND HORIZONTAL
    0x256c: 0x00ce, # BOX DRAWINGS DOUBLE VERTICAL AND HORIZONTAL
    0x2580: 0x00df, # UPPER HALF BLOCK
    0x2584: 0x00dc, # LOWER HALF BLOCK
    0x2588: 0x00db, # FULL BLOCK
    0x2591: 0x00b0, # LIGHT SHADE
    0x2592: 0x00b1, # MEDIUM SHADE
    0x2593: 0x00b2, # DARK SHADE
    0x25a0: 0x00fe, # BLACK SQUARE
}
} | /regulome_web-2.0rc1.tar.gz/regulome_web-2.0rc1/regulome_app/webapp/static/brython/Lib/encodings/cp850.py | 0.448426 | 0.218232 | cp850.py | pypi |
"""#"
import codecs
### Codec APIs
class Codec(codecs.Codec):
    """Stateless cp862 codec built on the C-level charmap helpers."""

    def encode(self, input, errors='strict'):
        """Encode *input* text to cp862 bytes using the module-level encoding_map."""
        encoded = codecs.charmap_encode(input, errors, encoding_map)
        return encoded

    def decode(self, input, errors='strict'):
        """Decode cp862 bytes in *input* to text using the module-level decoding_table."""
        decoded = codecs.charmap_decode(input, errors, decoding_table)
        return decoded
class IncrementalEncoder(codecs.IncrementalEncoder):
    """Incremental cp862 encoder; the mapping is stateless, so *final* is irrelevant."""

    def encode(self, input, final=False):
        # charmap_encode returns (bytes, length-consumed); only the bytes are needed.
        data, _consumed = codecs.charmap_encode(input, self.errors, encoding_map)
        return data
class IncrementalDecoder(codecs.IncrementalDecoder):
    """Incremental cp862 decoder; the mapping is stateless, so *final* is irrelevant."""

    def decode(self, input, final=False):
        # charmap_decode returns (text, length-consumed); only the text is needed.
        text, _consumed = codecs.charmap_decode(input, self.errors, decoding_table)
        return text
class StreamWriter(Codec,codecs.StreamWriter):
    # Stream writer for cp862: all behavior is inherited from Codec (the
    # encode method) and codecs.StreamWriter (the buffering machinery).
    pass
class StreamReader(Codec,codecs.StreamReader):
    # Stream reader for cp862: all behavior is inherited from Codec (the
    # decode method) and codecs.StreamReader (the buffering machinery).
    pass
### encodings module API
def getregentry():
    """Return the CodecInfo entry the codec registry uses for 'cp862'."""
    # One shared stateless Codec instance serves both directions.
    codec = Codec()
    return codecs.CodecInfo(
        name='cp862',
        encode=codec.encode,
        decode=codec.decode,
        incrementalencoder=IncrementalEncoder,
        incrementaldecoder=IncrementalDecoder,
        streamreader=StreamReader,
        streamwriter=StreamWriter,
    )
### Decoding Map
# Byte -> Unicode code point map for cp862 (DOS Hebrew).  Starts from the
# identity mapping over 0x00-0xFF, then overrides the high half (0x80-0xFF);
# 0x00-0x7F stay ASCII-identical.  Kept alongside decoding_table for
# backward compatibility with consumers that introspect the dict form.
decoding_map = codecs.make_identity_dict(range(256))
decoding_map.update({
    0x0080: 0x05d0,  # HEBREW LETTER ALEF
    0x0081: 0x05d1,  # HEBREW LETTER BET
    0x0082: 0x05d2,  # HEBREW LETTER GIMEL
    0x0083: 0x05d3,  # HEBREW LETTER DALET
    0x0084: 0x05d4,  # HEBREW LETTER HE
    0x0085: 0x05d5,  # HEBREW LETTER VAV
    0x0086: 0x05d6,  # HEBREW LETTER ZAYIN
    0x0087: 0x05d7,  # HEBREW LETTER HET
    0x0088: 0x05d8,  # HEBREW LETTER TET
    0x0089: 0x05d9,  # HEBREW LETTER YOD
    0x008a: 0x05da,  # HEBREW LETTER FINAL KAF
    0x008b: 0x05db,  # HEBREW LETTER KAF
    0x008c: 0x05dc,  # HEBREW LETTER LAMED
    0x008d: 0x05dd,  # HEBREW LETTER FINAL MEM
    0x008e: 0x05de,  # HEBREW LETTER MEM
    0x008f: 0x05df,  # HEBREW LETTER FINAL NUN
    0x0090: 0x05e0,  # HEBREW LETTER NUN
    0x0091: 0x05e1,  # HEBREW LETTER SAMEKH
    0x0092: 0x05e2,  # HEBREW LETTER AYIN
    0x0093: 0x05e3,  # HEBREW LETTER FINAL PE
    0x0094: 0x05e4,  # HEBREW LETTER PE
    0x0095: 0x05e5,  # HEBREW LETTER FINAL TSADI
    0x0096: 0x05e6,  # HEBREW LETTER TSADI
    0x0097: 0x05e7,  # HEBREW LETTER QOF
    0x0098: 0x05e8,  # HEBREW LETTER RESH
    0x0099: 0x05e9,  # HEBREW LETTER SHIN
    0x009a: 0x05ea,  # HEBREW LETTER TAV
    0x009b: 0x00a2,  # CENT SIGN
    0x009c: 0x00a3,  # POUND SIGN
    0x009d: 0x00a5,  # YEN SIGN
    0x009e: 0x20a7,  # PESETA SIGN
    0x009f: 0x0192,  # LATIN SMALL LETTER F WITH HOOK
    0x00a0: 0x00e1,  # LATIN SMALL LETTER A WITH ACUTE
    0x00a1: 0x00ed,  # LATIN SMALL LETTER I WITH ACUTE
    0x00a2: 0x00f3,  # LATIN SMALL LETTER O WITH ACUTE
    0x00a3: 0x00fa,  # LATIN SMALL LETTER U WITH ACUTE
    0x00a4: 0x00f1,  # LATIN SMALL LETTER N WITH TILDE
    0x00a5: 0x00d1,  # LATIN CAPITAL LETTER N WITH TILDE
    0x00a6: 0x00aa,  # FEMININE ORDINAL INDICATOR
    0x00a7: 0x00ba,  # MASCULINE ORDINAL INDICATOR
    0x00a8: 0x00bf,  # INVERTED QUESTION MARK
    0x00a9: 0x2310,  # REVERSED NOT SIGN
    0x00aa: 0x00ac,  # NOT SIGN
    0x00ab: 0x00bd,  # VULGAR FRACTION ONE HALF
    0x00ac: 0x00bc,  # VULGAR FRACTION ONE QUARTER
    0x00ad: 0x00a1,  # INVERTED EXCLAMATION MARK
    0x00ae: 0x00ab,  # LEFT-POINTING DOUBLE ANGLE QUOTATION MARK
    0x00af: 0x00bb,  # RIGHT-POINTING DOUBLE ANGLE QUOTATION MARK
    0x00b0: 0x2591,  # LIGHT SHADE
    0x00b1: 0x2592,  # MEDIUM SHADE
    0x00b2: 0x2593,  # DARK SHADE
    0x00b3: 0x2502,  # BOX DRAWINGS LIGHT VERTICAL
    0x00b4: 0x2524,  # BOX DRAWINGS LIGHT VERTICAL AND LEFT
    0x00b5: 0x2561,  # BOX DRAWINGS VERTICAL SINGLE AND LEFT DOUBLE
    0x00b6: 0x2562,  # BOX DRAWINGS VERTICAL DOUBLE AND LEFT SINGLE
    0x00b7: 0x2556,  # BOX DRAWINGS DOWN DOUBLE AND LEFT SINGLE
    0x00b8: 0x2555,  # BOX DRAWINGS DOWN SINGLE AND LEFT DOUBLE
    0x00b9: 0x2563,  # BOX DRAWINGS DOUBLE VERTICAL AND LEFT
    0x00ba: 0x2551,  # BOX DRAWINGS DOUBLE VERTICAL
    0x00bb: 0x2557,  # BOX DRAWINGS DOUBLE DOWN AND LEFT
    0x00bc: 0x255d,  # BOX DRAWINGS DOUBLE UP AND LEFT
    0x00bd: 0x255c,  # BOX DRAWINGS UP DOUBLE AND LEFT SINGLE
    0x00be: 0x255b,  # BOX DRAWINGS UP SINGLE AND LEFT DOUBLE
    0x00bf: 0x2510,  # BOX DRAWINGS LIGHT DOWN AND LEFT
    0x00c0: 0x2514,  # BOX DRAWINGS LIGHT UP AND RIGHT
    0x00c1: 0x2534,  # BOX DRAWINGS LIGHT UP AND HORIZONTAL
    0x00c2: 0x252c,  # BOX DRAWINGS LIGHT DOWN AND HORIZONTAL
    0x00c3: 0x251c,  # BOX DRAWINGS LIGHT VERTICAL AND RIGHT
    0x00c4: 0x2500,  # BOX DRAWINGS LIGHT HORIZONTAL
    0x00c5: 0x253c,  # BOX DRAWINGS LIGHT VERTICAL AND HORIZONTAL
    0x00c6: 0x255e,  # BOX DRAWINGS VERTICAL SINGLE AND RIGHT DOUBLE
    0x00c7: 0x255f,  # BOX DRAWINGS VERTICAL DOUBLE AND RIGHT SINGLE
    0x00c8: 0x255a,  # BOX DRAWINGS DOUBLE UP AND RIGHT
    0x00c9: 0x2554,  # BOX DRAWINGS DOUBLE DOWN AND RIGHT
    0x00ca: 0x2569,  # BOX DRAWINGS DOUBLE UP AND HORIZONTAL
    0x00cb: 0x2566,  # BOX DRAWINGS DOUBLE DOWN AND HORIZONTAL
    0x00cc: 0x2560,  # BOX DRAWINGS DOUBLE VERTICAL AND RIGHT
    0x00cd: 0x2550,  # BOX DRAWINGS DOUBLE HORIZONTAL
    0x00ce: 0x256c,  # BOX DRAWINGS DOUBLE VERTICAL AND HORIZONTAL
    0x00cf: 0x2567,  # BOX DRAWINGS UP SINGLE AND HORIZONTAL DOUBLE
    0x00d0: 0x2568,  # BOX DRAWINGS UP DOUBLE AND HORIZONTAL SINGLE
    0x00d1: 0x2564,  # BOX DRAWINGS DOWN SINGLE AND HORIZONTAL DOUBLE
    0x00d2: 0x2565,  # BOX DRAWINGS DOWN DOUBLE AND HORIZONTAL SINGLE
    0x00d3: 0x2559,  # BOX DRAWINGS UP DOUBLE AND RIGHT SINGLE
    0x00d4: 0x2558,  # BOX DRAWINGS UP SINGLE AND RIGHT DOUBLE
    0x00d5: 0x2552,  # BOX DRAWINGS DOWN SINGLE AND RIGHT DOUBLE
    0x00d6: 0x2553,  # BOX DRAWINGS DOWN DOUBLE AND RIGHT SINGLE
    0x00d7: 0x256b,  # BOX DRAWINGS VERTICAL DOUBLE AND HORIZONTAL SINGLE
    0x00d8: 0x256a,  # BOX DRAWINGS VERTICAL SINGLE AND HORIZONTAL DOUBLE
    0x00d9: 0x2518,  # BOX DRAWINGS LIGHT UP AND LEFT
    0x00da: 0x250c,  # BOX DRAWINGS LIGHT DOWN AND RIGHT
    0x00db: 0x2588,  # FULL BLOCK
    0x00dc: 0x2584,  # LOWER HALF BLOCK
    0x00dd: 0x258c,  # LEFT HALF BLOCK
    0x00de: 0x2590,  # RIGHT HALF BLOCK
    0x00df: 0x2580,  # UPPER HALF BLOCK
    0x00e0: 0x03b1,  # GREEK SMALL LETTER ALPHA
    0x00e1: 0x00df,  # LATIN SMALL LETTER SHARP S (GERMAN)
    0x00e2: 0x0393,  # GREEK CAPITAL LETTER GAMMA
    0x00e3: 0x03c0,  # GREEK SMALL LETTER PI
    0x00e4: 0x03a3,  # GREEK CAPITAL LETTER SIGMA
    0x00e5: 0x03c3,  # GREEK SMALL LETTER SIGMA
    0x00e6: 0x00b5,  # MICRO SIGN
    0x00e7: 0x03c4,  # GREEK SMALL LETTER TAU
    0x00e8: 0x03a6,  # GREEK CAPITAL LETTER PHI
    0x00e9: 0x0398,  # GREEK CAPITAL LETTER THETA
    0x00ea: 0x03a9,  # GREEK CAPITAL LETTER OMEGA
    0x00eb: 0x03b4,  # GREEK SMALL LETTER DELTA
    0x00ec: 0x221e,  # INFINITY
    0x00ed: 0x03c6,  # GREEK SMALL LETTER PHI
    0x00ee: 0x03b5,  # GREEK SMALL LETTER EPSILON
    0x00ef: 0x2229,  # INTERSECTION
    0x00f0: 0x2261,  # IDENTICAL TO
    0x00f1: 0x00b1,  # PLUS-MINUS SIGN
    0x00f2: 0x2265,  # GREATER-THAN OR EQUAL TO
    0x00f3: 0x2264,  # LESS-THAN OR EQUAL TO
    0x00f4: 0x2320,  # TOP HALF INTEGRAL
    0x00f5: 0x2321,  # BOTTOM HALF INTEGRAL
    0x00f6: 0x00f7,  # DIVISION SIGN
    0x00f7: 0x2248,  # ALMOST EQUAL TO
    0x00f8: 0x00b0,  # DEGREE SIGN
    0x00f9: 0x2219,  # BULLET OPERATOR
    0x00fa: 0x00b7,  # MIDDLE DOT
    0x00fb: 0x221a,  # SQUARE ROOT
    0x00fc: 0x207f,  # SUPERSCRIPT LATIN SMALL LETTER N
    0x00fd: 0x00b2,  # SUPERSCRIPT TWO
    0x00fe: 0x25a0,  # BLACK SQUARE
    0x00ff: 0x00a0,  # NO-BREAK SPACE
})
### Decoding Table
# 256-entry string used by codecs.charmap_decode: decoding_table[byte] is the
# character that byte decodes to.  Index == byte value; generated data, do not
# edit by hand.
decoding_table = (
    '\x00'  # 0x0000 -> NULL
    '\x01'  # 0x0001 -> START OF HEADING
    '\x02'  # 0x0002 -> START OF TEXT
    '\x03'  # 0x0003 -> END OF TEXT
    '\x04'  # 0x0004 -> END OF TRANSMISSION
    '\x05'  # 0x0005 -> ENQUIRY
    '\x06'  # 0x0006 -> ACKNOWLEDGE
    '\x07'  # 0x0007 -> BELL
    '\x08'  # 0x0008 -> BACKSPACE
    '\t'  # 0x0009 -> HORIZONTAL TABULATION
    '\n'  # 0x000a -> LINE FEED
    '\x0b'  # 0x000b -> VERTICAL TABULATION
    '\x0c'  # 0x000c -> FORM FEED
    '\r'  # 0x000d -> CARRIAGE RETURN
    '\x0e'  # 0x000e -> SHIFT OUT
    '\x0f'  # 0x000f -> SHIFT IN
    '\x10'  # 0x0010 -> DATA LINK ESCAPE
    '\x11'  # 0x0011 -> DEVICE CONTROL ONE
    '\x12'  # 0x0012 -> DEVICE CONTROL TWO
    '\x13'  # 0x0013 -> DEVICE CONTROL THREE
    '\x14'  # 0x0014 -> DEVICE CONTROL FOUR
    '\x15'  # 0x0015 -> NEGATIVE ACKNOWLEDGE
    '\x16'  # 0x0016 -> SYNCHRONOUS IDLE
    '\x17'  # 0x0017 -> END OF TRANSMISSION BLOCK
    '\x18'  # 0x0018 -> CANCEL
    '\x19'  # 0x0019 -> END OF MEDIUM
    '\x1a'  # 0x001a -> SUBSTITUTE
    '\x1b'  # 0x001b -> ESCAPE
    '\x1c'  # 0x001c -> FILE SEPARATOR
    '\x1d'  # 0x001d -> GROUP SEPARATOR
    '\x1e'  # 0x001e -> RECORD SEPARATOR
    '\x1f'  # 0x001f -> UNIT SEPARATOR
    ' '  # 0x0020 -> SPACE
    '!'  # 0x0021 -> EXCLAMATION MARK
    '"'  # 0x0022 -> QUOTATION MARK
    '#'  # 0x0023 -> NUMBER SIGN
    '$'  # 0x0024 -> DOLLAR SIGN
    '%'  # 0x0025 -> PERCENT SIGN
    '&'  # 0x0026 -> AMPERSAND
    "'"  # 0x0027 -> APOSTROPHE
    '('  # 0x0028 -> LEFT PARENTHESIS
    ')'  # 0x0029 -> RIGHT PARENTHESIS
    '*'  # 0x002a -> ASTERISK
    '+'  # 0x002b -> PLUS SIGN
    ','  # 0x002c -> COMMA
    '-'  # 0x002d -> HYPHEN-MINUS
    '.'  # 0x002e -> FULL STOP
    '/'  # 0x002f -> SOLIDUS
    '0'  # 0x0030 -> DIGIT ZERO
    '1'  # 0x0031 -> DIGIT ONE
    '2'  # 0x0032 -> DIGIT TWO
    '3'  # 0x0033 -> DIGIT THREE
    '4'  # 0x0034 -> DIGIT FOUR
    '5'  # 0x0035 -> DIGIT FIVE
    '6'  # 0x0036 -> DIGIT SIX
    '7'  # 0x0037 -> DIGIT SEVEN
    '8'  # 0x0038 -> DIGIT EIGHT
    '9'  # 0x0039 -> DIGIT NINE
    ':'  # 0x003a -> COLON
    ';'  # 0x003b -> SEMICOLON
    '<'  # 0x003c -> LESS-THAN SIGN
    '='  # 0x003d -> EQUALS SIGN
    '>'  # 0x003e -> GREATER-THAN SIGN
    '?'  # 0x003f -> QUESTION MARK
    '@'  # 0x0040 -> COMMERCIAL AT
    'A'  # 0x0041 -> LATIN CAPITAL LETTER A
    'B'  # 0x0042 -> LATIN CAPITAL LETTER B
    'C'  # 0x0043 -> LATIN CAPITAL LETTER C
    'D'  # 0x0044 -> LATIN CAPITAL LETTER D
    'E'  # 0x0045 -> LATIN CAPITAL LETTER E
    'F'  # 0x0046 -> LATIN CAPITAL LETTER F
    'G'  # 0x0047 -> LATIN CAPITAL LETTER G
    'H'  # 0x0048 -> LATIN CAPITAL LETTER H
    'I'  # 0x0049 -> LATIN CAPITAL LETTER I
    'J'  # 0x004a -> LATIN CAPITAL LETTER J
    'K'  # 0x004b -> LATIN CAPITAL LETTER K
    'L'  # 0x004c -> LATIN CAPITAL LETTER L
    'M'  # 0x004d -> LATIN CAPITAL LETTER M
    'N'  # 0x004e -> LATIN CAPITAL LETTER N
    'O'  # 0x004f -> LATIN CAPITAL LETTER O
    'P'  # 0x0050 -> LATIN CAPITAL LETTER P
    'Q'  # 0x0051 -> LATIN CAPITAL LETTER Q
    'R'  # 0x0052 -> LATIN CAPITAL LETTER R
    'S'  # 0x0053 -> LATIN CAPITAL LETTER S
    'T'  # 0x0054 -> LATIN CAPITAL LETTER T
    'U'  # 0x0055 -> LATIN CAPITAL LETTER U
    'V'  # 0x0056 -> LATIN CAPITAL LETTER V
    'W'  # 0x0057 -> LATIN CAPITAL LETTER W
    'X'  # 0x0058 -> LATIN CAPITAL LETTER X
    'Y'  # 0x0059 -> LATIN CAPITAL LETTER Y
    'Z'  # 0x005a -> LATIN CAPITAL LETTER Z
    '['  # 0x005b -> LEFT SQUARE BRACKET
    '\\'  # 0x005c -> REVERSE SOLIDUS
    ']'  # 0x005d -> RIGHT SQUARE BRACKET
    '^'  # 0x005e -> CIRCUMFLEX ACCENT
    '_'  # 0x005f -> LOW LINE
    '`'  # 0x0060 -> GRAVE ACCENT
    'a'  # 0x0061 -> LATIN SMALL LETTER A
    'b'  # 0x0062 -> LATIN SMALL LETTER B
    'c'  # 0x0063 -> LATIN SMALL LETTER C
    'd'  # 0x0064 -> LATIN SMALL LETTER D
    'e'  # 0x0065 -> LATIN SMALL LETTER E
    'f'  # 0x0066 -> LATIN SMALL LETTER F
    'g'  # 0x0067 -> LATIN SMALL LETTER G
    'h'  # 0x0068 -> LATIN SMALL LETTER H
    'i'  # 0x0069 -> LATIN SMALL LETTER I
    'j'  # 0x006a -> LATIN SMALL LETTER J
    'k'  # 0x006b -> LATIN SMALL LETTER K
    'l'  # 0x006c -> LATIN SMALL LETTER L
    'm'  # 0x006d -> LATIN SMALL LETTER M
    'n'  # 0x006e -> LATIN SMALL LETTER N
    'o'  # 0x006f -> LATIN SMALL LETTER O
    'p'  # 0x0070 -> LATIN SMALL LETTER P
    'q'  # 0x0071 -> LATIN SMALL LETTER Q
    'r'  # 0x0072 -> LATIN SMALL LETTER R
    's'  # 0x0073 -> LATIN SMALL LETTER S
    't'  # 0x0074 -> LATIN SMALL LETTER T
    'u'  # 0x0075 -> LATIN SMALL LETTER U
    'v'  # 0x0076 -> LATIN SMALL LETTER V
    'w'  # 0x0077 -> LATIN SMALL LETTER W
    'x'  # 0x0078 -> LATIN SMALL LETTER X
    'y'  # 0x0079 -> LATIN SMALL LETTER Y
    'z'  # 0x007a -> LATIN SMALL LETTER Z
    '{'  # 0x007b -> LEFT CURLY BRACKET
    '|'  # 0x007c -> VERTICAL LINE
    '}'  # 0x007d -> RIGHT CURLY BRACKET
    '~'  # 0x007e -> TILDE
    '\x7f'  # 0x007f -> DELETE
    '\u05d0'  # 0x0080 -> HEBREW LETTER ALEF
    '\u05d1'  # 0x0081 -> HEBREW LETTER BET
    '\u05d2'  # 0x0082 -> HEBREW LETTER GIMEL
    '\u05d3'  # 0x0083 -> HEBREW LETTER DALET
    '\u05d4'  # 0x0084 -> HEBREW LETTER HE
    '\u05d5'  # 0x0085 -> HEBREW LETTER VAV
    '\u05d6'  # 0x0086 -> HEBREW LETTER ZAYIN
    '\u05d7'  # 0x0087 -> HEBREW LETTER HET
    '\u05d8'  # 0x0088 -> HEBREW LETTER TET
    '\u05d9'  # 0x0089 -> HEBREW LETTER YOD
    '\u05da'  # 0x008a -> HEBREW LETTER FINAL KAF
    '\u05db'  # 0x008b -> HEBREW LETTER KAF
    '\u05dc'  # 0x008c -> HEBREW LETTER LAMED
    '\u05dd'  # 0x008d -> HEBREW LETTER FINAL MEM
    '\u05de'  # 0x008e -> HEBREW LETTER MEM
    '\u05df'  # 0x008f -> HEBREW LETTER FINAL NUN
    '\u05e0'  # 0x0090 -> HEBREW LETTER NUN
    '\u05e1'  # 0x0091 -> HEBREW LETTER SAMEKH
    '\u05e2'  # 0x0092 -> HEBREW LETTER AYIN
    '\u05e3'  # 0x0093 -> HEBREW LETTER FINAL PE
    '\u05e4'  # 0x0094 -> HEBREW LETTER PE
    '\u05e5'  # 0x0095 -> HEBREW LETTER FINAL TSADI
    '\u05e6'  # 0x0096 -> HEBREW LETTER TSADI
    '\u05e7'  # 0x0097 -> HEBREW LETTER QOF
    '\u05e8'  # 0x0098 -> HEBREW LETTER RESH
    '\u05e9'  # 0x0099 -> HEBREW LETTER SHIN
    '\u05ea'  # 0x009a -> HEBREW LETTER TAV
    '\xa2'  # 0x009b -> CENT SIGN
    '\xa3'  # 0x009c -> POUND SIGN
    '\xa5'  # 0x009d -> YEN SIGN
    '\u20a7'  # 0x009e -> PESETA SIGN
    '\u0192'  # 0x009f -> LATIN SMALL LETTER F WITH HOOK
    '\xe1'  # 0x00a0 -> LATIN SMALL LETTER A WITH ACUTE
    '\xed'  # 0x00a1 -> LATIN SMALL LETTER I WITH ACUTE
    '\xf3'  # 0x00a2 -> LATIN SMALL LETTER O WITH ACUTE
    '\xfa'  # 0x00a3 -> LATIN SMALL LETTER U WITH ACUTE
    '\xf1'  # 0x00a4 -> LATIN SMALL LETTER N WITH TILDE
    '\xd1'  # 0x00a5 -> LATIN CAPITAL LETTER N WITH TILDE
    '\xaa'  # 0x00a6 -> FEMININE ORDINAL INDICATOR
    '\xba'  # 0x00a7 -> MASCULINE ORDINAL INDICATOR
    '\xbf'  # 0x00a8 -> INVERTED QUESTION MARK
    '\u2310'  # 0x00a9 -> REVERSED NOT SIGN
    '\xac'  # 0x00aa -> NOT SIGN
    '\xbd'  # 0x00ab -> VULGAR FRACTION ONE HALF
    '\xbc'  # 0x00ac -> VULGAR FRACTION ONE QUARTER
    '\xa1'  # 0x00ad -> INVERTED EXCLAMATION MARK
    '\xab'  # 0x00ae -> LEFT-POINTING DOUBLE ANGLE QUOTATION MARK
    '\xbb'  # 0x00af -> RIGHT-POINTING DOUBLE ANGLE QUOTATION MARK
    '\u2591'  # 0x00b0 -> LIGHT SHADE
    '\u2592'  # 0x00b1 -> MEDIUM SHADE
    '\u2593'  # 0x00b2 -> DARK SHADE
    '\u2502'  # 0x00b3 -> BOX DRAWINGS LIGHT VERTICAL
    '\u2524'  # 0x00b4 -> BOX DRAWINGS LIGHT VERTICAL AND LEFT
    '\u2561'  # 0x00b5 -> BOX DRAWINGS VERTICAL SINGLE AND LEFT DOUBLE
    '\u2562'  # 0x00b6 -> BOX DRAWINGS VERTICAL DOUBLE AND LEFT SINGLE
    '\u2556'  # 0x00b7 -> BOX DRAWINGS DOWN DOUBLE AND LEFT SINGLE
    '\u2555'  # 0x00b8 -> BOX DRAWINGS DOWN SINGLE AND LEFT DOUBLE
    '\u2563'  # 0x00b9 -> BOX DRAWINGS DOUBLE VERTICAL AND LEFT
    '\u2551'  # 0x00ba -> BOX DRAWINGS DOUBLE VERTICAL
    '\u2557'  # 0x00bb -> BOX DRAWINGS DOUBLE DOWN AND LEFT
    '\u255d'  # 0x00bc -> BOX DRAWINGS DOUBLE UP AND LEFT
    '\u255c'  # 0x00bd -> BOX DRAWINGS UP DOUBLE AND LEFT SINGLE
    '\u255b'  # 0x00be -> BOX DRAWINGS UP SINGLE AND LEFT DOUBLE
    '\u2510'  # 0x00bf -> BOX DRAWINGS LIGHT DOWN AND LEFT
    '\u2514'  # 0x00c0 -> BOX DRAWINGS LIGHT UP AND RIGHT
    '\u2534'  # 0x00c1 -> BOX DRAWINGS LIGHT UP AND HORIZONTAL
    '\u252c'  # 0x00c2 -> BOX DRAWINGS LIGHT DOWN AND HORIZONTAL
    '\u251c'  # 0x00c3 -> BOX DRAWINGS LIGHT VERTICAL AND RIGHT
    '\u2500'  # 0x00c4 -> BOX DRAWINGS LIGHT HORIZONTAL
    '\u253c'  # 0x00c5 -> BOX DRAWINGS LIGHT VERTICAL AND HORIZONTAL
    '\u255e'  # 0x00c6 -> BOX DRAWINGS VERTICAL SINGLE AND RIGHT DOUBLE
    '\u255f'  # 0x00c7 -> BOX DRAWINGS VERTICAL DOUBLE AND RIGHT SINGLE
    '\u255a'  # 0x00c8 -> BOX DRAWINGS DOUBLE UP AND RIGHT
    '\u2554'  # 0x00c9 -> BOX DRAWINGS DOUBLE DOWN AND RIGHT
    '\u2569'  # 0x00ca -> BOX DRAWINGS DOUBLE UP AND HORIZONTAL
    '\u2566'  # 0x00cb -> BOX DRAWINGS DOUBLE DOWN AND HORIZONTAL
    '\u2560'  # 0x00cc -> BOX DRAWINGS DOUBLE VERTICAL AND RIGHT
    '\u2550'  # 0x00cd -> BOX DRAWINGS DOUBLE HORIZONTAL
    '\u256c'  # 0x00ce -> BOX DRAWINGS DOUBLE VERTICAL AND HORIZONTAL
    '\u2567'  # 0x00cf -> BOX DRAWINGS UP SINGLE AND HORIZONTAL DOUBLE
    '\u2568'  # 0x00d0 -> BOX DRAWINGS UP DOUBLE AND HORIZONTAL SINGLE
    '\u2564'  # 0x00d1 -> BOX DRAWINGS DOWN SINGLE AND HORIZONTAL DOUBLE
    '\u2565'  # 0x00d2 -> BOX DRAWINGS DOWN DOUBLE AND HORIZONTAL SINGLE
    '\u2559'  # 0x00d3 -> BOX DRAWINGS UP DOUBLE AND RIGHT SINGLE
    '\u2558'  # 0x00d4 -> BOX DRAWINGS UP SINGLE AND RIGHT DOUBLE
    '\u2552'  # 0x00d5 -> BOX DRAWINGS DOWN SINGLE AND RIGHT DOUBLE
    '\u2553'  # 0x00d6 -> BOX DRAWINGS DOWN DOUBLE AND RIGHT SINGLE
    '\u256b'  # 0x00d7 -> BOX DRAWINGS VERTICAL DOUBLE AND HORIZONTAL SINGLE
    '\u256a'  # 0x00d8 -> BOX DRAWINGS VERTICAL SINGLE AND HORIZONTAL DOUBLE
    '\u2518'  # 0x00d9 -> BOX DRAWINGS LIGHT UP AND LEFT
    '\u250c'  # 0x00da -> BOX DRAWINGS LIGHT DOWN AND RIGHT
    '\u2588'  # 0x00db -> FULL BLOCK
    '\u2584'  # 0x00dc -> LOWER HALF BLOCK
    '\u258c'  # 0x00dd -> LEFT HALF BLOCK
    '\u2590'  # 0x00de -> RIGHT HALF BLOCK
    '\u2580'  # 0x00df -> UPPER HALF BLOCK
    '\u03b1'  # 0x00e0 -> GREEK SMALL LETTER ALPHA
    '\xdf'  # 0x00e1 -> LATIN SMALL LETTER SHARP S (GERMAN)
    '\u0393'  # 0x00e2 -> GREEK CAPITAL LETTER GAMMA
    '\u03c0'  # 0x00e3 -> GREEK SMALL LETTER PI
    '\u03a3'  # 0x00e4 -> GREEK CAPITAL LETTER SIGMA
    '\u03c3'  # 0x00e5 -> GREEK SMALL LETTER SIGMA
    '\xb5'  # 0x00e6 -> MICRO SIGN
    '\u03c4'  # 0x00e7 -> GREEK SMALL LETTER TAU
    '\u03a6'  # 0x00e8 -> GREEK CAPITAL LETTER PHI
    '\u0398'  # 0x00e9 -> GREEK CAPITAL LETTER THETA
    '\u03a9'  # 0x00ea -> GREEK CAPITAL LETTER OMEGA
    '\u03b4'  # 0x00eb -> GREEK SMALL LETTER DELTA
    '\u221e'  # 0x00ec -> INFINITY
    '\u03c6'  # 0x00ed -> GREEK SMALL LETTER PHI
    '\u03b5'  # 0x00ee -> GREEK SMALL LETTER EPSILON
    '\u2229'  # 0x00ef -> INTERSECTION
    '\u2261'  # 0x00f0 -> IDENTICAL TO
    '\xb1'  # 0x00f1 -> PLUS-MINUS SIGN
    '\u2265'  # 0x00f2 -> GREATER-THAN OR EQUAL TO
    '\u2264'  # 0x00f3 -> LESS-THAN OR EQUAL TO
    '\u2320'  # 0x00f4 -> TOP HALF INTEGRAL
    '\u2321'  # 0x00f5 -> BOTTOM HALF INTEGRAL
    '\xf7'  # 0x00f6 -> DIVISION SIGN
    '\u2248'  # 0x00f7 -> ALMOST EQUAL TO
    '\xb0'  # 0x00f8 -> DEGREE SIGN
    '\u2219'  # 0x00f9 -> BULLET OPERATOR
    '\xb7'  # 0x00fa -> MIDDLE DOT
    '\u221a'  # 0x00fb -> SQUARE ROOT
    '\u207f'  # 0x00fc -> SUPERSCRIPT LATIN SMALL LETTER N
    '\xb2'  # 0x00fd -> SUPERSCRIPT TWO
    '\u25a0'  # 0x00fe -> BLACK SQUARE
    '\xa0'  # 0x00ff -> NO-BREAK SPACE
)
### Encoding Map
# Unicode code point -> byte value map used by codecs.charmap_encode.
# CP862 maps every byte 0x00-0xFF to a distinct character, so the encoding
# map is exactly the inverse of decoding_table.  Deriving it (instead of
# keeping a second 256-entry hand-written literal, as before) produces the
# identical dict while guaranteeing the two tables can never drift apart.
encoding_map = {ord(char): byte for byte, char in enumerate(decoding_table)}
"""#"
import codecs
### Codec APIs
class Codec(codecs.Codec):
    """Stateful charmap codec for cp860, backed by the module-level tables."""

    def encode(self, input, errors='strict'):
        """Encode *input* to cp860 bytes; returns (bytes, length_consumed)."""
        encoded = codecs.charmap_encode(input, errors, encoding_map)
        return encoded

    def decode(self, input, errors='strict'):
        """Decode cp860 *input* to text; returns (str, length_consumed)."""
        decoded = codecs.charmap_decode(input, errors, decoding_table)
        return decoded
class IncrementalEncoder(codecs.IncrementalEncoder):
    """Incremental cp860 encoder; charmap encoding keeps no cross-call state."""

    def encode(self, input, final=False):
        # charmap_encode returns (bytes, consumed); only the bytes are needed.
        data, _consumed = codecs.charmap_encode(input, self.errors, encoding_map)
        return data
class IncrementalDecoder(codecs.IncrementalDecoder):
    """Incremental cp860 decoder; single-byte charmaps never split characters."""

    def decode(self, input, final=False):
        # charmap_decode returns (str, consumed); only the text is needed.
        text, _consumed = codecs.charmap_decode(input, self.errors, decoding_table)
        return text
# Stream writer: inherits encode() from Codec and buffering from codecs.StreamWriter.
class StreamWriter(Codec,codecs.StreamWriter):
    pass
# Stream reader: inherits decode() from Codec and buffering from codecs.StreamReader.
class StreamReader(Codec,codecs.StreamReader):
    pass
### encodings module API
def getregentry():
return codecs.CodecInfo(
name='cp860',
encode=Codec().encode,
decode=Codec().decode,
incrementalencoder=IncrementalEncoder,
incrementaldecoder=IncrementalDecoder,
streamreader=StreamReader,
streamwriter=StreamWriter,
)
### Decoding Map
# Legacy byte -> Unicode ordinal mapping: identity for 0x00-0x7f, explicit
# overrides for the high half.  The codec's decode() path actually uses
# decoding_table below; this dict is retained for backward compatibility
# with consumers that expect gencodec.py's decoding_map attribute.
decoding_map = codecs.make_identity_dict(range(256))
decoding_map.update({
    0x0080: 0x00c7, # LATIN CAPITAL LETTER C WITH CEDILLA
    0x0081: 0x00fc, # LATIN SMALL LETTER U WITH DIAERESIS
    0x0082: 0x00e9, # LATIN SMALL LETTER E WITH ACUTE
    0x0083: 0x00e2, # LATIN SMALL LETTER A WITH CIRCUMFLEX
    0x0084: 0x00e3, # LATIN SMALL LETTER A WITH TILDE
    0x0085: 0x00e0, # LATIN SMALL LETTER A WITH GRAVE
    0x0086: 0x00c1, # LATIN CAPITAL LETTER A WITH ACUTE
    0x0087: 0x00e7, # LATIN SMALL LETTER C WITH CEDILLA
    0x0088: 0x00ea, # LATIN SMALL LETTER E WITH CIRCUMFLEX
    0x0089: 0x00ca, # LATIN CAPITAL LETTER E WITH CIRCUMFLEX
    0x008a: 0x00e8, # LATIN SMALL LETTER E WITH GRAVE
    0x008b: 0x00cd, # LATIN CAPITAL LETTER I WITH ACUTE
    0x008c: 0x00d4, # LATIN CAPITAL LETTER O WITH CIRCUMFLEX
    0x008d: 0x00ec, # LATIN SMALL LETTER I WITH GRAVE
    0x008e: 0x00c3, # LATIN CAPITAL LETTER A WITH TILDE
    0x008f: 0x00c2, # LATIN CAPITAL LETTER A WITH CIRCUMFLEX
    0x0090: 0x00c9, # LATIN CAPITAL LETTER E WITH ACUTE
    0x0091: 0x00c0, # LATIN CAPITAL LETTER A WITH GRAVE
    0x0092: 0x00c8, # LATIN CAPITAL LETTER E WITH GRAVE
    0x0093: 0x00f4, # LATIN SMALL LETTER O WITH CIRCUMFLEX
    0x0094: 0x00f5, # LATIN SMALL LETTER O WITH TILDE
    0x0095: 0x00f2, # LATIN SMALL LETTER O WITH GRAVE
    0x0096: 0x00da, # LATIN CAPITAL LETTER U WITH ACUTE
    0x0097: 0x00f9, # LATIN SMALL LETTER U WITH GRAVE
    0x0098: 0x00cc, # LATIN CAPITAL LETTER I WITH GRAVE
    0x0099: 0x00d5, # LATIN CAPITAL LETTER O WITH TILDE
    0x009a: 0x00dc, # LATIN CAPITAL LETTER U WITH DIAERESIS
    0x009b: 0x00a2, # CENT SIGN
    0x009c: 0x00a3, # POUND SIGN
    0x009d: 0x00d9, # LATIN CAPITAL LETTER U WITH GRAVE
    0x009e: 0x20a7, # PESETA SIGN
    0x009f: 0x00d3, # LATIN CAPITAL LETTER O WITH ACUTE
    0x00a0: 0x00e1, # LATIN SMALL LETTER A WITH ACUTE
    0x00a1: 0x00ed, # LATIN SMALL LETTER I WITH ACUTE
    0x00a2: 0x00f3, # LATIN SMALL LETTER O WITH ACUTE
    0x00a3: 0x00fa, # LATIN SMALL LETTER U WITH ACUTE
    0x00a4: 0x00f1, # LATIN SMALL LETTER N WITH TILDE
    0x00a5: 0x00d1, # LATIN CAPITAL LETTER N WITH TILDE
    0x00a6: 0x00aa, # FEMININE ORDINAL INDICATOR
    0x00a7: 0x00ba, # MASCULINE ORDINAL INDICATOR
    0x00a8: 0x00bf, # INVERTED QUESTION MARK
    0x00a9: 0x00d2, # LATIN CAPITAL LETTER O WITH GRAVE
    0x00aa: 0x00ac, # NOT SIGN
    0x00ab: 0x00bd, # VULGAR FRACTION ONE HALF
    0x00ac: 0x00bc, # VULGAR FRACTION ONE QUARTER
    0x00ad: 0x00a1, # INVERTED EXCLAMATION MARK
    0x00ae: 0x00ab, # LEFT-POINTING DOUBLE ANGLE QUOTATION MARK
    0x00af: 0x00bb, # RIGHT-POINTING DOUBLE ANGLE QUOTATION MARK
    0x00b0: 0x2591, # LIGHT SHADE
    0x00b1: 0x2592, # MEDIUM SHADE
    0x00b2: 0x2593, # DARK SHADE
    0x00b3: 0x2502, # BOX DRAWINGS LIGHT VERTICAL
    0x00b4: 0x2524, # BOX DRAWINGS LIGHT VERTICAL AND LEFT
    0x00b5: 0x2561, # BOX DRAWINGS VERTICAL SINGLE AND LEFT DOUBLE
    0x00b6: 0x2562, # BOX DRAWINGS VERTICAL DOUBLE AND LEFT SINGLE
    0x00b7: 0x2556, # BOX DRAWINGS DOWN DOUBLE AND LEFT SINGLE
    0x00b8: 0x2555, # BOX DRAWINGS DOWN SINGLE AND LEFT DOUBLE
    0x00b9: 0x2563, # BOX DRAWINGS DOUBLE VERTICAL AND LEFT
    0x00ba: 0x2551, # BOX DRAWINGS DOUBLE VERTICAL
    0x00bb: 0x2557, # BOX DRAWINGS DOUBLE DOWN AND LEFT
    0x00bc: 0x255d, # BOX DRAWINGS DOUBLE UP AND LEFT
    0x00bd: 0x255c, # BOX DRAWINGS UP DOUBLE AND LEFT SINGLE
    0x00be: 0x255b, # BOX DRAWINGS UP SINGLE AND LEFT DOUBLE
    0x00bf: 0x2510, # BOX DRAWINGS LIGHT DOWN AND LEFT
    0x00c0: 0x2514, # BOX DRAWINGS LIGHT UP AND RIGHT
    0x00c1: 0x2534, # BOX DRAWINGS LIGHT UP AND HORIZONTAL
    0x00c2: 0x252c, # BOX DRAWINGS LIGHT DOWN AND HORIZONTAL
    0x00c3: 0x251c, # BOX DRAWINGS LIGHT VERTICAL AND RIGHT
    0x00c4: 0x2500, # BOX DRAWINGS LIGHT HORIZONTAL
    0x00c5: 0x253c, # BOX DRAWINGS LIGHT VERTICAL AND HORIZONTAL
    0x00c6: 0x255e, # BOX DRAWINGS VERTICAL SINGLE AND RIGHT DOUBLE
    0x00c7: 0x255f, # BOX DRAWINGS VERTICAL DOUBLE AND RIGHT SINGLE
    0x00c8: 0x255a, # BOX DRAWINGS DOUBLE UP AND RIGHT
    0x00c9: 0x2554, # BOX DRAWINGS DOUBLE DOWN AND RIGHT
    0x00ca: 0x2569, # BOX DRAWINGS DOUBLE UP AND HORIZONTAL
    0x00cb: 0x2566, # BOX DRAWINGS DOUBLE DOWN AND HORIZONTAL
    0x00cc: 0x2560, # BOX DRAWINGS DOUBLE VERTICAL AND RIGHT
    0x00cd: 0x2550, # BOX DRAWINGS DOUBLE HORIZONTAL
    0x00ce: 0x256c, # BOX DRAWINGS DOUBLE VERTICAL AND HORIZONTAL
    0x00cf: 0x2567, # BOX DRAWINGS UP SINGLE AND HORIZONTAL DOUBLE
    0x00d0: 0x2568, # BOX DRAWINGS UP DOUBLE AND HORIZONTAL SINGLE
    0x00d1: 0x2564, # BOX DRAWINGS DOWN SINGLE AND HORIZONTAL DOUBLE
    0x00d2: 0x2565, # BOX DRAWINGS DOWN DOUBLE AND HORIZONTAL SINGLE
    0x00d3: 0x2559, # BOX DRAWINGS UP DOUBLE AND RIGHT SINGLE
    0x00d4: 0x2558, # BOX DRAWINGS UP SINGLE AND RIGHT DOUBLE
    0x00d5: 0x2552, # BOX DRAWINGS DOWN SINGLE AND RIGHT DOUBLE
    0x00d6: 0x2553, # BOX DRAWINGS DOWN DOUBLE AND RIGHT SINGLE
    0x00d7: 0x256b, # BOX DRAWINGS VERTICAL DOUBLE AND HORIZONTAL SINGLE
    0x00d8: 0x256a, # BOX DRAWINGS VERTICAL SINGLE AND HORIZONTAL DOUBLE
    0x00d9: 0x2518, # BOX DRAWINGS LIGHT UP AND LEFT
    0x00da: 0x250c, # BOX DRAWINGS LIGHT DOWN AND RIGHT
    0x00db: 0x2588, # FULL BLOCK
    0x00dc: 0x2584, # LOWER HALF BLOCK
    0x00dd: 0x258c, # LEFT HALF BLOCK
    0x00de: 0x2590, # RIGHT HALF BLOCK
    0x00df: 0x2580, # UPPER HALF BLOCK
    0x00e0: 0x03b1, # GREEK SMALL LETTER ALPHA
    0x00e1: 0x00df, # LATIN SMALL LETTER SHARP S
    0x00e2: 0x0393, # GREEK CAPITAL LETTER GAMMA
    0x00e3: 0x03c0, # GREEK SMALL LETTER PI
    0x00e4: 0x03a3, # GREEK CAPITAL LETTER SIGMA
    0x00e5: 0x03c3, # GREEK SMALL LETTER SIGMA
    0x00e6: 0x00b5, # MICRO SIGN
    0x00e7: 0x03c4, # GREEK SMALL LETTER TAU
    0x00e8: 0x03a6, # GREEK CAPITAL LETTER PHI
    0x00e9: 0x0398, # GREEK CAPITAL LETTER THETA
    0x00ea: 0x03a9, # GREEK CAPITAL LETTER OMEGA
    0x00eb: 0x03b4, # GREEK SMALL LETTER DELTA
    0x00ec: 0x221e, # INFINITY
    0x00ed: 0x03c6, # GREEK SMALL LETTER PHI
    0x00ee: 0x03b5, # GREEK SMALL LETTER EPSILON
    0x00ef: 0x2229, # INTERSECTION
    0x00f0: 0x2261, # IDENTICAL TO
    0x00f1: 0x00b1, # PLUS-MINUS SIGN
    0x00f2: 0x2265, # GREATER-THAN OR EQUAL TO
    0x00f3: 0x2264, # LESS-THAN OR EQUAL TO
    0x00f4: 0x2320, # TOP HALF INTEGRAL
    0x00f5: 0x2321, # BOTTOM HALF INTEGRAL
    0x00f6: 0x00f7, # DIVISION SIGN
    0x00f7: 0x2248, # ALMOST EQUAL TO
    0x00f8: 0x00b0, # DEGREE SIGN
    0x00f9: 0x2219, # BULLET OPERATOR
    0x00fa: 0x00b7, # MIDDLE DOT
    0x00fb: 0x221a, # SQUARE ROOT
    0x00fc: 0x207f, # SUPERSCRIPT LATIN SMALL LETTER N
    0x00fd: 0x00b2, # SUPERSCRIPT TWO
    0x00fe: 0x25a0, # BLACK SQUARE
    0x00ff: 0x00a0, # NO-BREAK SPACE
})
### Decoding Table
# 256-character string: decoding_table[byte] is the Unicode character for
# that CP860 byte value.  This is the table actually consumed by
# codecs.charmap_decode in Codec.decode and IncrementalDecoder.decode.
decoding_table = (
    '\x00' # 0x0000 -> NULL
    '\x01' # 0x0001 -> START OF HEADING
    '\x02' # 0x0002 -> START OF TEXT
    '\x03' # 0x0003 -> END OF TEXT
    '\x04' # 0x0004 -> END OF TRANSMISSION
    '\x05' # 0x0005 -> ENQUIRY
    '\x06' # 0x0006 -> ACKNOWLEDGE
    '\x07' # 0x0007 -> BELL
    '\x08' # 0x0008 -> BACKSPACE
    '\t' # 0x0009 -> HORIZONTAL TABULATION
    '\n' # 0x000a -> LINE FEED
    '\x0b' # 0x000b -> VERTICAL TABULATION
    '\x0c' # 0x000c -> FORM FEED
    '\r' # 0x000d -> CARRIAGE RETURN
    '\x0e' # 0x000e -> SHIFT OUT
    '\x0f' # 0x000f -> SHIFT IN
    '\x10' # 0x0010 -> DATA LINK ESCAPE
    '\x11' # 0x0011 -> DEVICE CONTROL ONE
    '\x12' # 0x0012 -> DEVICE CONTROL TWO
    '\x13' # 0x0013 -> DEVICE CONTROL THREE
    '\x14' # 0x0014 -> DEVICE CONTROL FOUR
    '\x15' # 0x0015 -> NEGATIVE ACKNOWLEDGE
    '\x16' # 0x0016 -> SYNCHRONOUS IDLE
    '\x17' # 0x0017 -> END OF TRANSMISSION BLOCK
    '\x18' # 0x0018 -> CANCEL
    '\x19' # 0x0019 -> END OF MEDIUM
    '\x1a' # 0x001a -> SUBSTITUTE
    '\x1b' # 0x001b -> ESCAPE
    '\x1c' # 0x001c -> FILE SEPARATOR
    '\x1d' # 0x001d -> GROUP SEPARATOR
    '\x1e' # 0x001e -> RECORD SEPARATOR
    '\x1f' # 0x001f -> UNIT SEPARATOR
    ' ' # 0x0020 -> SPACE
    '!' # 0x0021 -> EXCLAMATION MARK
    '"' # 0x0022 -> QUOTATION MARK
    '#' # 0x0023 -> NUMBER SIGN
    '$' # 0x0024 -> DOLLAR SIGN
    '%' # 0x0025 -> PERCENT SIGN
    '&' # 0x0026 -> AMPERSAND
    "'" # 0x0027 -> APOSTROPHE
    '(' # 0x0028 -> LEFT PARENTHESIS
    ')' # 0x0029 -> RIGHT PARENTHESIS
    '*' # 0x002a -> ASTERISK
    '+' # 0x002b -> PLUS SIGN
    ',' # 0x002c -> COMMA
    '-' # 0x002d -> HYPHEN-MINUS
    '.' # 0x002e -> FULL STOP
    '/' # 0x002f -> SOLIDUS
    '0' # 0x0030 -> DIGIT ZERO
    '1' # 0x0031 -> DIGIT ONE
    '2' # 0x0032 -> DIGIT TWO
    '3' # 0x0033 -> DIGIT THREE
    '4' # 0x0034 -> DIGIT FOUR
    '5' # 0x0035 -> DIGIT FIVE
    '6' # 0x0036 -> DIGIT SIX
    '7' # 0x0037 -> DIGIT SEVEN
    '8' # 0x0038 -> DIGIT EIGHT
    '9' # 0x0039 -> DIGIT NINE
    ':' # 0x003a -> COLON
    ';' # 0x003b -> SEMICOLON
    '<' # 0x003c -> LESS-THAN SIGN
    '=' # 0x003d -> EQUALS SIGN
    '>' # 0x003e -> GREATER-THAN SIGN
    '?' # 0x003f -> QUESTION MARK
    '@' # 0x0040 -> COMMERCIAL AT
    'A' # 0x0041 -> LATIN CAPITAL LETTER A
    'B' # 0x0042 -> LATIN CAPITAL LETTER B
    'C' # 0x0043 -> LATIN CAPITAL LETTER C
    'D' # 0x0044 -> LATIN CAPITAL LETTER D
    'E' # 0x0045 -> LATIN CAPITAL LETTER E
    'F' # 0x0046 -> LATIN CAPITAL LETTER F
    'G' # 0x0047 -> LATIN CAPITAL LETTER G
    'H' # 0x0048 -> LATIN CAPITAL LETTER H
    'I' # 0x0049 -> LATIN CAPITAL LETTER I
    'J' # 0x004a -> LATIN CAPITAL LETTER J
    'K' # 0x004b -> LATIN CAPITAL LETTER K
    'L' # 0x004c -> LATIN CAPITAL LETTER L
    'M' # 0x004d -> LATIN CAPITAL LETTER M
    'N' # 0x004e -> LATIN CAPITAL LETTER N
    'O' # 0x004f -> LATIN CAPITAL LETTER O
    'P' # 0x0050 -> LATIN CAPITAL LETTER P
    'Q' # 0x0051 -> LATIN CAPITAL LETTER Q
    'R' # 0x0052 -> LATIN CAPITAL LETTER R
    'S' # 0x0053 -> LATIN CAPITAL LETTER S
    'T' # 0x0054 -> LATIN CAPITAL LETTER T
    'U' # 0x0055 -> LATIN CAPITAL LETTER U
    'V' # 0x0056 -> LATIN CAPITAL LETTER V
    'W' # 0x0057 -> LATIN CAPITAL LETTER W
    'X' # 0x0058 -> LATIN CAPITAL LETTER X
    'Y' # 0x0059 -> LATIN CAPITAL LETTER Y
    'Z' # 0x005a -> LATIN CAPITAL LETTER Z
    '[' # 0x005b -> LEFT SQUARE BRACKET
    '\\' # 0x005c -> REVERSE SOLIDUS
    ']' # 0x005d -> RIGHT SQUARE BRACKET
    '^' # 0x005e -> CIRCUMFLEX ACCENT
    '_' # 0x005f -> LOW LINE
    '`' # 0x0060 -> GRAVE ACCENT
    'a' # 0x0061 -> LATIN SMALL LETTER A
    'b' # 0x0062 -> LATIN SMALL LETTER B
    'c' # 0x0063 -> LATIN SMALL LETTER C
    'd' # 0x0064 -> LATIN SMALL LETTER D
    'e' # 0x0065 -> LATIN SMALL LETTER E
    'f' # 0x0066 -> LATIN SMALL LETTER F
    'g' # 0x0067 -> LATIN SMALL LETTER G
    'h' # 0x0068 -> LATIN SMALL LETTER H
    'i' # 0x0069 -> LATIN SMALL LETTER I
    'j' # 0x006a -> LATIN SMALL LETTER J
    'k' # 0x006b -> LATIN SMALL LETTER K
    'l' # 0x006c -> LATIN SMALL LETTER L
    'm' # 0x006d -> LATIN SMALL LETTER M
    'n' # 0x006e -> LATIN SMALL LETTER N
    'o' # 0x006f -> LATIN SMALL LETTER O
    'p' # 0x0070 -> LATIN SMALL LETTER P
    'q' # 0x0071 -> LATIN SMALL LETTER Q
    'r' # 0x0072 -> LATIN SMALL LETTER R
    's' # 0x0073 -> LATIN SMALL LETTER S
    't' # 0x0074 -> LATIN SMALL LETTER T
    'u' # 0x0075 -> LATIN SMALL LETTER U
    'v' # 0x0076 -> LATIN SMALL LETTER V
    'w' # 0x0077 -> LATIN SMALL LETTER W
    'x' # 0x0078 -> LATIN SMALL LETTER X
    'y' # 0x0079 -> LATIN SMALL LETTER Y
    'z' # 0x007a -> LATIN SMALL LETTER Z
    '{' # 0x007b -> LEFT CURLY BRACKET
    '|' # 0x007c -> VERTICAL LINE
    '}' # 0x007d -> RIGHT CURLY BRACKET
    '~' # 0x007e -> TILDE
    '\x7f' # 0x007f -> DELETE
    '\xc7' # 0x0080 -> LATIN CAPITAL LETTER C WITH CEDILLA
    '\xfc' # 0x0081 -> LATIN SMALL LETTER U WITH DIAERESIS
    '\xe9' # 0x0082 -> LATIN SMALL LETTER E WITH ACUTE
    '\xe2' # 0x0083 -> LATIN SMALL LETTER A WITH CIRCUMFLEX
    '\xe3' # 0x0084 -> LATIN SMALL LETTER A WITH TILDE
    '\xe0' # 0x0085 -> LATIN SMALL LETTER A WITH GRAVE
    '\xc1' # 0x0086 -> LATIN CAPITAL LETTER A WITH ACUTE
    '\xe7' # 0x0087 -> LATIN SMALL LETTER C WITH CEDILLA
    '\xea' # 0x0088 -> LATIN SMALL LETTER E WITH CIRCUMFLEX
    '\xca' # 0x0089 -> LATIN CAPITAL LETTER E WITH CIRCUMFLEX
    '\xe8' # 0x008a -> LATIN SMALL LETTER E WITH GRAVE
    '\xcd' # 0x008b -> LATIN CAPITAL LETTER I WITH ACUTE
    '\xd4' # 0x008c -> LATIN CAPITAL LETTER O WITH CIRCUMFLEX
    '\xec' # 0x008d -> LATIN SMALL LETTER I WITH GRAVE
    '\xc3' # 0x008e -> LATIN CAPITAL LETTER A WITH TILDE
    '\xc2' # 0x008f -> LATIN CAPITAL LETTER A WITH CIRCUMFLEX
    '\xc9' # 0x0090 -> LATIN CAPITAL LETTER E WITH ACUTE
    '\xc0' # 0x0091 -> LATIN CAPITAL LETTER A WITH GRAVE
    '\xc8' # 0x0092 -> LATIN CAPITAL LETTER E WITH GRAVE
    '\xf4' # 0x0093 -> LATIN SMALL LETTER O WITH CIRCUMFLEX
    '\xf5' # 0x0094 -> LATIN SMALL LETTER O WITH TILDE
    '\xf2' # 0x0095 -> LATIN SMALL LETTER O WITH GRAVE
    '\xda' # 0x0096 -> LATIN CAPITAL LETTER U WITH ACUTE
    '\xf9' # 0x0097 -> LATIN SMALL LETTER U WITH GRAVE
    '\xcc' # 0x0098 -> LATIN CAPITAL LETTER I WITH GRAVE
    '\xd5' # 0x0099 -> LATIN CAPITAL LETTER O WITH TILDE
    '\xdc' # 0x009a -> LATIN CAPITAL LETTER U WITH DIAERESIS
    '\xa2' # 0x009b -> CENT SIGN
    '\xa3' # 0x009c -> POUND SIGN
    '\xd9' # 0x009d -> LATIN CAPITAL LETTER U WITH GRAVE
    '\u20a7' # 0x009e -> PESETA SIGN
    '\xd3' # 0x009f -> LATIN CAPITAL LETTER O WITH ACUTE
    '\xe1' # 0x00a0 -> LATIN SMALL LETTER A WITH ACUTE
    '\xed' # 0x00a1 -> LATIN SMALL LETTER I WITH ACUTE
    '\xf3' # 0x00a2 -> LATIN SMALL LETTER O WITH ACUTE
    '\xfa' # 0x00a3 -> LATIN SMALL LETTER U WITH ACUTE
    '\xf1' # 0x00a4 -> LATIN SMALL LETTER N WITH TILDE
    '\xd1' # 0x00a5 -> LATIN CAPITAL LETTER N WITH TILDE
    '\xaa' # 0x00a6 -> FEMININE ORDINAL INDICATOR
    '\xba' # 0x00a7 -> MASCULINE ORDINAL INDICATOR
    '\xbf' # 0x00a8 -> INVERTED QUESTION MARK
    '\xd2' # 0x00a9 -> LATIN CAPITAL LETTER O WITH GRAVE
    '\xac' # 0x00aa -> NOT SIGN
    '\xbd' # 0x00ab -> VULGAR FRACTION ONE HALF
    '\xbc' # 0x00ac -> VULGAR FRACTION ONE QUARTER
    '\xa1' # 0x00ad -> INVERTED EXCLAMATION MARK
    '\xab' # 0x00ae -> LEFT-POINTING DOUBLE ANGLE QUOTATION MARK
    '\xbb' # 0x00af -> RIGHT-POINTING DOUBLE ANGLE QUOTATION MARK
    '\u2591' # 0x00b0 -> LIGHT SHADE
    '\u2592' # 0x00b1 -> MEDIUM SHADE
    '\u2593' # 0x00b2 -> DARK SHADE
    '\u2502' # 0x00b3 -> BOX DRAWINGS LIGHT VERTICAL
    '\u2524' # 0x00b4 -> BOX DRAWINGS LIGHT VERTICAL AND LEFT
    '\u2561' # 0x00b5 -> BOX DRAWINGS VERTICAL SINGLE AND LEFT DOUBLE
    '\u2562' # 0x00b6 -> BOX DRAWINGS VERTICAL DOUBLE AND LEFT SINGLE
    '\u2556' # 0x00b7 -> BOX DRAWINGS DOWN DOUBLE AND LEFT SINGLE
    '\u2555' # 0x00b8 -> BOX DRAWINGS DOWN SINGLE AND LEFT DOUBLE
    '\u2563' # 0x00b9 -> BOX DRAWINGS DOUBLE VERTICAL AND LEFT
    '\u2551' # 0x00ba -> BOX DRAWINGS DOUBLE VERTICAL
    '\u2557' # 0x00bb -> BOX DRAWINGS DOUBLE DOWN AND LEFT
    '\u255d' # 0x00bc -> BOX DRAWINGS DOUBLE UP AND LEFT
    '\u255c' # 0x00bd -> BOX DRAWINGS UP DOUBLE AND LEFT SINGLE
    '\u255b' # 0x00be -> BOX DRAWINGS UP SINGLE AND LEFT DOUBLE
    '\u2510' # 0x00bf -> BOX DRAWINGS LIGHT DOWN AND LEFT
    '\u2514' # 0x00c0 -> BOX DRAWINGS LIGHT UP AND RIGHT
    '\u2534' # 0x00c1 -> BOX DRAWINGS LIGHT UP AND HORIZONTAL
    '\u252c' # 0x00c2 -> BOX DRAWINGS LIGHT DOWN AND HORIZONTAL
    '\u251c' # 0x00c3 -> BOX DRAWINGS LIGHT VERTICAL AND RIGHT
    '\u2500' # 0x00c4 -> BOX DRAWINGS LIGHT HORIZONTAL
    '\u253c' # 0x00c5 -> BOX DRAWINGS LIGHT VERTICAL AND HORIZONTAL
    '\u255e' # 0x00c6 -> BOX DRAWINGS VERTICAL SINGLE AND RIGHT DOUBLE
    '\u255f' # 0x00c7 -> BOX DRAWINGS VERTICAL DOUBLE AND RIGHT SINGLE
    '\u255a' # 0x00c8 -> BOX DRAWINGS DOUBLE UP AND RIGHT
    '\u2554' # 0x00c9 -> BOX DRAWINGS DOUBLE DOWN AND RIGHT
    '\u2569' # 0x00ca -> BOX DRAWINGS DOUBLE UP AND HORIZONTAL
    '\u2566' # 0x00cb -> BOX DRAWINGS DOUBLE DOWN AND HORIZONTAL
    '\u2560' # 0x00cc -> BOX DRAWINGS DOUBLE VERTICAL AND RIGHT
    '\u2550' # 0x00cd -> BOX DRAWINGS DOUBLE HORIZONTAL
    '\u256c' # 0x00ce -> BOX DRAWINGS DOUBLE VERTICAL AND HORIZONTAL
    '\u2567' # 0x00cf -> BOX DRAWINGS UP SINGLE AND HORIZONTAL DOUBLE
    '\u2568' # 0x00d0 -> BOX DRAWINGS UP DOUBLE AND HORIZONTAL SINGLE
    '\u2564' # 0x00d1 -> BOX DRAWINGS DOWN SINGLE AND HORIZONTAL DOUBLE
    '\u2565' # 0x00d2 -> BOX DRAWINGS DOWN DOUBLE AND HORIZONTAL SINGLE
    '\u2559' # 0x00d3 -> BOX DRAWINGS UP DOUBLE AND RIGHT SINGLE
    '\u2558' # 0x00d4 -> BOX DRAWINGS UP SINGLE AND RIGHT DOUBLE
    '\u2552' # 0x00d5 -> BOX DRAWINGS DOWN SINGLE AND RIGHT DOUBLE
    '\u2553' # 0x00d6 -> BOX DRAWINGS DOWN DOUBLE AND RIGHT SINGLE
    '\u256b' # 0x00d7 -> BOX DRAWINGS VERTICAL DOUBLE AND HORIZONTAL SINGLE
    '\u256a' # 0x00d8 -> BOX DRAWINGS VERTICAL SINGLE AND HORIZONTAL DOUBLE
    '\u2518' # 0x00d9 -> BOX DRAWINGS LIGHT UP AND LEFT
    '\u250c' # 0x00da -> BOX DRAWINGS LIGHT DOWN AND RIGHT
    '\u2588' # 0x00db -> FULL BLOCK
    '\u2584' # 0x00dc -> LOWER HALF BLOCK
    '\u258c' # 0x00dd -> LEFT HALF BLOCK
    '\u2590' # 0x00de -> RIGHT HALF BLOCK
    '\u2580' # 0x00df -> UPPER HALF BLOCK
    '\u03b1' # 0x00e0 -> GREEK SMALL LETTER ALPHA
    '\xdf' # 0x00e1 -> LATIN SMALL LETTER SHARP S
    '\u0393' # 0x00e2 -> GREEK CAPITAL LETTER GAMMA
    '\u03c0' # 0x00e3 -> GREEK SMALL LETTER PI
    '\u03a3' # 0x00e4 -> GREEK CAPITAL LETTER SIGMA
    '\u03c3' # 0x00e5 -> GREEK SMALL LETTER SIGMA
    '\xb5' # 0x00e6 -> MICRO SIGN
    '\u03c4' # 0x00e7 -> GREEK SMALL LETTER TAU
    '\u03a6' # 0x00e8 -> GREEK CAPITAL LETTER PHI
    '\u0398' # 0x00e9 -> GREEK CAPITAL LETTER THETA
    '\u03a9' # 0x00ea -> GREEK CAPITAL LETTER OMEGA
    '\u03b4' # 0x00eb -> GREEK SMALL LETTER DELTA
    '\u221e' # 0x00ec -> INFINITY
    '\u03c6' # 0x00ed -> GREEK SMALL LETTER PHI
    '\u03b5' # 0x00ee -> GREEK SMALL LETTER EPSILON
    '\u2229' # 0x00ef -> INTERSECTION
    '\u2261' # 0x00f0 -> IDENTICAL TO
    '\xb1' # 0x00f1 -> PLUS-MINUS SIGN
    '\u2265' # 0x00f2 -> GREATER-THAN OR EQUAL TO
    '\u2264' # 0x00f3 -> LESS-THAN OR EQUAL TO
    '\u2320' # 0x00f4 -> TOP HALF INTEGRAL
    '\u2321' # 0x00f5 -> BOTTOM HALF INTEGRAL
    '\xf7' # 0x00f6 -> DIVISION SIGN
    '\u2248' # 0x00f7 -> ALMOST EQUAL TO
    '\xb0' # 0x00f8 -> DEGREE SIGN
    '\u2219' # 0x00f9 -> BULLET OPERATOR
    '\xb7' # 0x00fa -> MIDDLE DOT
    '\u221a' # 0x00fb -> SQUARE ROOT
    '\u207f' # 0x00fc -> SUPERSCRIPT LATIN SMALL LETTER N
    '\xb2' # 0x00fd -> SUPERSCRIPT TWO
    '\u25a0' # 0x00fe -> BLACK SQUARE
    '\xa0' # 0x00ff -> NO-BREAK SPACE
)
### Encoding Map
# Inverse of decoding_table: Unicode code point -> CP860 byte value.
# CP860 assigns a distinct character to every one of the 256 byte values
# (see decoding_table above), so this inversion is lossless and yields
# exactly the same 256 key/value pairs as the original hand-written
# literal dict.  Building it programmatically guarantees the encode and
# decode tables can never drift apart, and replaces the original literal
# whose closing line had been corrupted with stray non-Python text.
encoding_map = {ord(char): byte for byte, char in enumerate(decoding_table)}
"""#"
import codecs
### Codec APIs
class Codec(codecs.Codec):
    """Stateless CP869 codec delegating to the charmap helper functions."""

    def encode(self, input, errors='strict'):
        """Encode *input* with encoding_map; returns (bytes, length_consumed)."""
        return codecs.charmap_encode(input, errors, encoding_map)

    def decode(self, input, errors='strict'):
        """Decode *input* with decoding_table; returns (text, length_consumed)."""
        return codecs.charmap_decode(input, errors, decoding_table)
class IncrementalEncoder(codecs.IncrementalEncoder):
    """Incremental CP869 encoder; no state is carried between calls."""

    def encode(self, input, final=False):
        # Drop the consumed-length element of charmap_encode's result pair.
        encoded, _ = codecs.charmap_encode(input, self.errors, encoding_map)
        return encoded
class IncrementalDecoder(codecs.IncrementalDecoder):
    """Incremental CP869 decoder; no state is carried between calls."""

    def decode(self, input, final=False):
        # Drop the consumed-length element of charmap_decode's result pair.
        decoded, _ = codecs.charmap_decode(input, self.errors, decoding_table)
        return decoded
class StreamWriter(Codec, codecs.StreamWriter):
    """Stream writer for CP869; all behaviour is inherited from Codec."""
class StreamReader(Codec, codecs.StreamReader):
    """Stream reader for CP869; all behaviour is inherited from Codec."""
### encodings module API
def getregentry():
    """Return the codecs.CodecInfo registration entry for the 'cp869' codec."""
    # Codec is stateless, so one shared instance can serve both directions.
    codec = Codec()
    return codecs.CodecInfo(
        name='cp869',
        encode=codec.encode,
        decode=codec.decode,
        incrementalencoder=IncrementalEncoder,
        incrementaldecoder=IncrementalDecoder,
        streamreader=StreamReader,
        streamwriter=StreamWriter,
    )
### Decoding Map
decoding_map = codecs.make_identity_dict(range(256))
decoding_map.update({
0x0080: None, # UNDEFINED
0x0081: None, # UNDEFINED
0x0082: None, # UNDEFINED
0x0083: None, # UNDEFINED
0x0084: None, # UNDEFINED
0x0085: None, # UNDEFINED
0x0086: 0x0386, # GREEK CAPITAL LETTER ALPHA WITH TONOS
0x0087: None, # UNDEFINED
0x0088: 0x00b7, # MIDDLE DOT
0x0089: 0x00ac, # NOT SIGN
0x008a: 0x00a6, # BROKEN BAR
0x008b: 0x2018, # LEFT SINGLE QUOTATION MARK
0x008c: 0x2019, # RIGHT SINGLE QUOTATION MARK
0x008d: 0x0388, # GREEK CAPITAL LETTER EPSILON WITH TONOS
0x008e: 0x2015, # HORIZONTAL BAR
0x008f: 0x0389, # GREEK CAPITAL LETTER ETA WITH TONOS
0x0090: 0x038a, # GREEK CAPITAL LETTER IOTA WITH TONOS
0x0091: 0x03aa, # GREEK CAPITAL LETTER IOTA WITH DIALYTIKA
0x0092: 0x038c, # GREEK CAPITAL LETTER OMICRON WITH TONOS
0x0093: None, # UNDEFINED
0x0094: None, # UNDEFINED
0x0095: 0x038e, # GREEK CAPITAL LETTER UPSILON WITH TONOS
0x0096: 0x03ab, # GREEK CAPITAL LETTER UPSILON WITH DIALYTIKA
0x0097: 0x00a9, # COPYRIGHT SIGN
0x0098: 0x038f, # GREEK CAPITAL LETTER OMEGA WITH TONOS
0x0099: 0x00b2, # SUPERSCRIPT TWO
0x009a: 0x00b3, # SUPERSCRIPT THREE
0x009b: 0x03ac, # GREEK SMALL LETTER ALPHA WITH TONOS
0x009c: 0x00a3, # POUND SIGN
0x009d: 0x03ad, # GREEK SMALL LETTER EPSILON WITH TONOS
0x009e: 0x03ae, # GREEK SMALL LETTER ETA WITH TONOS
0x009f: 0x03af, # GREEK SMALL LETTER IOTA WITH TONOS
0x00a0: 0x03ca, # GREEK SMALL LETTER IOTA WITH DIALYTIKA
0x00a1: 0x0390, # GREEK SMALL LETTER IOTA WITH DIALYTIKA AND TONOS
0x00a2: 0x03cc, # GREEK SMALL LETTER OMICRON WITH TONOS
0x00a3: 0x03cd, # GREEK SMALL LETTER UPSILON WITH TONOS
0x00a4: 0x0391, # GREEK CAPITAL LETTER ALPHA
0x00a5: 0x0392, # GREEK CAPITAL LETTER BETA
0x00a6: 0x0393, # GREEK CAPITAL LETTER GAMMA
0x00a7: 0x0394, # GREEK CAPITAL LETTER DELTA
0x00a8: 0x0395, # GREEK CAPITAL LETTER EPSILON
0x00a9: 0x0396, # GREEK CAPITAL LETTER ZETA
0x00aa: 0x0397, # GREEK CAPITAL LETTER ETA
0x00ab: 0x00bd, # VULGAR FRACTION ONE HALF
0x00ac: 0x0398, # GREEK CAPITAL LETTER THETA
0x00ad: 0x0399, # GREEK CAPITAL LETTER IOTA
0x00ae: 0x00ab, # LEFT-POINTING DOUBLE ANGLE QUOTATION MARK
0x00af: 0x00bb, # RIGHT-POINTING DOUBLE ANGLE QUOTATION MARK
0x00b0: 0x2591, # LIGHT SHADE
0x00b1: 0x2592, # MEDIUM SHADE
0x00b2: 0x2593, # DARK SHADE
0x00b3: 0x2502, # BOX DRAWINGS LIGHT VERTICAL
0x00b4: 0x2524, # BOX DRAWINGS LIGHT VERTICAL AND LEFT
0x00b5: 0x039a, # GREEK CAPITAL LETTER KAPPA
0x00b6: 0x039b, # GREEK CAPITAL LETTER LAMDA
0x00b7: 0x039c, # GREEK CAPITAL LETTER MU
0x00b8: 0x039d, # GREEK CAPITAL LETTER NU
0x00b9: 0x2563, # BOX DRAWINGS DOUBLE VERTICAL AND LEFT
0x00ba: 0x2551, # BOX DRAWINGS DOUBLE VERTICAL
0x00bb: 0x2557, # BOX DRAWINGS DOUBLE DOWN AND LEFT
0x00bc: 0x255d, # BOX DRAWINGS DOUBLE UP AND LEFT
0x00bd: 0x039e, # GREEK CAPITAL LETTER XI
0x00be: 0x039f, # GREEK CAPITAL LETTER OMICRON
0x00bf: 0x2510, # BOX DRAWINGS LIGHT DOWN AND LEFT
0x00c0: 0x2514, # BOX DRAWINGS LIGHT UP AND RIGHT
0x00c1: 0x2534, # BOX DRAWINGS LIGHT UP AND HORIZONTAL
0x00c2: 0x252c, # BOX DRAWINGS LIGHT DOWN AND HORIZONTAL
0x00c3: 0x251c, # BOX DRAWINGS LIGHT VERTICAL AND RIGHT
0x00c4: 0x2500, # BOX DRAWINGS LIGHT HORIZONTAL
0x00c5: 0x253c, # BOX DRAWINGS LIGHT VERTICAL AND HORIZONTAL
0x00c6: 0x03a0, # GREEK CAPITAL LETTER PI
0x00c7: 0x03a1, # GREEK CAPITAL LETTER RHO
0x00c8: 0x255a, # BOX DRAWINGS DOUBLE UP AND RIGHT
0x00c9: 0x2554, # BOX DRAWINGS DOUBLE DOWN AND RIGHT
0x00ca: 0x2569, # BOX DRAWINGS DOUBLE UP AND HORIZONTAL
0x00cb: 0x2566, # BOX DRAWINGS DOUBLE DOWN AND HORIZONTAL
0x00cc: 0x2560, # BOX DRAWINGS DOUBLE VERTICAL AND RIGHT
0x00cd: 0x2550, # BOX DRAWINGS DOUBLE HORIZONTAL
0x00ce: 0x256c, # BOX DRAWINGS DOUBLE VERTICAL AND HORIZONTAL
0x00cf: 0x03a3, # GREEK CAPITAL LETTER SIGMA
0x00d0: 0x03a4, # GREEK CAPITAL LETTER TAU
0x00d1: 0x03a5, # GREEK CAPITAL LETTER UPSILON
0x00d2: 0x03a6, # GREEK CAPITAL LETTER PHI
0x00d3: 0x03a7, # GREEK CAPITAL LETTER CHI
0x00d4: 0x03a8, # GREEK CAPITAL LETTER PSI
0x00d5: 0x03a9, # GREEK CAPITAL LETTER OMEGA
0x00d6: 0x03b1, # GREEK SMALL LETTER ALPHA
0x00d7: 0x03b2, # GREEK SMALL LETTER BETA
0x00d8: 0x03b3, # GREEK SMALL LETTER GAMMA
0x00d9: 0x2518, # BOX DRAWINGS LIGHT UP AND LEFT
0x00da: 0x250c, # BOX DRAWINGS LIGHT DOWN AND RIGHT
0x00db: 0x2588, # FULL BLOCK
0x00dc: 0x2584, # LOWER HALF BLOCK
0x00dd: 0x03b4, # GREEK SMALL LETTER DELTA
0x00de: 0x03b5, # GREEK SMALL LETTER EPSILON
0x00df: 0x2580, # UPPER HALF BLOCK
0x00e0: 0x03b6, # GREEK SMALL LETTER ZETA
0x00e1: 0x03b7, # GREEK SMALL LETTER ETA
0x00e2: 0x03b8, # GREEK SMALL LETTER THETA
0x00e3: 0x03b9, # GREEK SMALL LETTER IOTA
0x00e4: 0x03ba, # GREEK SMALL LETTER KAPPA
0x00e5: 0x03bb, # GREEK SMALL LETTER LAMDA
0x00e6: 0x03bc, # GREEK SMALL LETTER MU
0x00e7: 0x03bd, # GREEK SMALL LETTER NU
0x00e8: 0x03be, # GREEK SMALL LETTER XI
0x00e9: 0x03bf, # GREEK SMALL LETTER OMICRON
0x00ea: 0x03c0, # GREEK SMALL LETTER PI
0x00eb: 0x03c1, # GREEK SMALL LETTER RHO
0x00ec: 0x03c3, # GREEK SMALL LETTER SIGMA
0x00ed: 0x03c2, # GREEK SMALL LETTER FINAL SIGMA
0x00ee: 0x03c4, # GREEK SMALL LETTER TAU
0x00ef: 0x0384, # GREEK TONOS
0x00f0: 0x00ad, # SOFT HYPHEN
0x00f1: 0x00b1, # PLUS-MINUS SIGN
0x00f2: 0x03c5, # GREEK SMALL LETTER UPSILON
0x00f3: 0x03c6, # GREEK SMALL LETTER PHI
0x00f4: 0x03c7, # GREEK SMALL LETTER CHI
0x00f5: 0x00a7, # SECTION SIGN
0x00f6: 0x03c8, # GREEK SMALL LETTER PSI
0x00f7: 0x0385, # GREEK DIALYTIKA TONOS
0x00f8: 0x00b0, # DEGREE SIGN
0x00f9: 0x00a8, # DIAERESIS
0x00fa: 0x03c9, # GREEK SMALL LETTER OMEGA
0x00fb: 0x03cb, # GREEK SMALL LETTER UPSILON WITH DIALYTIKA
0x00fc: 0x03b0, # GREEK SMALL LETTER UPSILON WITH DIALYTIKA AND TONOS
0x00fd: 0x03ce, # GREEK SMALL LETTER OMEGA WITH TONOS
0x00fe: 0x25a0, # BLACK SQUARE
0x00ff: 0x00a0, # NO-BREAK SPACE
})
### Decoding Table
decoding_table = (
'\x00' # 0x0000 -> NULL
'\x01' # 0x0001 -> START OF HEADING
'\x02' # 0x0002 -> START OF TEXT
'\x03' # 0x0003 -> END OF TEXT
'\x04' # 0x0004 -> END OF TRANSMISSION
'\x05' # 0x0005 -> ENQUIRY
'\x06' # 0x0006 -> ACKNOWLEDGE
'\x07' # 0x0007 -> BELL
'\x08' # 0x0008 -> BACKSPACE
'\t' # 0x0009 -> HORIZONTAL TABULATION
'\n' # 0x000a -> LINE FEED
'\x0b' # 0x000b -> VERTICAL TABULATION
'\x0c' # 0x000c -> FORM FEED
'\r' # 0x000d -> CARRIAGE RETURN
'\x0e' # 0x000e -> SHIFT OUT
'\x0f' # 0x000f -> SHIFT IN
'\x10' # 0x0010 -> DATA LINK ESCAPE
'\x11' # 0x0011 -> DEVICE CONTROL ONE
'\x12' # 0x0012 -> DEVICE CONTROL TWO
'\x13' # 0x0013 -> DEVICE CONTROL THREE
'\x14' # 0x0014 -> DEVICE CONTROL FOUR
'\x15' # 0x0015 -> NEGATIVE ACKNOWLEDGE
'\x16' # 0x0016 -> SYNCHRONOUS IDLE
'\x17' # 0x0017 -> END OF TRANSMISSION BLOCK
'\x18' # 0x0018 -> CANCEL
'\x19' # 0x0019 -> END OF MEDIUM
'\x1a' # 0x001a -> SUBSTITUTE
'\x1b' # 0x001b -> ESCAPE
'\x1c' # 0x001c -> FILE SEPARATOR
'\x1d' # 0x001d -> GROUP SEPARATOR
'\x1e' # 0x001e -> RECORD SEPARATOR
'\x1f' # 0x001f -> UNIT SEPARATOR
' ' # 0x0020 -> SPACE
'!' # 0x0021 -> EXCLAMATION MARK
'"' # 0x0022 -> QUOTATION MARK
'#' # 0x0023 -> NUMBER SIGN
'$' # 0x0024 -> DOLLAR SIGN
'%' # 0x0025 -> PERCENT SIGN
'&' # 0x0026 -> AMPERSAND
"'" # 0x0027 -> APOSTROPHE
'(' # 0x0028 -> LEFT PARENTHESIS
')' # 0x0029 -> RIGHT PARENTHESIS
'*' # 0x002a -> ASTERISK
'+' # 0x002b -> PLUS SIGN
',' # 0x002c -> COMMA
'-' # 0x002d -> HYPHEN-MINUS
'.' # 0x002e -> FULL STOP
'/' # 0x002f -> SOLIDUS
'0' # 0x0030 -> DIGIT ZERO
'1' # 0x0031 -> DIGIT ONE
'2' # 0x0032 -> DIGIT TWO
'3' # 0x0033 -> DIGIT THREE
'4' # 0x0034 -> DIGIT FOUR
'5' # 0x0035 -> DIGIT FIVE
'6' # 0x0036 -> DIGIT SIX
'7' # 0x0037 -> DIGIT SEVEN
'8' # 0x0038 -> DIGIT EIGHT
'9' # 0x0039 -> DIGIT NINE
':' # 0x003a -> COLON
';' # 0x003b -> SEMICOLON
'<' # 0x003c -> LESS-THAN SIGN
'=' # 0x003d -> EQUALS SIGN
'>' # 0x003e -> GREATER-THAN SIGN
'?' # 0x003f -> QUESTION MARK
'@' # 0x0040 -> COMMERCIAL AT
'A' # 0x0041 -> LATIN CAPITAL LETTER A
'B' # 0x0042 -> LATIN CAPITAL LETTER B
'C' # 0x0043 -> LATIN CAPITAL LETTER C
'D' # 0x0044 -> LATIN CAPITAL LETTER D
'E' # 0x0045 -> LATIN CAPITAL LETTER E
'F' # 0x0046 -> LATIN CAPITAL LETTER F
'G' # 0x0047 -> LATIN CAPITAL LETTER G
'H' # 0x0048 -> LATIN CAPITAL LETTER H
'I' # 0x0049 -> LATIN CAPITAL LETTER I
'J' # 0x004a -> LATIN CAPITAL LETTER J
'K' # 0x004b -> LATIN CAPITAL LETTER K
'L' # 0x004c -> LATIN CAPITAL LETTER L
'M' # 0x004d -> LATIN CAPITAL LETTER M
'N' # 0x004e -> LATIN CAPITAL LETTER N
'O' # 0x004f -> LATIN CAPITAL LETTER O
'P' # 0x0050 -> LATIN CAPITAL LETTER P
'Q' # 0x0051 -> LATIN CAPITAL LETTER Q
'R' # 0x0052 -> LATIN CAPITAL LETTER R
'S' # 0x0053 -> LATIN CAPITAL LETTER S
'T' # 0x0054 -> LATIN CAPITAL LETTER T
'U' # 0x0055 -> LATIN CAPITAL LETTER U
'V' # 0x0056 -> LATIN CAPITAL LETTER V
'W' # 0x0057 -> LATIN CAPITAL LETTER W
'X' # 0x0058 -> LATIN CAPITAL LETTER X
'Y' # 0x0059 -> LATIN CAPITAL LETTER Y
'Z' # 0x005a -> LATIN CAPITAL LETTER Z
'[' # 0x005b -> LEFT SQUARE BRACKET
'\\' # 0x005c -> REVERSE SOLIDUS
']' # 0x005d -> RIGHT SQUARE BRACKET
'^' # 0x005e -> CIRCUMFLEX ACCENT
'_' # 0x005f -> LOW LINE
'`' # 0x0060 -> GRAVE ACCENT
'a' # 0x0061 -> LATIN SMALL LETTER A
'b' # 0x0062 -> LATIN SMALL LETTER B
'c' # 0x0063 -> LATIN SMALL LETTER C
'd' # 0x0064 -> LATIN SMALL LETTER D
'e' # 0x0065 -> LATIN SMALL LETTER E
'f' # 0x0066 -> LATIN SMALL LETTER F
'g' # 0x0067 -> LATIN SMALL LETTER G
'h' # 0x0068 -> LATIN SMALL LETTER H
'i' # 0x0069 -> LATIN SMALL LETTER I
'j' # 0x006a -> LATIN SMALL LETTER J
'k' # 0x006b -> LATIN SMALL LETTER K
'l' # 0x006c -> LATIN SMALL LETTER L
'm' # 0x006d -> LATIN SMALL LETTER M
'n' # 0x006e -> LATIN SMALL LETTER N
'o' # 0x006f -> LATIN SMALL LETTER O
'p' # 0x0070 -> LATIN SMALL LETTER P
'q' # 0x0071 -> LATIN SMALL LETTER Q
'r' # 0x0072 -> LATIN SMALL LETTER R
's' # 0x0073 -> LATIN SMALL LETTER S
't' # 0x0074 -> LATIN SMALL LETTER T
'u' # 0x0075 -> LATIN SMALL LETTER U
'v' # 0x0076 -> LATIN SMALL LETTER V
'w' # 0x0077 -> LATIN SMALL LETTER W
'x' # 0x0078 -> LATIN SMALL LETTER X
'y' # 0x0079 -> LATIN SMALL LETTER Y
'z' # 0x007a -> LATIN SMALL LETTER Z
'{' # 0x007b -> LEFT CURLY BRACKET
'|' # 0x007c -> VERTICAL LINE
'}' # 0x007d -> RIGHT CURLY BRACKET
'~' # 0x007e -> TILDE
'\x7f' # 0x007f -> DELETE
'\ufffe' # 0x0080 -> UNDEFINED
'\ufffe' # 0x0081 -> UNDEFINED
'\ufffe' # 0x0082 -> UNDEFINED
'\ufffe' # 0x0083 -> UNDEFINED
'\ufffe' # 0x0084 -> UNDEFINED
'\ufffe' # 0x0085 -> UNDEFINED
'\u0386' # 0x0086 -> GREEK CAPITAL LETTER ALPHA WITH TONOS
'\ufffe' # 0x0087 -> UNDEFINED
'\xb7' # 0x0088 -> MIDDLE DOT
'\xac' # 0x0089 -> NOT SIGN
'\xa6' # 0x008a -> BROKEN BAR
'\u2018' # 0x008b -> LEFT SINGLE QUOTATION MARK
'\u2019' # 0x008c -> RIGHT SINGLE QUOTATION MARK
'\u0388' # 0x008d -> GREEK CAPITAL LETTER EPSILON WITH TONOS
'\u2015' # 0x008e -> HORIZONTAL BAR
'\u0389' # 0x008f -> GREEK CAPITAL LETTER ETA WITH TONOS
'\u038a' # 0x0090 -> GREEK CAPITAL LETTER IOTA WITH TONOS
'\u03aa' # 0x0091 -> GREEK CAPITAL LETTER IOTA WITH DIALYTIKA
'\u038c' # 0x0092 -> GREEK CAPITAL LETTER OMICRON WITH TONOS
'\ufffe' # 0x0093 -> UNDEFINED
'\ufffe' # 0x0094 -> UNDEFINED
'\u038e' # 0x0095 -> GREEK CAPITAL LETTER UPSILON WITH TONOS
'\u03ab' # 0x0096 -> GREEK CAPITAL LETTER UPSILON WITH DIALYTIKA
'\xa9' # 0x0097 -> COPYRIGHT SIGN
'\u038f' # 0x0098 -> GREEK CAPITAL LETTER OMEGA WITH TONOS
'\xb2' # 0x0099 -> SUPERSCRIPT TWO
'\xb3' # 0x009a -> SUPERSCRIPT THREE
'\u03ac' # 0x009b -> GREEK SMALL LETTER ALPHA WITH TONOS
'\xa3' # 0x009c -> POUND SIGN
'\u03ad' # 0x009d -> GREEK SMALL LETTER EPSILON WITH TONOS
'\u03ae' # 0x009e -> GREEK SMALL LETTER ETA WITH TONOS
'\u03af' # 0x009f -> GREEK SMALL LETTER IOTA WITH TONOS
'\u03ca' # 0x00a0 -> GREEK SMALL LETTER IOTA WITH DIALYTIKA
'\u0390' # 0x00a1 -> GREEK SMALL LETTER IOTA WITH DIALYTIKA AND TONOS
'\u03cc' # 0x00a2 -> GREEK SMALL LETTER OMICRON WITH TONOS
'\u03cd' # 0x00a3 -> GREEK SMALL LETTER UPSILON WITH TONOS
'\u0391' # 0x00a4 -> GREEK CAPITAL LETTER ALPHA
'\u0392' # 0x00a5 -> GREEK CAPITAL LETTER BETA
'\u0393' # 0x00a6 -> GREEK CAPITAL LETTER GAMMA
'\u0394' # 0x00a7 -> GREEK CAPITAL LETTER DELTA
'\u0395' # 0x00a8 -> GREEK CAPITAL LETTER EPSILON
'\u0396' # 0x00a9 -> GREEK CAPITAL LETTER ZETA
'\u0397' # 0x00aa -> GREEK CAPITAL LETTER ETA
'\xbd' # 0x00ab -> VULGAR FRACTION ONE HALF
'\u0398' # 0x00ac -> GREEK CAPITAL LETTER THETA
'\u0399' # 0x00ad -> GREEK CAPITAL LETTER IOTA
'\xab' # 0x00ae -> LEFT-POINTING DOUBLE ANGLE QUOTATION MARK
'\xbb' # 0x00af -> RIGHT-POINTING DOUBLE ANGLE QUOTATION MARK
'\u2591' # 0x00b0 -> LIGHT SHADE
'\u2592' # 0x00b1 -> MEDIUM SHADE
'\u2593' # 0x00b2 -> DARK SHADE
'\u2502' # 0x00b3 -> BOX DRAWINGS LIGHT VERTICAL
'\u2524' # 0x00b4 -> BOX DRAWINGS LIGHT VERTICAL AND LEFT
'\u039a' # 0x00b5 -> GREEK CAPITAL LETTER KAPPA
'\u039b' # 0x00b6 -> GREEK CAPITAL LETTER LAMDA
'\u039c' # 0x00b7 -> GREEK CAPITAL LETTER MU
'\u039d' # 0x00b8 -> GREEK CAPITAL LETTER NU
'\u2563' # 0x00b9 -> BOX DRAWINGS DOUBLE VERTICAL AND LEFT
'\u2551' # 0x00ba -> BOX DRAWINGS DOUBLE VERTICAL
'\u2557' # 0x00bb -> BOX DRAWINGS DOUBLE DOWN AND LEFT
'\u255d' # 0x00bc -> BOX DRAWINGS DOUBLE UP AND LEFT
'\u039e' # 0x00bd -> GREEK CAPITAL LETTER XI
'\u039f' # 0x00be -> GREEK CAPITAL LETTER OMICRON
'\u2510' # 0x00bf -> BOX DRAWINGS LIGHT DOWN AND LEFT
'\u2514' # 0x00c0 -> BOX DRAWINGS LIGHT UP AND RIGHT
'\u2534' # 0x00c1 -> BOX DRAWINGS LIGHT UP AND HORIZONTAL
'\u252c' # 0x00c2 -> BOX DRAWINGS LIGHT DOWN AND HORIZONTAL
'\u251c' # 0x00c3 -> BOX DRAWINGS LIGHT VERTICAL AND RIGHT
'\u2500' # 0x00c4 -> BOX DRAWINGS LIGHT HORIZONTAL
'\u253c' # 0x00c5 -> BOX DRAWINGS LIGHT VERTICAL AND HORIZONTAL
'\u03a0' # 0x00c6 -> GREEK CAPITAL LETTER PI
'\u03a1' # 0x00c7 -> GREEK CAPITAL LETTER RHO
'\u255a' # 0x00c8 -> BOX DRAWINGS DOUBLE UP AND RIGHT
'\u2554' # 0x00c9 -> BOX DRAWINGS DOUBLE DOWN AND RIGHT
'\u2569' # 0x00ca -> BOX DRAWINGS DOUBLE UP AND HORIZONTAL
'\u2566' # 0x00cb -> BOX DRAWINGS DOUBLE DOWN AND HORIZONTAL
'\u2560' # 0x00cc -> BOX DRAWINGS DOUBLE VERTICAL AND RIGHT
'\u2550' # 0x00cd -> BOX DRAWINGS DOUBLE HORIZONTAL
'\u256c' # 0x00ce -> BOX DRAWINGS DOUBLE VERTICAL AND HORIZONTAL
'\u03a3' # 0x00cf -> GREEK CAPITAL LETTER SIGMA
'\u03a4' # 0x00d0 -> GREEK CAPITAL LETTER TAU
'\u03a5' # 0x00d1 -> GREEK CAPITAL LETTER UPSILON
'\u03a6' # 0x00d2 -> GREEK CAPITAL LETTER PHI
'\u03a7' # 0x00d3 -> GREEK CAPITAL LETTER CHI
'\u03a8' # 0x00d4 -> GREEK CAPITAL LETTER PSI
'\u03a9' # 0x00d5 -> GREEK CAPITAL LETTER OMEGA
'\u03b1' # 0x00d6 -> GREEK SMALL LETTER ALPHA
'\u03b2' # 0x00d7 -> GREEK SMALL LETTER BETA
'\u03b3' # 0x00d8 -> GREEK SMALL LETTER GAMMA
'\u2518' # 0x00d9 -> BOX DRAWINGS LIGHT UP AND LEFT
'\u250c' # 0x00da -> BOX DRAWINGS LIGHT DOWN AND RIGHT
'\u2588' # 0x00db -> FULL BLOCK
'\u2584' # 0x00dc -> LOWER HALF BLOCK
'\u03b4' # 0x00dd -> GREEK SMALL LETTER DELTA
'\u03b5' # 0x00de -> GREEK SMALL LETTER EPSILON
'\u2580' # 0x00df -> UPPER HALF BLOCK
'\u03b6' # 0x00e0 -> GREEK SMALL LETTER ZETA
'\u03b7' # 0x00e1 -> GREEK SMALL LETTER ETA
'\u03b8' # 0x00e2 -> GREEK SMALL LETTER THETA
'\u03b9' # 0x00e3 -> GREEK SMALL LETTER IOTA
'\u03ba' # 0x00e4 -> GREEK SMALL LETTER KAPPA
'\u03bb' # 0x00e5 -> GREEK SMALL LETTER LAMDA
'\u03bc' # 0x00e6 -> GREEK SMALL LETTER MU
'\u03bd' # 0x00e7 -> GREEK SMALL LETTER NU
'\u03be' # 0x00e8 -> GREEK SMALL LETTER XI
'\u03bf' # 0x00e9 -> GREEK SMALL LETTER OMICRON
'\u03c0' # 0x00ea -> GREEK SMALL LETTER PI
'\u03c1' # 0x00eb -> GREEK SMALL LETTER RHO
'\u03c3' # 0x00ec -> GREEK SMALL LETTER SIGMA
'\u03c2' # 0x00ed -> GREEK SMALL LETTER FINAL SIGMA
'\u03c4' # 0x00ee -> GREEK SMALL LETTER TAU
'\u0384' # 0x00ef -> GREEK TONOS
'\xad' # 0x00f0 -> SOFT HYPHEN
'\xb1' # 0x00f1 -> PLUS-MINUS SIGN
'\u03c5' # 0x00f2 -> GREEK SMALL LETTER UPSILON
'\u03c6' # 0x00f3 -> GREEK SMALL LETTER PHI
'\u03c7' # 0x00f4 -> GREEK SMALL LETTER CHI
'\xa7' # 0x00f5 -> SECTION SIGN
'\u03c8' # 0x00f6 -> GREEK SMALL LETTER PSI
'\u0385' # 0x00f7 -> GREEK DIALYTIKA TONOS
'\xb0' # 0x00f8 -> DEGREE SIGN
'\xa8' # 0x00f9 -> DIAERESIS
'\u03c9' # 0x00fa -> GREEK SMALL LETTER OMEGA
'\u03cb' # 0x00fb -> GREEK SMALL LETTER UPSILON WITH DIALYTIKA
'\u03b0' # 0x00fc -> GREEK SMALL LETTER UPSILON WITH DIALYTIKA AND TONOS
'\u03ce' # 0x00fd -> GREEK SMALL LETTER OMEGA WITH TONOS
'\u25a0' # 0x00fe -> BLACK SQUARE
'\xa0' # 0x00ff -> NO-BREAK SPACE
)
### Encoding Map
encoding_map = {
0x0000: 0x0000, # NULL
0x0001: 0x0001, # START OF HEADING
0x0002: 0x0002, # START OF TEXT
0x0003: 0x0003, # END OF TEXT
0x0004: 0x0004, # END OF TRANSMISSION
0x0005: 0x0005, # ENQUIRY
0x0006: 0x0006, # ACKNOWLEDGE
0x0007: 0x0007, # BELL
0x0008: 0x0008, # BACKSPACE
0x0009: 0x0009, # HORIZONTAL TABULATION
0x000a: 0x000a, # LINE FEED
0x000b: 0x000b, # VERTICAL TABULATION
0x000c: 0x000c, # FORM FEED
0x000d: 0x000d, # CARRIAGE RETURN
0x000e: 0x000e, # SHIFT OUT
0x000f: 0x000f, # SHIFT IN
0x0010: 0x0010, # DATA LINK ESCAPE
0x0011: 0x0011, # DEVICE CONTROL ONE
0x0012: 0x0012, # DEVICE CONTROL TWO
0x0013: 0x0013, # DEVICE CONTROL THREE
0x0014: 0x0014, # DEVICE CONTROL FOUR
0x0015: 0x0015, # NEGATIVE ACKNOWLEDGE
0x0016: 0x0016, # SYNCHRONOUS IDLE
0x0017: 0x0017, # END OF TRANSMISSION BLOCK
0x0018: 0x0018, # CANCEL
0x0019: 0x0019, # END OF MEDIUM
0x001a: 0x001a, # SUBSTITUTE
0x001b: 0x001b, # ESCAPE
0x001c: 0x001c, # FILE SEPARATOR
0x001d: 0x001d, # GROUP SEPARATOR
0x001e: 0x001e, # RECORD SEPARATOR
0x001f: 0x001f, # UNIT SEPARATOR
0x0020: 0x0020, # SPACE
0x0021: 0x0021, # EXCLAMATION MARK
0x0022: 0x0022, # QUOTATION MARK
0x0023: 0x0023, # NUMBER SIGN
0x0024: 0x0024, # DOLLAR SIGN
0x0025: 0x0025, # PERCENT SIGN
0x0026: 0x0026, # AMPERSAND
0x0027: 0x0027, # APOSTROPHE
0x0028: 0x0028, # LEFT PARENTHESIS
0x0029: 0x0029, # RIGHT PARENTHESIS
0x002a: 0x002a, # ASTERISK
0x002b: 0x002b, # PLUS SIGN
0x002c: 0x002c, # COMMA
0x002d: 0x002d, # HYPHEN-MINUS
0x002e: 0x002e, # FULL STOP
0x002f: 0x002f, # SOLIDUS
0x0030: 0x0030, # DIGIT ZERO
0x0031: 0x0031, # DIGIT ONE
0x0032: 0x0032, # DIGIT TWO
0x0033: 0x0033, # DIGIT THREE
0x0034: 0x0034, # DIGIT FOUR
0x0035: 0x0035, # DIGIT FIVE
0x0036: 0x0036, # DIGIT SIX
0x0037: 0x0037, # DIGIT SEVEN
0x0038: 0x0038, # DIGIT EIGHT
0x0039: 0x0039, # DIGIT NINE
0x003a: 0x003a, # COLON
0x003b: 0x003b, # SEMICOLON
0x003c: 0x003c, # LESS-THAN SIGN
0x003d: 0x003d, # EQUALS SIGN
0x003e: 0x003e, # GREATER-THAN SIGN
0x003f: 0x003f, # QUESTION MARK
0x0040: 0x0040, # COMMERCIAL AT
0x0041: 0x0041, # LATIN CAPITAL LETTER A
0x0042: 0x0042, # LATIN CAPITAL LETTER B
0x0043: 0x0043, # LATIN CAPITAL LETTER C
0x0044: 0x0044, # LATIN CAPITAL LETTER D
0x0045: 0x0045, # LATIN CAPITAL LETTER E
0x0046: 0x0046, # LATIN CAPITAL LETTER F
0x0047: 0x0047, # LATIN CAPITAL LETTER G
0x0048: 0x0048, # LATIN CAPITAL LETTER H
0x0049: 0x0049, # LATIN CAPITAL LETTER I
0x004a: 0x004a, # LATIN CAPITAL LETTER J
0x004b: 0x004b, # LATIN CAPITAL LETTER K
0x004c: 0x004c, # LATIN CAPITAL LETTER L
0x004d: 0x004d, # LATIN CAPITAL LETTER M
0x004e: 0x004e, # LATIN CAPITAL LETTER N
0x004f: 0x004f, # LATIN CAPITAL LETTER O
0x0050: 0x0050, # LATIN CAPITAL LETTER P
0x0051: 0x0051, # LATIN CAPITAL LETTER Q
0x0052: 0x0052, # LATIN CAPITAL LETTER R
0x0053: 0x0053, # LATIN CAPITAL LETTER S
0x0054: 0x0054, # LATIN CAPITAL LETTER T
0x0055: 0x0055, # LATIN CAPITAL LETTER U
0x0056: 0x0056, # LATIN CAPITAL LETTER V
0x0057: 0x0057, # LATIN CAPITAL LETTER W
0x0058: 0x0058, # LATIN CAPITAL LETTER X
0x0059: 0x0059, # LATIN CAPITAL LETTER Y
0x005a: 0x005a, # LATIN CAPITAL LETTER Z
0x005b: 0x005b, # LEFT SQUARE BRACKET
0x005c: 0x005c, # REVERSE SOLIDUS
0x005d: 0x005d, # RIGHT SQUARE BRACKET
0x005e: 0x005e, # CIRCUMFLEX ACCENT
0x005f: 0x005f, # LOW LINE
0x0060: 0x0060, # GRAVE ACCENT
0x0061: 0x0061, # LATIN SMALL LETTER A
0x0062: 0x0062, # LATIN SMALL LETTER B
0x0063: 0x0063, # LATIN SMALL LETTER C
0x0064: 0x0064, # LATIN SMALL LETTER D
0x0065: 0x0065, # LATIN SMALL LETTER E
0x0066: 0x0066, # LATIN SMALL LETTER F
0x0067: 0x0067, # LATIN SMALL LETTER G
0x0068: 0x0068, # LATIN SMALL LETTER H
0x0069: 0x0069, # LATIN SMALL LETTER I
0x006a: 0x006a, # LATIN SMALL LETTER J
0x006b: 0x006b, # LATIN SMALL LETTER K
0x006c: 0x006c, # LATIN SMALL LETTER L
0x006d: 0x006d, # LATIN SMALL LETTER M
0x006e: 0x006e, # LATIN SMALL LETTER N
0x006f: 0x006f, # LATIN SMALL LETTER O
0x0070: 0x0070, # LATIN SMALL LETTER P
0x0071: 0x0071, # LATIN SMALL LETTER Q
0x0072: 0x0072, # LATIN SMALL LETTER R
0x0073: 0x0073, # LATIN SMALL LETTER S
0x0074: 0x0074, # LATIN SMALL LETTER T
0x0075: 0x0075, # LATIN SMALL LETTER U
0x0076: 0x0076, # LATIN SMALL LETTER V
0x0077: 0x0077, # LATIN SMALL LETTER W
0x0078: 0x0078, # LATIN SMALL LETTER X
0x0079: 0x0079, # LATIN SMALL LETTER Y
0x007a: 0x007a, # LATIN SMALL LETTER Z
0x007b: 0x007b, # LEFT CURLY BRACKET
0x007c: 0x007c, # VERTICAL LINE
0x007d: 0x007d, # RIGHT CURLY BRACKET
0x007e: 0x007e, # TILDE
0x007f: 0x007f, # DELETE
0x00a0: 0x00ff, # NO-BREAK SPACE
0x00a3: 0x009c, # POUND SIGN
0x00a6: 0x008a, # BROKEN BAR
0x00a7: 0x00f5, # SECTION SIGN
0x00a8: 0x00f9, # DIAERESIS
0x00a9: 0x0097, # COPYRIGHT SIGN
0x00ab: 0x00ae, # LEFT-POINTING DOUBLE ANGLE QUOTATION MARK
0x00ac: 0x0089, # NOT SIGN
0x00ad: 0x00f0, # SOFT HYPHEN
0x00b0: 0x00f8, # DEGREE SIGN
0x00b1: 0x00f1, # PLUS-MINUS SIGN
0x00b2: 0x0099, # SUPERSCRIPT TWO
0x00b3: 0x009a, # SUPERSCRIPT THREE
0x00b7: 0x0088, # MIDDLE DOT
0x00bb: 0x00af, # RIGHT-POINTING DOUBLE ANGLE QUOTATION MARK
0x00bd: 0x00ab, # VULGAR FRACTION ONE HALF
0x0384: 0x00ef, # GREEK TONOS
0x0385: 0x00f7, # GREEK DIALYTIKA TONOS
0x0386: 0x0086, # GREEK CAPITAL LETTER ALPHA WITH TONOS
0x0388: 0x008d, # GREEK CAPITAL LETTER EPSILON WITH TONOS
0x0389: 0x008f, # GREEK CAPITAL LETTER ETA WITH TONOS
0x038a: 0x0090, # GREEK CAPITAL LETTER IOTA WITH TONOS
0x038c: 0x0092, # GREEK CAPITAL LETTER OMICRON WITH TONOS
0x038e: 0x0095, # GREEK CAPITAL LETTER UPSILON WITH TONOS
0x038f: 0x0098, # GREEK CAPITAL LETTER OMEGA WITH TONOS
0x0390: 0x00a1, # GREEK SMALL LETTER IOTA WITH DIALYTIKA AND TONOS
0x0391: 0x00a4, # GREEK CAPITAL LETTER ALPHA
0x0392: 0x00a5, # GREEK CAPITAL LETTER BETA
0x0393: 0x00a6, # GREEK CAPITAL LETTER GAMMA
0x0394: 0x00a7, # GREEK CAPITAL LETTER DELTA
0x0395: 0x00a8, # GREEK CAPITAL LETTER EPSILON
0x0396: 0x00a9, # GREEK CAPITAL LETTER ZETA
0x0397: 0x00aa, # GREEK CAPITAL LETTER ETA
0x0398: 0x00ac, # GREEK CAPITAL LETTER THETA
0x0399: 0x00ad, # GREEK CAPITAL LETTER IOTA
0x039a: 0x00b5, # GREEK CAPITAL LETTER KAPPA
0x039b: 0x00b6, # GREEK CAPITAL LETTER LAMDA
0x039c: 0x00b7, # GREEK CAPITAL LETTER MU
0x039d: 0x00b8, # GREEK CAPITAL LETTER NU
0x039e: 0x00bd, # GREEK CAPITAL LETTER XI
0x039f: 0x00be, # GREEK CAPITAL LETTER OMICRON
0x03a0: 0x00c6, # GREEK CAPITAL LETTER PI
0x03a1: 0x00c7, # GREEK CAPITAL LETTER RHO
0x03a3: 0x00cf, # GREEK CAPITAL LETTER SIGMA
0x03a4: 0x00d0, # GREEK CAPITAL LETTER TAU
0x03a5: 0x00d1, # GREEK CAPITAL LETTER UPSILON
0x03a6: 0x00d2, # GREEK CAPITAL LETTER PHI
0x03a7: 0x00d3, # GREEK CAPITAL LETTER CHI
0x03a8: 0x00d4, # GREEK CAPITAL LETTER PSI
0x03a9: 0x00d5, # GREEK CAPITAL LETTER OMEGA
0x03aa: 0x0091, # GREEK CAPITAL LETTER IOTA WITH DIALYTIKA
0x03ab: 0x0096, # GREEK CAPITAL LETTER UPSILON WITH DIALYTIKA
0x03ac: 0x009b, # GREEK SMALL LETTER ALPHA WITH TONOS
0x03ad: 0x009d, # GREEK SMALL LETTER EPSILON WITH TONOS
0x03ae: 0x009e, # GREEK SMALL LETTER ETA WITH TONOS
0x03af: 0x009f, # GREEK SMALL LETTER IOTA WITH TONOS
0x03b0: 0x00fc, # GREEK SMALL LETTER UPSILON WITH DIALYTIKA AND TONOS
0x03b1: 0x00d6, # GREEK SMALL LETTER ALPHA
0x03b2: 0x00d7, # GREEK SMALL LETTER BETA
0x03b3: 0x00d8, # GREEK SMALL LETTER GAMMA
0x03b4: 0x00dd, # GREEK SMALL LETTER DELTA
0x03b5: 0x00de, # GREEK SMALL LETTER EPSILON
0x03b6: 0x00e0, # GREEK SMALL LETTER ZETA
0x03b7: 0x00e1, # GREEK SMALL LETTER ETA
0x03b8: 0x00e2, # GREEK SMALL LETTER THETA
0x03b9: 0x00e3, # GREEK SMALL LETTER IOTA
0x03ba: 0x00e4, # GREEK SMALL LETTER KAPPA
0x03bb: 0x00e5, # GREEK SMALL LETTER LAMDA
0x03bc: 0x00e6, # GREEK SMALL LETTER MU
0x03bd: 0x00e7, # GREEK SMALL LETTER NU
0x03be: 0x00e8, # GREEK SMALL LETTER XI
0x03bf: 0x00e9, # GREEK SMALL LETTER OMICRON
0x03c0: 0x00ea, # GREEK SMALL LETTER PI
0x03c1: 0x00eb, # GREEK SMALL LETTER RHO
0x03c2: 0x00ed, # GREEK SMALL LETTER FINAL SIGMA
0x03c3: 0x00ec, # GREEK SMALL LETTER SIGMA
0x03c4: 0x00ee, # GREEK SMALL LETTER TAU
0x03c5: 0x00f2, # GREEK SMALL LETTER UPSILON
0x03c6: 0x00f3, # GREEK SMALL LETTER PHI
0x03c7: 0x00f4, # GREEK SMALL LETTER CHI
0x03c8: 0x00f6, # GREEK SMALL LETTER PSI
0x03c9: 0x00fa, # GREEK SMALL LETTER OMEGA
0x03ca: 0x00a0, # GREEK SMALL LETTER IOTA WITH DIALYTIKA
0x03cb: 0x00fb, # GREEK SMALL LETTER UPSILON WITH DIALYTIKA
0x03cc: 0x00a2, # GREEK SMALL LETTER OMICRON WITH TONOS
0x03cd: 0x00a3, # GREEK SMALL LETTER UPSILON WITH TONOS
0x03ce: 0x00fd, # GREEK SMALL LETTER OMEGA WITH TONOS
0x2015: 0x008e, # HORIZONTAL BAR
0x2018: 0x008b, # LEFT SINGLE QUOTATION MARK
0x2019: 0x008c, # RIGHT SINGLE QUOTATION MARK
0x2500: 0x00c4, # BOX DRAWINGS LIGHT HORIZONTAL
0x2502: 0x00b3, # BOX DRAWINGS LIGHT VERTICAL
0x250c: 0x00da, # BOX DRAWINGS LIGHT DOWN AND RIGHT
0x2510: 0x00bf, # BOX DRAWINGS LIGHT DOWN AND LEFT
0x2514: 0x00c0, # BOX DRAWINGS LIGHT UP AND RIGHT
0x2518: 0x00d9, # BOX DRAWINGS LIGHT UP AND LEFT
0x251c: 0x00c3, # BOX DRAWINGS LIGHT VERTICAL AND RIGHT
0x2524: 0x00b4, # BOX DRAWINGS LIGHT VERTICAL AND LEFT
0x252c: 0x00c2, # BOX DRAWINGS LIGHT DOWN AND HORIZONTAL
0x2534: 0x00c1, # BOX DRAWINGS LIGHT UP AND HORIZONTAL
0x253c: 0x00c5, # BOX DRAWINGS LIGHT VERTICAL AND HORIZONTAL
0x2550: 0x00cd, # BOX DRAWINGS DOUBLE HORIZONTAL
0x2551: 0x00ba, # BOX DRAWINGS DOUBLE VERTICAL
0x2554: 0x00c9, # BOX DRAWINGS DOUBLE DOWN AND RIGHT
0x2557: 0x00bb, # BOX DRAWINGS DOUBLE DOWN AND LEFT
0x255a: 0x00c8, # BOX DRAWINGS DOUBLE UP AND RIGHT
0x255d: 0x00bc, # BOX DRAWINGS DOUBLE UP AND LEFT
0x2560: 0x00cc, # BOX DRAWINGS DOUBLE VERTICAL AND RIGHT
0x2563: 0x00b9, # BOX DRAWINGS DOUBLE VERTICAL AND LEFT
0x2566: 0x00cb, # BOX DRAWINGS DOUBLE DOWN AND HORIZONTAL
0x2569: 0x00ca, # BOX DRAWINGS DOUBLE UP AND HORIZONTAL
0x256c: 0x00ce, # BOX DRAWINGS DOUBLE VERTICAL AND HORIZONTAL
0x2580: 0x00df, # UPPER HALF BLOCK
0x2584: 0x00dc, # LOWER HALF BLOCK
0x2588: 0x00db, # FULL BLOCK
0x2591: 0x00b0, # LIGHT SHADE
0x2592: 0x00b1, # MEDIUM SHADE
0x2593: 0x00b2, # DARK SHADE
0x25a0: 0x00fe, # BLACK SQUARE
} | /regulome_web-2.0rc1.tar.gz/regulome_web-2.0rc1/regulome_app/webapp/static/brython/Lib/encodings/cp869.py | 0.411466 | 0.18385 | cp869.py | pypi |
"""#"
import codecs
### Codec APIs
class Codec(codecs.Codec):
    """Stateless CP775 codec built on the charmap helpers."""

    def encode(self, input, errors='strict'):
        # Returns (encoded bytes, length consumed), per the codecs.Codec contract.
        return codecs.charmap_encode(input, errors, encoding_map)

    def decode(self, input, errors='strict'):
        # Returns (decoded text, length consumed), per the codecs.Codec contract.
        return codecs.charmap_decode(input, errors, decoding_table)
class IncrementalEncoder(codecs.IncrementalEncoder):
    """Incremental CP775 encoder; charmap encoding needs no cross-call state."""

    def encode(self, input, final=False):
        # [0] drops the consumed-length element: incremental API returns bytes only.
        return codecs.charmap_encode(input, self.errors, encoding_map)[0]
class IncrementalDecoder(codecs.IncrementalDecoder):
    """Incremental CP775 decoder; charmap decoding needs no cross-call state."""

    def decode(self, input, final=False):
        # [0] drops the consumed-length element: incremental API returns text only.
        return codecs.charmap_decode(input, self.errors, decoding_table)[0]
class StreamWriter(Codec, codecs.StreamWriter):
    # Combines Codec.encode with the codecs.StreamWriter stream machinery.
    pass
class StreamReader(Codec, codecs.StreamReader):
    # Combines Codec.decode with the codecs.StreamReader stream machinery.
    pass
### encodings module API
def getregentry():
    """Return the CodecInfo record the encodings package uses to register cp775."""
    codec = Codec()
    return codecs.CodecInfo(
        name='cp775',
        encode=codec.encode,
        decode=codec.decode,
        incrementalencoder=IncrementalEncoder,
        incrementaldecoder=IncrementalDecoder,
        streamreader=StreamReader,
        streamwriter=StreamWriter,
    )
### Decoding Map
decoding_map = codecs.make_identity_dict(range(256))
decoding_map.update({
0x0080: 0x0106, # LATIN CAPITAL LETTER C WITH ACUTE
0x0081: 0x00fc, # LATIN SMALL LETTER U WITH DIAERESIS
0x0082: 0x00e9, # LATIN SMALL LETTER E WITH ACUTE
0x0083: 0x0101, # LATIN SMALL LETTER A WITH MACRON
0x0084: 0x00e4, # LATIN SMALL LETTER A WITH DIAERESIS
0x0085: 0x0123, # LATIN SMALL LETTER G WITH CEDILLA
0x0086: 0x00e5, # LATIN SMALL LETTER A WITH RING ABOVE
0x0087: 0x0107, # LATIN SMALL LETTER C WITH ACUTE
0x0088: 0x0142, # LATIN SMALL LETTER L WITH STROKE
0x0089: 0x0113, # LATIN SMALL LETTER E WITH MACRON
0x008a: 0x0156, # LATIN CAPITAL LETTER R WITH CEDILLA
0x008b: 0x0157, # LATIN SMALL LETTER R WITH CEDILLA
0x008c: 0x012b, # LATIN SMALL LETTER I WITH MACRON
0x008d: 0x0179, # LATIN CAPITAL LETTER Z WITH ACUTE
0x008e: 0x00c4, # LATIN CAPITAL LETTER A WITH DIAERESIS
0x008f: 0x00c5, # LATIN CAPITAL LETTER A WITH RING ABOVE
0x0090: 0x00c9, # LATIN CAPITAL LETTER E WITH ACUTE
0x0091: 0x00e6, # LATIN SMALL LIGATURE AE
0x0092: 0x00c6, # LATIN CAPITAL LIGATURE AE
0x0093: 0x014d, # LATIN SMALL LETTER O WITH MACRON
0x0094: 0x00f6, # LATIN SMALL LETTER O WITH DIAERESIS
0x0095: 0x0122, # LATIN CAPITAL LETTER G WITH CEDILLA
0x0096: 0x00a2, # CENT SIGN
0x0097: 0x015a, # LATIN CAPITAL LETTER S WITH ACUTE
0x0098: 0x015b, # LATIN SMALL LETTER S WITH ACUTE
0x0099: 0x00d6, # LATIN CAPITAL LETTER O WITH DIAERESIS
0x009a: 0x00dc, # LATIN CAPITAL LETTER U WITH DIAERESIS
0x009b: 0x00f8, # LATIN SMALL LETTER O WITH STROKE
0x009c: 0x00a3, # POUND SIGN
0x009d: 0x00d8, # LATIN CAPITAL LETTER O WITH STROKE
0x009e: 0x00d7, # MULTIPLICATION SIGN
0x009f: 0x00a4, # CURRENCY SIGN
0x00a0: 0x0100, # LATIN CAPITAL LETTER A WITH MACRON
0x00a1: 0x012a, # LATIN CAPITAL LETTER I WITH MACRON
0x00a2: 0x00f3, # LATIN SMALL LETTER O WITH ACUTE
0x00a3: 0x017b, # LATIN CAPITAL LETTER Z WITH DOT ABOVE
0x00a4: 0x017c, # LATIN SMALL LETTER Z WITH DOT ABOVE
0x00a5: 0x017a, # LATIN SMALL LETTER Z WITH ACUTE
0x00a6: 0x201d, # RIGHT DOUBLE QUOTATION MARK
0x00a7: 0x00a6, # BROKEN BAR
0x00a8: 0x00a9, # COPYRIGHT SIGN
0x00a9: 0x00ae, # REGISTERED SIGN
0x00aa: 0x00ac, # NOT SIGN
0x00ab: 0x00bd, # VULGAR FRACTION ONE HALF
0x00ac: 0x00bc, # VULGAR FRACTION ONE QUARTER
0x00ad: 0x0141, # LATIN CAPITAL LETTER L WITH STROKE
0x00ae: 0x00ab, # LEFT-POINTING DOUBLE ANGLE QUOTATION MARK
0x00af: 0x00bb, # RIGHT-POINTING DOUBLE ANGLE QUOTATION MARK
0x00b0: 0x2591, # LIGHT SHADE
0x00b1: 0x2592, # MEDIUM SHADE
0x00b2: 0x2593, # DARK SHADE
0x00b3: 0x2502, # BOX DRAWINGS LIGHT VERTICAL
0x00b4: 0x2524, # BOX DRAWINGS LIGHT VERTICAL AND LEFT
0x00b5: 0x0104, # LATIN CAPITAL LETTER A WITH OGONEK
0x00b6: 0x010c, # LATIN CAPITAL LETTER C WITH CARON
0x00b7: 0x0118, # LATIN CAPITAL LETTER E WITH OGONEK
0x00b8: 0x0116, # LATIN CAPITAL LETTER E WITH DOT ABOVE
0x00b9: 0x2563, # BOX DRAWINGS DOUBLE VERTICAL AND LEFT
0x00ba: 0x2551, # BOX DRAWINGS DOUBLE VERTICAL
0x00bb: 0x2557, # BOX DRAWINGS DOUBLE DOWN AND LEFT
0x00bc: 0x255d, # BOX DRAWINGS DOUBLE UP AND LEFT
0x00bd: 0x012e, # LATIN CAPITAL LETTER I WITH OGONEK
0x00be: 0x0160, # LATIN CAPITAL LETTER S WITH CARON
0x00bf: 0x2510, # BOX DRAWINGS LIGHT DOWN AND LEFT
0x00c0: 0x2514, # BOX DRAWINGS LIGHT UP AND RIGHT
0x00c1: 0x2534, # BOX DRAWINGS LIGHT UP AND HORIZONTAL
0x00c2: 0x252c, # BOX DRAWINGS LIGHT DOWN AND HORIZONTAL
0x00c3: 0x251c, # BOX DRAWINGS LIGHT VERTICAL AND RIGHT
0x00c4: 0x2500, # BOX DRAWINGS LIGHT HORIZONTAL
0x00c5: 0x253c, # BOX DRAWINGS LIGHT VERTICAL AND HORIZONTAL
0x00c6: 0x0172, # LATIN CAPITAL LETTER U WITH OGONEK
0x00c7: 0x016a, # LATIN CAPITAL LETTER U WITH MACRON
0x00c8: 0x255a, # BOX DRAWINGS DOUBLE UP AND RIGHT
0x00c9: 0x2554, # BOX DRAWINGS DOUBLE DOWN AND RIGHT
0x00ca: 0x2569, # BOX DRAWINGS DOUBLE UP AND HORIZONTAL
0x00cb: 0x2566, # BOX DRAWINGS DOUBLE DOWN AND HORIZONTAL
0x00cc: 0x2560, # BOX DRAWINGS DOUBLE VERTICAL AND RIGHT
0x00cd: 0x2550, # BOX DRAWINGS DOUBLE HORIZONTAL
0x00ce: 0x256c, # BOX DRAWINGS DOUBLE VERTICAL AND HORIZONTAL
0x00cf: 0x017d, # LATIN CAPITAL LETTER Z WITH CARON
0x00d0: 0x0105, # LATIN SMALL LETTER A WITH OGONEK
0x00d1: 0x010d, # LATIN SMALL LETTER C WITH CARON
0x00d2: 0x0119, # LATIN SMALL LETTER E WITH OGONEK
0x00d3: 0x0117, # LATIN SMALL LETTER E WITH DOT ABOVE
0x00d4: 0x012f, # LATIN SMALL LETTER I WITH OGONEK
0x00d5: 0x0161, # LATIN SMALL LETTER S WITH CARON
0x00d6: 0x0173, # LATIN SMALL LETTER U WITH OGONEK
0x00d7: 0x016b, # LATIN SMALL LETTER U WITH MACRON
0x00d8: 0x017e, # LATIN SMALL LETTER Z WITH CARON
0x00d9: 0x2518, # BOX DRAWINGS LIGHT UP AND LEFT
0x00da: 0x250c, # BOX DRAWINGS LIGHT DOWN AND RIGHT
0x00db: 0x2588, # FULL BLOCK
0x00dc: 0x2584, # LOWER HALF BLOCK
0x00dd: 0x258c, # LEFT HALF BLOCK
0x00de: 0x2590, # RIGHT HALF BLOCK
0x00df: 0x2580, # UPPER HALF BLOCK
0x00e0: 0x00d3, # LATIN CAPITAL LETTER O WITH ACUTE
0x00e1: 0x00df, # LATIN SMALL LETTER SHARP S (GERMAN)
0x00e2: 0x014c, # LATIN CAPITAL LETTER O WITH MACRON
0x00e3: 0x0143, # LATIN CAPITAL LETTER N WITH ACUTE
0x00e4: 0x00f5, # LATIN SMALL LETTER O WITH TILDE
0x00e5: 0x00d5, # LATIN CAPITAL LETTER O WITH TILDE
0x00e6: 0x00b5, # MICRO SIGN
0x00e7: 0x0144, # LATIN SMALL LETTER N WITH ACUTE
0x00e8: 0x0136, # LATIN CAPITAL LETTER K WITH CEDILLA
0x00e9: 0x0137, # LATIN SMALL LETTER K WITH CEDILLA
0x00ea: 0x013b, # LATIN CAPITAL LETTER L WITH CEDILLA
0x00eb: 0x013c, # LATIN SMALL LETTER L WITH CEDILLA
0x00ec: 0x0146, # LATIN SMALL LETTER N WITH CEDILLA
0x00ed: 0x0112, # LATIN CAPITAL LETTER E WITH MACRON
0x00ee: 0x0145, # LATIN CAPITAL LETTER N WITH CEDILLA
0x00ef: 0x2019, # RIGHT SINGLE QUOTATION MARK
0x00f0: 0x00ad, # SOFT HYPHEN
0x00f1: 0x00b1, # PLUS-MINUS SIGN
0x00f2: 0x201c, # LEFT DOUBLE QUOTATION MARK
0x00f3: 0x00be, # VULGAR FRACTION THREE QUARTERS
0x00f4: 0x00b6, # PILCROW SIGN
0x00f5: 0x00a7, # SECTION SIGN
0x00f6: 0x00f7, # DIVISION SIGN
0x00f7: 0x201e, # DOUBLE LOW-9 QUOTATION MARK
0x00f8: 0x00b0, # DEGREE SIGN
0x00f9: 0x2219, # BULLET OPERATOR
0x00fa: 0x00b7, # MIDDLE DOT
0x00fb: 0x00b9, # SUPERSCRIPT ONE
0x00fc: 0x00b3, # SUPERSCRIPT THREE
0x00fd: 0x00b2, # SUPERSCRIPT TWO
0x00fe: 0x25a0, # BLACK SQUARE
0x00ff: 0x00a0, # NO-BREAK SPACE
})
### Decoding Table
decoding_table = (
'\x00' # 0x0000 -> NULL
'\x01' # 0x0001 -> START OF HEADING
'\x02' # 0x0002 -> START OF TEXT
'\x03' # 0x0003 -> END OF TEXT
'\x04' # 0x0004 -> END OF TRANSMISSION
'\x05' # 0x0005 -> ENQUIRY
'\x06' # 0x0006 -> ACKNOWLEDGE
'\x07' # 0x0007 -> BELL
'\x08' # 0x0008 -> BACKSPACE
'\t' # 0x0009 -> HORIZONTAL TABULATION
'\n' # 0x000a -> LINE FEED
'\x0b' # 0x000b -> VERTICAL TABULATION
'\x0c' # 0x000c -> FORM FEED
'\r' # 0x000d -> CARRIAGE RETURN
'\x0e' # 0x000e -> SHIFT OUT
'\x0f' # 0x000f -> SHIFT IN
'\x10' # 0x0010 -> DATA LINK ESCAPE
'\x11' # 0x0011 -> DEVICE CONTROL ONE
'\x12' # 0x0012 -> DEVICE CONTROL TWO
'\x13' # 0x0013 -> DEVICE CONTROL THREE
'\x14' # 0x0014 -> DEVICE CONTROL FOUR
'\x15' # 0x0015 -> NEGATIVE ACKNOWLEDGE
'\x16' # 0x0016 -> SYNCHRONOUS IDLE
'\x17' # 0x0017 -> END OF TRANSMISSION BLOCK
'\x18' # 0x0018 -> CANCEL
'\x19' # 0x0019 -> END OF MEDIUM
'\x1a' # 0x001a -> SUBSTITUTE
'\x1b' # 0x001b -> ESCAPE
'\x1c' # 0x001c -> FILE SEPARATOR
'\x1d' # 0x001d -> GROUP SEPARATOR
'\x1e' # 0x001e -> RECORD SEPARATOR
'\x1f' # 0x001f -> UNIT SEPARATOR
' ' # 0x0020 -> SPACE
'!' # 0x0021 -> EXCLAMATION MARK
'"' # 0x0022 -> QUOTATION MARK
'#' # 0x0023 -> NUMBER SIGN
'$' # 0x0024 -> DOLLAR SIGN
'%' # 0x0025 -> PERCENT SIGN
'&' # 0x0026 -> AMPERSAND
"'" # 0x0027 -> APOSTROPHE
'(' # 0x0028 -> LEFT PARENTHESIS
')' # 0x0029 -> RIGHT PARENTHESIS
'*' # 0x002a -> ASTERISK
'+' # 0x002b -> PLUS SIGN
',' # 0x002c -> COMMA
'-' # 0x002d -> HYPHEN-MINUS
'.' # 0x002e -> FULL STOP
'/' # 0x002f -> SOLIDUS
'0' # 0x0030 -> DIGIT ZERO
'1' # 0x0031 -> DIGIT ONE
'2' # 0x0032 -> DIGIT TWO
'3' # 0x0033 -> DIGIT THREE
'4' # 0x0034 -> DIGIT FOUR
'5' # 0x0035 -> DIGIT FIVE
'6' # 0x0036 -> DIGIT SIX
'7' # 0x0037 -> DIGIT SEVEN
'8' # 0x0038 -> DIGIT EIGHT
'9' # 0x0039 -> DIGIT NINE
':' # 0x003a -> COLON
';' # 0x003b -> SEMICOLON
'<' # 0x003c -> LESS-THAN SIGN
'=' # 0x003d -> EQUALS SIGN
'>' # 0x003e -> GREATER-THAN SIGN
'?' # 0x003f -> QUESTION MARK
'@' # 0x0040 -> COMMERCIAL AT
'A' # 0x0041 -> LATIN CAPITAL LETTER A
'B' # 0x0042 -> LATIN CAPITAL LETTER B
'C' # 0x0043 -> LATIN CAPITAL LETTER C
'D' # 0x0044 -> LATIN CAPITAL LETTER D
'E' # 0x0045 -> LATIN CAPITAL LETTER E
'F' # 0x0046 -> LATIN CAPITAL LETTER F
'G' # 0x0047 -> LATIN CAPITAL LETTER G
'H' # 0x0048 -> LATIN CAPITAL LETTER H
'I' # 0x0049 -> LATIN CAPITAL LETTER I
'J' # 0x004a -> LATIN CAPITAL LETTER J
'K' # 0x004b -> LATIN CAPITAL LETTER K
'L' # 0x004c -> LATIN CAPITAL LETTER L
'M' # 0x004d -> LATIN CAPITAL LETTER M
'N' # 0x004e -> LATIN CAPITAL LETTER N
'O' # 0x004f -> LATIN CAPITAL LETTER O
'P' # 0x0050 -> LATIN CAPITAL LETTER P
'Q' # 0x0051 -> LATIN CAPITAL LETTER Q
'R' # 0x0052 -> LATIN CAPITAL LETTER R
'S' # 0x0053 -> LATIN CAPITAL LETTER S
'T' # 0x0054 -> LATIN CAPITAL LETTER T
'U' # 0x0055 -> LATIN CAPITAL LETTER U
'V' # 0x0056 -> LATIN CAPITAL LETTER V
'W' # 0x0057 -> LATIN CAPITAL LETTER W
'X' # 0x0058 -> LATIN CAPITAL LETTER X
'Y' # 0x0059 -> LATIN CAPITAL LETTER Y
'Z' # 0x005a -> LATIN CAPITAL LETTER Z
'[' # 0x005b -> LEFT SQUARE BRACKET
'\\' # 0x005c -> REVERSE SOLIDUS
']' # 0x005d -> RIGHT SQUARE BRACKET
'^' # 0x005e -> CIRCUMFLEX ACCENT
'_' # 0x005f -> LOW LINE
'`' # 0x0060 -> GRAVE ACCENT
'a' # 0x0061 -> LATIN SMALL LETTER A
'b' # 0x0062 -> LATIN SMALL LETTER B
'c' # 0x0063 -> LATIN SMALL LETTER C
'd' # 0x0064 -> LATIN SMALL LETTER D
'e' # 0x0065 -> LATIN SMALL LETTER E
'f' # 0x0066 -> LATIN SMALL LETTER F
'g' # 0x0067 -> LATIN SMALL LETTER G
'h' # 0x0068 -> LATIN SMALL LETTER H
'i' # 0x0069 -> LATIN SMALL LETTER I
'j' # 0x006a -> LATIN SMALL LETTER J
'k' # 0x006b -> LATIN SMALL LETTER K
'l' # 0x006c -> LATIN SMALL LETTER L
'm' # 0x006d -> LATIN SMALL LETTER M
'n' # 0x006e -> LATIN SMALL LETTER N
'o' # 0x006f -> LATIN SMALL LETTER O
'p' # 0x0070 -> LATIN SMALL LETTER P
'q' # 0x0071 -> LATIN SMALL LETTER Q
'r' # 0x0072 -> LATIN SMALL LETTER R
's' # 0x0073 -> LATIN SMALL LETTER S
't' # 0x0074 -> LATIN SMALL LETTER T
'u' # 0x0075 -> LATIN SMALL LETTER U
'v' # 0x0076 -> LATIN SMALL LETTER V
'w' # 0x0077 -> LATIN SMALL LETTER W
'x' # 0x0078 -> LATIN SMALL LETTER X
'y' # 0x0079 -> LATIN SMALL LETTER Y
'z' # 0x007a -> LATIN SMALL LETTER Z
'{' # 0x007b -> LEFT CURLY BRACKET
'|' # 0x007c -> VERTICAL LINE
'}' # 0x007d -> RIGHT CURLY BRACKET
'~' # 0x007e -> TILDE
'\x7f' # 0x007f -> DELETE
'\u0106' # 0x0080 -> LATIN CAPITAL LETTER C WITH ACUTE
'\xfc' # 0x0081 -> LATIN SMALL LETTER U WITH DIAERESIS
'\xe9' # 0x0082 -> LATIN SMALL LETTER E WITH ACUTE
'\u0101' # 0x0083 -> LATIN SMALL LETTER A WITH MACRON
'\xe4' # 0x0084 -> LATIN SMALL LETTER A WITH DIAERESIS
'\u0123' # 0x0085 -> LATIN SMALL LETTER G WITH CEDILLA
'\xe5' # 0x0086 -> LATIN SMALL LETTER A WITH RING ABOVE
'\u0107' # 0x0087 -> LATIN SMALL LETTER C WITH ACUTE
'\u0142' # 0x0088 -> LATIN SMALL LETTER L WITH STROKE
'\u0113' # 0x0089 -> LATIN SMALL LETTER E WITH MACRON
'\u0156' # 0x008a -> LATIN CAPITAL LETTER R WITH CEDILLA
'\u0157' # 0x008b -> LATIN SMALL LETTER R WITH CEDILLA
'\u012b' # 0x008c -> LATIN SMALL LETTER I WITH MACRON
'\u0179' # 0x008d -> LATIN CAPITAL LETTER Z WITH ACUTE
'\xc4' # 0x008e -> LATIN CAPITAL LETTER A WITH DIAERESIS
'\xc5' # 0x008f -> LATIN CAPITAL LETTER A WITH RING ABOVE
'\xc9' # 0x0090 -> LATIN CAPITAL LETTER E WITH ACUTE
'\xe6' # 0x0091 -> LATIN SMALL LIGATURE AE
'\xc6' # 0x0092 -> LATIN CAPITAL LIGATURE AE
'\u014d' # 0x0093 -> LATIN SMALL LETTER O WITH MACRON
'\xf6' # 0x0094 -> LATIN SMALL LETTER O WITH DIAERESIS
'\u0122' # 0x0095 -> LATIN CAPITAL LETTER G WITH CEDILLA
'\xa2' # 0x0096 -> CENT SIGN
'\u015a' # 0x0097 -> LATIN CAPITAL LETTER S WITH ACUTE
'\u015b' # 0x0098 -> LATIN SMALL LETTER S WITH ACUTE
'\xd6' # 0x0099 -> LATIN CAPITAL LETTER O WITH DIAERESIS
'\xdc' # 0x009a -> LATIN CAPITAL LETTER U WITH DIAERESIS
'\xf8' # 0x009b -> LATIN SMALL LETTER O WITH STROKE
'\xa3' # 0x009c -> POUND SIGN
'\xd8' # 0x009d -> LATIN CAPITAL LETTER O WITH STROKE
'\xd7' # 0x009e -> MULTIPLICATION SIGN
'\xa4' # 0x009f -> CURRENCY SIGN
'\u0100' # 0x00a0 -> LATIN CAPITAL LETTER A WITH MACRON
'\u012a' # 0x00a1 -> LATIN CAPITAL LETTER I WITH MACRON
'\xf3' # 0x00a2 -> LATIN SMALL LETTER O WITH ACUTE
'\u017b' # 0x00a3 -> LATIN CAPITAL LETTER Z WITH DOT ABOVE
'\u017c' # 0x00a4 -> LATIN SMALL LETTER Z WITH DOT ABOVE
'\u017a' # 0x00a5 -> LATIN SMALL LETTER Z WITH ACUTE
'\u201d' # 0x00a6 -> RIGHT DOUBLE QUOTATION MARK
'\xa6' # 0x00a7 -> BROKEN BAR
'\xa9' # 0x00a8 -> COPYRIGHT SIGN
'\xae' # 0x00a9 -> REGISTERED SIGN
'\xac' # 0x00aa -> NOT SIGN
'\xbd' # 0x00ab -> VULGAR FRACTION ONE HALF
'\xbc' # 0x00ac -> VULGAR FRACTION ONE QUARTER
'\u0141' # 0x00ad -> LATIN CAPITAL LETTER L WITH STROKE
'\xab' # 0x00ae -> LEFT-POINTING DOUBLE ANGLE QUOTATION MARK
'\xbb' # 0x00af -> RIGHT-POINTING DOUBLE ANGLE QUOTATION MARK
'\u2591' # 0x00b0 -> LIGHT SHADE
'\u2592' # 0x00b1 -> MEDIUM SHADE
'\u2593' # 0x00b2 -> DARK SHADE
'\u2502' # 0x00b3 -> BOX DRAWINGS LIGHT VERTICAL
'\u2524' # 0x00b4 -> BOX DRAWINGS LIGHT VERTICAL AND LEFT
'\u0104' # 0x00b5 -> LATIN CAPITAL LETTER A WITH OGONEK
'\u010c' # 0x00b6 -> LATIN CAPITAL LETTER C WITH CARON
'\u0118' # 0x00b7 -> LATIN CAPITAL LETTER E WITH OGONEK
'\u0116' # 0x00b8 -> LATIN CAPITAL LETTER E WITH DOT ABOVE
'\u2563' # 0x00b9 -> BOX DRAWINGS DOUBLE VERTICAL AND LEFT
'\u2551' # 0x00ba -> BOX DRAWINGS DOUBLE VERTICAL
'\u2557' # 0x00bb -> BOX DRAWINGS DOUBLE DOWN AND LEFT
'\u255d' # 0x00bc -> BOX DRAWINGS DOUBLE UP AND LEFT
'\u012e' # 0x00bd -> LATIN CAPITAL LETTER I WITH OGONEK
'\u0160' # 0x00be -> LATIN CAPITAL LETTER S WITH CARON
'\u2510' # 0x00bf -> BOX DRAWINGS LIGHT DOWN AND LEFT
'\u2514' # 0x00c0 -> BOX DRAWINGS LIGHT UP AND RIGHT
'\u2534' # 0x00c1 -> BOX DRAWINGS LIGHT UP AND HORIZONTAL
'\u252c' # 0x00c2 -> BOX DRAWINGS LIGHT DOWN AND HORIZONTAL
'\u251c' # 0x00c3 -> BOX DRAWINGS LIGHT VERTICAL AND RIGHT
'\u2500' # 0x00c4 -> BOX DRAWINGS LIGHT HORIZONTAL
'\u253c' # 0x00c5 -> BOX DRAWINGS LIGHT VERTICAL AND HORIZONTAL
'\u0172' # 0x00c6 -> LATIN CAPITAL LETTER U WITH OGONEK
'\u016a' # 0x00c7 -> LATIN CAPITAL LETTER U WITH MACRON
'\u255a' # 0x00c8 -> BOX DRAWINGS DOUBLE UP AND RIGHT
'\u2554' # 0x00c9 -> BOX DRAWINGS DOUBLE DOWN AND RIGHT
'\u2569' # 0x00ca -> BOX DRAWINGS DOUBLE UP AND HORIZONTAL
'\u2566' # 0x00cb -> BOX DRAWINGS DOUBLE DOWN AND HORIZONTAL
'\u2560' # 0x00cc -> BOX DRAWINGS DOUBLE VERTICAL AND RIGHT
'\u2550' # 0x00cd -> BOX DRAWINGS DOUBLE HORIZONTAL
'\u256c' # 0x00ce -> BOX DRAWINGS DOUBLE VERTICAL AND HORIZONTAL
'\u017d' # 0x00cf -> LATIN CAPITAL LETTER Z WITH CARON
'\u0105' # 0x00d0 -> LATIN SMALL LETTER A WITH OGONEK
'\u010d' # 0x00d1 -> LATIN SMALL LETTER C WITH CARON
'\u0119' # 0x00d2 -> LATIN SMALL LETTER E WITH OGONEK
'\u0117' # 0x00d3 -> LATIN SMALL LETTER E WITH DOT ABOVE
'\u012f' # 0x00d4 -> LATIN SMALL LETTER I WITH OGONEK
'\u0161' # 0x00d5 -> LATIN SMALL LETTER S WITH CARON
'\u0173' # 0x00d6 -> LATIN SMALL LETTER U WITH OGONEK
'\u016b' # 0x00d7 -> LATIN SMALL LETTER U WITH MACRON
'\u017e' # 0x00d8 -> LATIN SMALL LETTER Z WITH CARON
'\u2518' # 0x00d9 -> BOX DRAWINGS LIGHT UP AND LEFT
'\u250c' # 0x00da -> BOX DRAWINGS LIGHT DOWN AND RIGHT
'\u2588' # 0x00db -> FULL BLOCK
'\u2584' # 0x00dc -> LOWER HALF BLOCK
'\u258c' # 0x00dd -> LEFT HALF BLOCK
'\u2590' # 0x00de -> RIGHT HALF BLOCK
'\u2580' # 0x00df -> UPPER HALF BLOCK
'\xd3' # 0x00e0 -> LATIN CAPITAL LETTER O WITH ACUTE
'\xdf' # 0x00e1 -> LATIN SMALL LETTER SHARP S (GERMAN)
'\u014c' # 0x00e2 -> LATIN CAPITAL LETTER O WITH MACRON
'\u0143' # 0x00e3 -> LATIN CAPITAL LETTER N WITH ACUTE
'\xf5' # 0x00e4 -> LATIN SMALL LETTER O WITH TILDE
'\xd5' # 0x00e5 -> LATIN CAPITAL LETTER O WITH TILDE
'\xb5' # 0x00e6 -> MICRO SIGN
'\u0144' # 0x00e7 -> LATIN SMALL LETTER N WITH ACUTE
'\u0136' # 0x00e8 -> LATIN CAPITAL LETTER K WITH CEDILLA
'\u0137' # 0x00e9 -> LATIN SMALL LETTER K WITH CEDILLA
'\u013b' # 0x00ea -> LATIN CAPITAL LETTER L WITH CEDILLA
'\u013c' # 0x00eb -> LATIN SMALL LETTER L WITH CEDILLA
'\u0146' # 0x00ec -> LATIN SMALL LETTER N WITH CEDILLA
'\u0112' # 0x00ed -> LATIN CAPITAL LETTER E WITH MACRON
'\u0145' # 0x00ee -> LATIN CAPITAL LETTER N WITH CEDILLA
'\u2019' # 0x00ef -> RIGHT SINGLE QUOTATION MARK
'\xad' # 0x00f0 -> SOFT HYPHEN
'\xb1' # 0x00f1 -> PLUS-MINUS SIGN
'\u201c' # 0x00f2 -> LEFT DOUBLE QUOTATION MARK
'\xbe' # 0x00f3 -> VULGAR FRACTION THREE QUARTERS
'\xb6' # 0x00f4 -> PILCROW SIGN
'\xa7' # 0x00f5 -> SECTION SIGN
'\xf7' # 0x00f6 -> DIVISION SIGN
'\u201e' # 0x00f7 -> DOUBLE LOW-9 QUOTATION MARK
'\xb0' # 0x00f8 -> DEGREE SIGN
'\u2219' # 0x00f9 -> BULLET OPERATOR
'\xb7' # 0x00fa -> MIDDLE DOT
'\xb9' # 0x00fb -> SUPERSCRIPT ONE
'\xb3' # 0x00fc -> SUPERSCRIPT THREE
'\xb2' # 0x00fd -> SUPERSCRIPT TWO
'\u25a0' # 0x00fe -> BLACK SQUARE
'\xa0' # 0x00ff -> NO-BREAK SPACE
)
### Encoding Map
encoding_map = {
0x0000: 0x0000, # NULL
0x0001: 0x0001, # START OF HEADING
0x0002: 0x0002, # START OF TEXT
0x0003: 0x0003, # END OF TEXT
0x0004: 0x0004, # END OF TRANSMISSION
0x0005: 0x0005, # ENQUIRY
0x0006: 0x0006, # ACKNOWLEDGE
0x0007: 0x0007, # BELL
0x0008: 0x0008, # BACKSPACE
0x0009: 0x0009, # HORIZONTAL TABULATION
0x000a: 0x000a, # LINE FEED
0x000b: 0x000b, # VERTICAL TABULATION
0x000c: 0x000c, # FORM FEED
0x000d: 0x000d, # CARRIAGE RETURN
0x000e: 0x000e, # SHIFT OUT
0x000f: 0x000f, # SHIFT IN
0x0010: 0x0010, # DATA LINK ESCAPE
0x0011: 0x0011, # DEVICE CONTROL ONE
0x0012: 0x0012, # DEVICE CONTROL TWO
0x0013: 0x0013, # DEVICE CONTROL THREE
0x0014: 0x0014, # DEVICE CONTROL FOUR
0x0015: 0x0015, # NEGATIVE ACKNOWLEDGE
0x0016: 0x0016, # SYNCHRONOUS IDLE
0x0017: 0x0017, # END OF TRANSMISSION BLOCK
0x0018: 0x0018, # CANCEL
0x0019: 0x0019, # END OF MEDIUM
0x001a: 0x001a, # SUBSTITUTE
0x001b: 0x001b, # ESCAPE
0x001c: 0x001c, # FILE SEPARATOR
0x001d: 0x001d, # GROUP SEPARATOR
0x001e: 0x001e, # RECORD SEPARATOR
0x001f: 0x001f, # UNIT SEPARATOR
0x0020: 0x0020, # SPACE
0x0021: 0x0021, # EXCLAMATION MARK
0x0022: 0x0022, # QUOTATION MARK
0x0023: 0x0023, # NUMBER SIGN
0x0024: 0x0024, # DOLLAR SIGN
0x0025: 0x0025, # PERCENT SIGN
0x0026: 0x0026, # AMPERSAND
0x0027: 0x0027, # APOSTROPHE
0x0028: 0x0028, # LEFT PARENTHESIS
0x0029: 0x0029, # RIGHT PARENTHESIS
0x002a: 0x002a, # ASTERISK
0x002b: 0x002b, # PLUS SIGN
0x002c: 0x002c, # COMMA
0x002d: 0x002d, # HYPHEN-MINUS
0x002e: 0x002e, # FULL STOP
0x002f: 0x002f, # SOLIDUS
0x0030: 0x0030, # DIGIT ZERO
0x0031: 0x0031, # DIGIT ONE
0x0032: 0x0032, # DIGIT TWO
0x0033: 0x0033, # DIGIT THREE
0x0034: 0x0034, # DIGIT FOUR
0x0035: 0x0035, # DIGIT FIVE
0x0036: 0x0036, # DIGIT SIX
0x0037: 0x0037, # DIGIT SEVEN
0x0038: 0x0038, # DIGIT EIGHT
0x0039: 0x0039, # DIGIT NINE
0x003a: 0x003a, # COLON
0x003b: 0x003b, # SEMICOLON
0x003c: 0x003c, # LESS-THAN SIGN
0x003d: 0x003d, # EQUALS SIGN
0x003e: 0x003e, # GREATER-THAN SIGN
0x003f: 0x003f, # QUESTION MARK
0x0040: 0x0040, # COMMERCIAL AT
0x0041: 0x0041, # LATIN CAPITAL LETTER A
0x0042: 0x0042, # LATIN CAPITAL LETTER B
0x0043: 0x0043, # LATIN CAPITAL LETTER C
0x0044: 0x0044, # LATIN CAPITAL LETTER D
0x0045: 0x0045, # LATIN CAPITAL LETTER E
0x0046: 0x0046, # LATIN CAPITAL LETTER F
0x0047: 0x0047, # LATIN CAPITAL LETTER G
0x0048: 0x0048, # LATIN CAPITAL LETTER H
0x0049: 0x0049, # LATIN CAPITAL LETTER I
0x004a: 0x004a, # LATIN CAPITAL LETTER J
0x004b: 0x004b, # LATIN CAPITAL LETTER K
0x004c: 0x004c, # LATIN CAPITAL LETTER L
0x004d: 0x004d, # LATIN CAPITAL LETTER M
0x004e: 0x004e, # LATIN CAPITAL LETTER N
0x004f: 0x004f, # LATIN CAPITAL LETTER O
0x0050: 0x0050, # LATIN CAPITAL LETTER P
0x0051: 0x0051, # LATIN CAPITAL LETTER Q
0x0052: 0x0052, # LATIN CAPITAL LETTER R
0x0053: 0x0053, # LATIN CAPITAL LETTER S
0x0054: 0x0054, # LATIN CAPITAL LETTER T
0x0055: 0x0055, # LATIN CAPITAL LETTER U
0x0056: 0x0056, # LATIN CAPITAL LETTER V
0x0057: 0x0057, # LATIN CAPITAL LETTER W
0x0058: 0x0058, # LATIN CAPITAL LETTER X
0x0059: 0x0059, # LATIN CAPITAL LETTER Y
0x005a: 0x005a, # LATIN CAPITAL LETTER Z
0x005b: 0x005b, # LEFT SQUARE BRACKET
0x005c: 0x005c, # REVERSE SOLIDUS
0x005d: 0x005d, # RIGHT SQUARE BRACKET
0x005e: 0x005e, # CIRCUMFLEX ACCENT
0x005f: 0x005f, # LOW LINE
0x0060: 0x0060, # GRAVE ACCENT
0x0061: 0x0061, # LATIN SMALL LETTER A
0x0062: 0x0062, # LATIN SMALL LETTER B
0x0063: 0x0063, # LATIN SMALL LETTER C
0x0064: 0x0064, # LATIN SMALL LETTER D
0x0065: 0x0065, # LATIN SMALL LETTER E
0x0066: 0x0066, # LATIN SMALL LETTER F
0x0067: 0x0067, # LATIN SMALL LETTER G
0x0068: 0x0068, # LATIN SMALL LETTER H
0x0069: 0x0069, # LATIN SMALL LETTER I
0x006a: 0x006a, # LATIN SMALL LETTER J
0x006b: 0x006b, # LATIN SMALL LETTER K
0x006c: 0x006c, # LATIN SMALL LETTER L
0x006d: 0x006d, # LATIN SMALL LETTER M
0x006e: 0x006e, # LATIN SMALL LETTER N
0x006f: 0x006f, # LATIN SMALL LETTER O
0x0070: 0x0070, # LATIN SMALL LETTER P
0x0071: 0x0071, # LATIN SMALL LETTER Q
0x0072: 0x0072, # LATIN SMALL LETTER R
0x0073: 0x0073, # LATIN SMALL LETTER S
0x0074: 0x0074, # LATIN SMALL LETTER T
0x0075: 0x0075, # LATIN SMALL LETTER U
0x0076: 0x0076, # LATIN SMALL LETTER V
0x0077: 0x0077, # LATIN SMALL LETTER W
0x0078: 0x0078, # LATIN SMALL LETTER X
0x0079: 0x0079, # LATIN SMALL LETTER Y
0x007a: 0x007a, # LATIN SMALL LETTER Z
0x007b: 0x007b, # LEFT CURLY BRACKET
0x007c: 0x007c, # VERTICAL LINE
0x007d: 0x007d, # RIGHT CURLY BRACKET
0x007e: 0x007e, # TILDE
0x007f: 0x007f, # DELETE
0x00a0: 0x00ff, # NO-BREAK SPACE
0x00a2: 0x0096, # CENT SIGN
0x00a3: 0x009c, # POUND SIGN
0x00a4: 0x009f, # CURRENCY SIGN
0x00a6: 0x00a7, # BROKEN BAR
0x00a7: 0x00f5, # SECTION SIGN
0x00a9: 0x00a8, # COPYRIGHT SIGN
0x00ab: 0x00ae, # LEFT-POINTING DOUBLE ANGLE QUOTATION MARK
0x00ac: 0x00aa, # NOT SIGN
0x00ad: 0x00f0, # SOFT HYPHEN
0x00ae: 0x00a9, # REGISTERED SIGN
0x00b0: 0x00f8, # DEGREE SIGN
0x00b1: 0x00f1, # PLUS-MINUS SIGN
0x00b2: 0x00fd, # SUPERSCRIPT TWO
0x00b3: 0x00fc, # SUPERSCRIPT THREE
0x00b5: 0x00e6, # MICRO SIGN
0x00b6: 0x00f4, # PILCROW SIGN
0x00b7: 0x00fa, # MIDDLE DOT
0x00b9: 0x00fb, # SUPERSCRIPT ONE
0x00bb: 0x00af, # RIGHT-POINTING DOUBLE ANGLE QUOTATION MARK
0x00bc: 0x00ac, # VULGAR FRACTION ONE QUARTER
0x00bd: 0x00ab, # VULGAR FRACTION ONE HALF
0x00be: 0x00f3, # VULGAR FRACTION THREE QUARTERS
0x00c4: 0x008e, # LATIN CAPITAL LETTER A WITH DIAERESIS
0x00c5: 0x008f, # LATIN CAPITAL LETTER A WITH RING ABOVE
0x00c6: 0x0092, # LATIN CAPITAL LIGATURE AE
0x00c9: 0x0090, # LATIN CAPITAL LETTER E WITH ACUTE
0x00d3: 0x00e0, # LATIN CAPITAL LETTER O WITH ACUTE
0x00d5: 0x00e5, # LATIN CAPITAL LETTER O WITH TILDE
0x00d6: 0x0099, # LATIN CAPITAL LETTER O WITH DIAERESIS
0x00d7: 0x009e, # MULTIPLICATION SIGN
0x00d8: 0x009d, # LATIN CAPITAL LETTER O WITH STROKE
0x00dc: 0x009a, # LATIN CAPITAL LETTER U WITH DIAERESIS
0x00df: 0x00e1, # LATIN SMALL LETTER SHARP S (GERMAN)
0x00e4: 0x0084, # LATIN SMALL LETTER A WITH DIAERESIS
0x00e5: 0x0086, # LATIN SMALL LETTER A WITH RING ABOVE
0x00e6: 0x0091, # LATIN SMALL LIGATURE AE
0x00e9: 0x0082, # LATIN SMALL LETTER E WITH ACUTE
0x00f3: 0x00a2, # LATIN SMALL LETTER O WITH ACUTE
0x00f5: 0x00e4, # LATIN SMALL LETTER O WITH TILDE
0x00f6: 0x0094, # LATIN SMALL LETTER O WITH DIAERESIS
0x00f7: 0x00f6, # DIVISION SIGN
0x00f8: 0x009b, # LATIN SMALL LETTER O WITH STROKE
0x00fc: 0x0081, # LATIN SMALL LETTER U WITH DIAERESIS
0x0100: 0x00a0, # LATIN CAPITAL LETTER A WITH MACRON
0x0101: 0x0083, # LATIN SMALL LETTER A WITH MACRON
0x0104: 0x00b5, # LATIN CAPITAL LETTER A WITH OGONEK
0x0105: 0x00d0, # LATIN SMALL LETTER A WITH OGONEK
0x0106: 0x0080, # LATIN CAPITAL LETTER C WITH ACUTE
0x0107: 0x0087, # LATIN SMALL LETTER C WITH ACUTE
0x010c: 0x00b6, # LATIN CAPITAL LETTER C WITH CARON
0x010d: 0x00d1, # LATIN SMALL LETTER C WITH CARON
0x0112: 0x00ed, # LATIN CAPITAL LETTER E WITH MACRON
0x0113: 0x0089, # LATIN SMALL LETTER E WITH MACRON
0x0116: 0x00b8, # LATIN CAPITAL LETTER E WITH DOT ABOVE
0x0117: 0x00d3, # LATIN SMALL LETTER E WITH DOT ABOVE
0x0118: 0x00b7, # LATIN CAPITAL LETTER E WITH OGONEK
0x0119: 0x00d2, # LATIN SMALL LETTER E WITH OGONEK
0x0122: 0x0095, # LATIN CAPITAL LETTER G WITH CEDILLA
0x0123: 0x0085, # LATIN SMALL LETTER G WITH CEDILLA
0x012a: 0x00a1, # LATIN CAPITAL LETTER I WITH MACRON
0x012b: 0x008c, # LATIN SMALL LETTER I WITH MACRON
0x012e: 0x00bd, # LATIN CAPITAL LETTER I WITH OGONEK
0x012f: 0x00d4, # LATIN SMALL LETTER I WITH OGONEK
0x0136: 0x00e8, # LATIN CAPITAL LETTER K WITH CEDILLA
0x0137: 0x00e9, # LATIN SMALL LETTER K WITH CEDILLA
0x013b: 0x00ea, # LATIN CAPITAL LETTER L WITH CEDILLA
0x013c: 0x00eb, # LATIN SMALL LETTER L WITH CEDILLA
0x0141: 0x00ad, # LATIN CAPITAL LETTER L WITH STROKE
0x0142: 0x0088, # LATIN SMALL LETTER L WITH STROKE
0x0143: 0x00e3, # LATIN CAPITAL LETTER N WITH ACUTE
0x0144: 0x00e7, # LATIN SMALL LETTER N WITH ACUTE
0x0145: 0x00ee, # LATIN CAPITAL LETTER N WITH CEDILLA
0x0146: 0x00ec, # LATIN SMALL LETTER N WITH CEDILLA
0x014c: 0x00e2, # LATIN CAPITAL LETTER O WITH MACRON
0x014d: 0x0093, # LATIN SMALL LETTER O WITH MACRON
0x0156: 0x008a, # LATIN CAPITAL LETTER R WITH CEDILLA
0x0157: 0x008b, # LATIN SMALL LETTER R WITH CEDILLA
0x015a: 0x0097, # LATIN CAPITAL LETTER S WITH ACUTE
0x015b: 0x0098, # LATIN SMALL LETTER S WITH ACUTE
0x0160: 0x00be, # LATIN CAPITAL LETTER S WITH CARON
0x0161: 0x00d5, # LATIN SMALL LETTER S WITH CARON
0x016a: 0x00c7, # LATIN CAPITAL LETTER U WITH MACRON
0x016b: 0x00d7, # LATIN SMALL LETTER U WITH MACRON
0x0172: 0x00c6, # LATIN CAPITAL LETTER U WITH OGONEK
0x0173: 0x00d6, # LATIN SMALL LETTER U WITH OGONEK
0x0179: 0x008d, # LATIN CAPITAL LETTER Z WITH ACUTE
0x017a: 0x00a5, # LATIN SMALL LETTER Z WITH ACUTE
0x017b: 0x00a3, # LATIN CAPITAL LETTER Z WITH DOT ABOVE
0x017c: 0x00a4, # LATIN SMALL LETTER Z WITH DOT ABOVE
0x017d: 0x00cf, # LATIN CAPITAL LETTER Z WITH CARON
0x017e: 0x00d8, # LATIN SMALL LETTER Z WITH CARON
0x2019: 0x00ef, # RIGHT SINGLE QUOTATION MARK
0x201c: 0x00f2, # LEFT DOUBLE QUOTATION MARK
0x201d: 0x00a6, # RIGHT DOUBLE QUOTATION MARK
0x201e: 0x00f7, # DOUBLE LOW-9 QUOTATION MARK
0x2219: 0x00f9, # BULLET OPERATOR
0x2500: 0x00c4, # BOX DRAWINGS LIGHT HORIZONTAL
0x2502: 0x00b3, # BOX DRAWINGS LIGHT VERTICAL
0x250c: 0x00da, # BOX DRAWINGS LIGHT DOWN AND RIGHT
0x2510: 0x00bf, # BOX DRAWINGS LIGHT DOWN AND LEFT
0x2514: 0x00c0, # BOX DRAWINGS LIGHT UP AND RIGHT
0x2518: 0x00d9, # BOX DRAWINGS LIGHT UP AND LEFT
0x251c: 0x00c3, # BOX DRAWINGS LIGHT VERTICAL AND RIGHT
0x2524: 0x00b4, # BOX DRAWINGS LIGHT VERTICAL AND LEFT
0x252c: 0x00c2, # BOX DRAWINGS LIGHT DOWN AND HORIZONTAL
0x2534: 0x00c1, # BOX DRAWINGS LIGHT UP AND HORIZONTAL
0x253c: 0x00c5, # BOX DRAWINGS LIGHT VERTICAL AND HORIZONTAL
0x2550: 0x00cd, # BOX DRAWINGS DOUBLE HORIZONTAL
0x2551: 0x00ba, # BOX DRAWINGS DOUBLE VERTICAL
0x2554: 0x00c9, # BOX DRAWINGS DOUBLE DOWN AND RIGHT
0x2557: 0x00bb, # BOX DRAWINGS DOUBLE DOWN AND LEFT
0x255a: 0x00c8, # BOX DRAWINGS DOUBLE UP AND RIGHT
0x255d: 0x00bc, # BOX DRAWINGS DOUBLE UP AND LEFT
0x2560: 0x00cc, # BOX DRAWINGS DOUBLE VERTICAL AND RIGHT
0x2563: 0x00b9, # BOX DRAWINGS DOUBLE VERTICAL AND LEFT
0x2566: 0x00cb, # BOX DRAWINGS DOUBLE DOWN AND HORIZONTAL
0x2569: 0x00ca, # BOX DRAWINGS DOUBLE UP AND HORIZONTAL
0x256c: 0x00ce, # BOX DRAWINGS DOUBLE VERTICAL AND HORIZONTAL
0x2580: 0x00df, # UPPER HALF BLOCK
0x2584: 0x00dc, # LOWER HALF BLOCK
0x2588: 0x00db, # FULL BLOCK
0x258c: 0x00dd, # LEFT HALF BLOCK
0x2590: 0x00de, # RIGHT HALF BLOCK
0x2591: 0x00b0, # LIGHT SHADE
0x2592: 0x00b1, # MEDIUM SHADE
0x2593: 0x00b2, # DARK SHADE
0x25a0: 0x00fe, # BLACK SQUARE
}
import warnings
import os
import functools
import pandas as pd
import numpy as np
from tqdm import tqdm
from sklearn.utils.validation import check_array
import enricher.regulon.regulon_enrichment as regulon_enrichment
import enricher.features.expression_utils as expression_utils
import enricher.regulon.regulon_utils as regulon_utils
import argparse
# Silence noisy third-party UserWarnings (e.g. from sklearn/pandas) for the
# whole module.
warnings.simplefilter("ignore", UserWarning)
# Resolve the bundled data directory: relative to the CWD when run as a
# script, relative to this module's location when imported as a package.
if __name__ == '__main__':
    DATA_PATH = os.path.join(os.getcwd(), 'data')
else:
    dirname = os.path.dirname(__file__)
    DATA_PATH = os.path.join(dirname, 'data')
# Pathway Commons interaction network (SIF) and the pre-compiled secondary
# interaction regulon shipped with the package data.
sif_file = DATA_PATH + '/PathwayCommons9.All.hgnc.sif.gz'
sec_intx_file = DATA_PATH + '/secondary_intx_regulon.pkl'
class Error(Exception):
    """Base class for exceptions raised by this module."""
class OmicError(Error):
    """Raised when duplicate -omic feature or sample names are detected."""
class Enrichment(object):
"""Base enrichment class for predicting regulon enrichment from -omic datasets.
Args:
cohort :
expr (:obj:`pd.DataFrame`, shape = [n_feats, n_samps])
regulon (:obj: `pandas DataFrame`)
regulon_size (int): Minimum number of edges for a given regulator.
sec_intx_file (str): Path to pre-compiled secondary interaction network.
"""
def __init__(self, cohort, expr, regulon=None, regulon_size=15, sec_intx=sec_intx_file,
thresh_filter=0.1):
if not isinstance(expr, pd.DataFrame):
raise TypeError("`expr` must be a pandas DataFrame, found "
"{} instead!".format(type(expr)))
if len(set(expr.index)) != expr.shape[0]:
print(len(set(expr.index)))
print(expr.shape)
raise OmicError("Duplicate feature names in {cohort} dataset!".format(cohort=cohort))
if len(set(expr.columns)) != expr.shape[1]:
raise OmicError("Duplicate sample names in {cohort} dataset!".format(cohort=cohort))
self.cohort = cohort
self.expr = expr
if regulon is None:
self.regulon = regulon_utils.read_pickle(sec_intx)
else:
self.regulon = regulon
self.scaler_type = None
self.scaled = False
self.regulon_size = regulon_size
self.regulon_weights = None
self.thresh_filter = thresh_filter
self.total_enrichment = None
self.delta = None
self.local_enrichment = None
self.regulators = None
self.quant_nes = None
    def __str__(self):
        # NOTE: the backslash line-continuations inside the triple-quoted string
        # are intentional -- they splice the template across physical lines, so
        # the continuation line's leading whitespace is part of the output.
        return """------\nCohort: {}\nn-features: {}\nn-samples: {}\nscaler: {}\nscaled:\
    {}\nregulon threshold: {}\nregulon nodes: {}\nregulon edges: {}\n------\n""".\
            format(self.cohort,
                   self.expr.shape[0],
                   self.expr.shape[1],
                   self.scaler_type,
                   self.scaled, self.regulon_size,
                   len(self.regulon.UpGene.unique()),
                   self.regulon.shape[0])
    def __repr__(self):
        # Mirrors __str__: a multi-line summary of cohort, data dimensions,
        # scaling state, and regulon size. The in-string backslash continuation
        # is intentional and affects the emitted whitespace.
        return """------\nCohort: {}\nn-features: {}\nn-samples: {}\nscaler: {}\nscaled: {}\
    \nregulon threshold: {}\nregulon nodes: {}\nregulon edges: {}\n------\n""".\
            format(self.cohort,
                   self.expr.shape[0],
                   self.expr.shape[1],
                   self.scaler_type,
                   self.scaled,
                   self.regulon_size,
                   len(self.regulon.UpGene.unique()),
                   self.regulon.shape[0])
@staticmethod
def _preprocess_data(expr, scaler_type='robust', thresh_filter=0.0):
""" Centers expression data based on a specified data scaler algorithm
Args:
expr (pandas DataFrame obj): pandas DataFrame of [n_features, n_samples]
scaler_type (str): Scaler to normalized features/samples by:
standard | robust | minmax | quant
thresh_filter (float): Prior to normalization remove features that have
a standard deviation per feature less than {thresh_filter}
Returns:
scaled_frame (:obj: `pandas DataFrame`) : pandas DataFrame containing
scaled expression data of shape [n_samples, n_features]
"""
# By default, the input is checked to be a non-empty 2D array containing
# only finite values.
_ = check_array(expr)
scaler_opt = {'standard': expression_utils.StandardScaler(),
'robust': expression_utils.RobustScaler(),
'minmax': expression_utils.MinMaxScaler(),
'quant': expression_utils.QuantileTransformer()}
if scaler_type not in scaler_opt:
raise KeyError('{scaler_type} not supported scaler_type!'
' Supported types include: {keys}'.format(
scaler_type=scaler_type, keys=' | '.join(scaler_opt.keys())))
scaler = scaler_opt[scaler_type]
# Transpose frame to correctly orient frame for scaling and machine learning algorithms
expr_t = expr[(expr.std(axis=1) > thresh_filter)].T
print('--- Centering features with {} scaler ---'.format(scaler_type))
scaled_frame = pd.DataFrame(scaler.fit_transform(expr_lt),
index=expr_lt.index,
columns=expr_lt.columns)
return scaled_frame
@staticmethod
def _prune_regulon(expr, regulon, regulon_size):
    """ Prunes regulon with secondary interactions that do not meet
    the necessary number of downstream interactions metric {regulon_size}

    Args:
        expr (pandas DataFrame obj): pandas DataFrame of [n_samples, n_features]
        regulon (:obj: `pandas DataFrame`) : pandas DataFrame containing weight
            interactions between regulator and downstream members of its regulon
            of shape [len(Target), ['Regulator','Target','MoA','likelihood']
        regulon_size (int) : number of downstream interactions required for a
            given regulator in order to calculate enrichment score

    Returns:
        filtered_regulon (:obj: `pandas DataFrame`) : regulon restricted to
            regulators with at least {regulon_size} measured interactions
        sub_expr (:obj: `pandas DataFrame`) : expression frame restricted to the
            genes participating in the filtered regulon
    """
    # Keep only interactions whose regulator AND target are measured in expr.
    expr_filtered_regulon = regulon[
        ((regulon.UpGene.isin(expr.columns)) & (regulon.DownGene.isin(expr.columns)))].\
        set_index('UpGene')
    idx = (expr_filtered_regulon.index.value_counts() >= regulon_size)
    # Boolean-mask directly (idx[idx]) instead of comparing a boolean Series to True.
    filtered_regulon = expr_filtered_regulon.loc[idx[idx].index].reset_index()
    # Union of regulators and targets that survived the pruning step.
    edges = list(set(filtered_regulon.UpGene) | set(filtered_regulon.DownGene))
    sub_expr = expr.loc[:, edges]
    return filtered_regulon, sub_expr
@staticmethod
def _structure_weights(regulator, pruned_regulon, f_statistics, r_frame, p_frame):
    """ Calculates weights associated with regulators. Weights are the summation of
    the F-statistic and absolute spearman correlation coefficient. The weight
    retains the sign of the spearman correlation coefficient.

    Args:
        regulator (str): A feature to assign weights to downstream interactions
        pruned_regulon (:obj:`pd.DataFrame`, shape = [n_interactions, 3]
        f_statistics (dict) : Dictionary with key:{regulator} key and
        r_frame (:obj:`pd.DataFrame`), shape = [n_features, n_features]
        p_frame (:obj:`pd.DataFrame`), shape = [n_features, n_features]

    Returns:
        weights_ordered (:obj:`pd.DataFrame`), shape = [n_interactions, 3]
    """
    interactions = pruned_regulon[(pruned_regulon['UpGene'] == regulator)]
    targets = interactions.DownGene

    likelihood = p_frame.loc[targets, regulator]
    likelihood.name = 'likelihood'

    f_stat = f_statistics[regulator][0]
    rho = r_frame.loc[targets, regulator]

    # Weight = (F-statistic + |rho|), carrying the sign of the correlation.
    moa = (f_stat + abs(rho)) * np.sign(rho)
    moa.index.name = 'Target'
    moa.name = 'MoA'

    structured = moa.to_frame()
    structured['likelihood'] = likelihood
    structured['Regulator'] = regulator
    structured = structured.reset_index()
    structured = structured.reindex(['Regulator', 'Target', 'MoA', 'likelihood'], axis=1)
    return structured.set_index('Regulator')
def scale(self, scaler_type='robust', thresh_filter=0.1):
    """ Fit and scale expression data based on a specified data scaler algorithm

    Args:
        scaler_type (str or None): Scaler to normalized features/samples by:
            standard | robust | minmax | quant. ``None`` skips scaling and only
            transposes the expression frame.
        thresh_filter (float): Prior to normalization remove features that do not have
            the mean unit of a feature (i.e. 1 tpm) is greater than {thresh_filter}
    """
    self.scaler_type = scaler_type
    if scaler_type is None:  # identity check is the idiomatic None comparison
        warnings.warn('Proceeding without scaling dataset!')
        # Transpose so downstream code sees [n_samples, n_features], the same
        # orientation _preprocess_data produces.
        self.expr = self.expr.T
    else:
        self.expr = self._preprocess_data(self.expr, self.scaler_type, thresh_filter)
    # NOTE(review): `scaled` is marked True even on the unscaled (None) branch,
    # which suppresses the second warning in assign_weights() — confirm intended.
    self.scaled = True
def assign_weights(self):
    """
    Generate normalized likelihood weights and assigns those weights to the absolute gene
    expression signature

    Side effects:
        Replaces ``self.expr`` with the subset of genes present in the pruned
        regulon and stores the final interaction weights in ``self.regulon_weights``.
    """
    if not self.scaled:
        warnings.warn('Assigning interaction weights without scaling dataset!')
    pruned_regulon, sub_expr = self._prune_regulon(self.expr, self.regulon, self.regulon_size)
    self.expr = sub_expr
    # Pairwise spearman correlation (and p-values) across all retained features.
    # noinspection PyTypeChecker
    r, p = regulon_utils.spearmanr(self.expr)
    r_frame = pd.DataFrame(r, columns=self.expr.columns, index=self.expr.columns)
    p_frame = pd.DataFrame(p, columns=self.expr.columns, index=self.expr.columns)
    # Per-regulator F-statistics of target expression vs. regulator expression.
    F_statistics = {regulator: regulon_utils.f_regression(
        self.expr.reindex(frame.DownGene, axis=1),
        self.expr.reindex([regulator], axis=1).values.ravel())
        for regulator, frame in pruned_regulon.groupby('UpGene')}
    weights = pd.concat([self._structure_weights(regulator,
                                                 pruned_regulon,
                                                 F_statistics,
                                                 r_frame,
                                                 p_frame)
                         for regulator in F_statistics])
    # Drop interactions whose combined weight overflowed to +/-inf.
    self.regulon_weights = weights[~np.isinf(weights.MoA)]
def calculate_enrichment(self):
    """
    Subset and generate regulator activity scores based on rank ordering of up-regulated
    and down-regulated targets

    Raises:
        TypeError: if ``assign_weights`` has not populated ``self.regulon_weights``.

    Side effects:
        Stores results in ``self.quant_nes``, ``self.regulators``,
        ``self.total_enrichment``, ``self.local_enrichment`` and ``self.delta``.
    """
    if self.regulon_weights is None:
        raise TypeError("`regulon_weights` must be assigned prior to enrichment calculation,"
                        " found {} instead!".format(type(self.regulon_weights)))
    quant_nes = regulon_enrichment.quantile_nes_score(self.regulon_weights, self.expr.T)
    self.quant_nes = quant_nes
    self.regulators = self.regulon_weights.index.unique()
    print('--- Calculating regulon enrichment scores ---')
    # Score each regulator; tqdm renders a progress bar over the regulator list.
    nes_list, local_enrich_list, delta_list = zip(*list(map(functools.partial(regulon_enrichment.score_enrichment,
                                                                              expr=self.expr,
                                                                              regulon=self.regulon_weights,
                                                                              quant_nes=quant_nes),
                                                            tqdm(self.regulators))))
    self.total_enrichment = pd.concat(nes_list, axis=1)
    self.local_enrichment = pd.concat(local_enrich_list, axis=1)
    self.delta = pd.concat(delta_list, axis=1)
def main():
    """Command-line entry point: parse arguments, build an Enrichment object,
    scale the expression data, assign regulon weights, compute enrichment
    scores, and write the pickled object plus a TSV of scores to {out_dir}."""
    parser = argparse.ArgumentParser(
        "Infer transcription factor activity from gene expression data utilizing pathway and molecular interactions "
        "and mechanisms available through Pathway Commons."
    )
    parser.add_argument('cohort', type=str, help="which TCGA cohort to use")
    parser.add_argument('expr', type=str, help="which tab delimited expression matrix to use "
                                               "shape : [n_features, n_samples]"
                                               "units : TPM, RPKM")
    parser.add_argument('out_dir', type=str, help="output directory")
    parser.add_argument('--regulon', type=str, help="optional regulon containing weight interactions between "
                                                    "regulator and downstream members of its regulon"
                                                    "shape : [len(Target), ['Regulator','Target','MoA','likelihood']",
                        default=None)
    parser.add_argument('--regulon_size', type=int, help="number of downstream interactions required for a given "
                                                         "regulator in order to calculate enrichment score", default=15)
    parser.add_argument('--sec_intx', type=str, help="path to pre-compiled serialized secondary "
                                                     "interaction network", default=sec_intx_file)
    parser.add_argument('--scaler_type', type=str, help="Scaler to normalized features/samples by: "
                                                        "standard | robust | minmax | quant", default='robust')
    parser.add_argument('--thresh_filter', type=float, help="Prior to normalization remove features that have a standard "
                                                            "deviation per feature less than {thresh_filter}",
                        default=0.1)
    # parse command line arguments
    args = parser.parse_args()
    expr_matrix = pd.read_table(args.expr, index_col=0)
    enr_obj = Enrichment(cohort=args.cohort, expr=expr_matrix, regulon=args.regulon,
                         regulon_size=args.regulon_size, sec_intx=args.sec_intx,
                         thresh_filter=args.thresh_filter)
    print(enr_obj)
    # Pipeline: scale -> weight -> score, mirroring the Enrichment API order.
    print('\nScaling data...\n')
    enr_obj.scale(scaler_type=args.scaler_type, thresh_filter=args.thresh_filter)
    print('\nData scaled!\n')
    print('\nAssigning weights...\n')
    enr_obj.assign_weights()
    print('\nWeights assigned!\n')
    print('\nCalculating enrichment...\n')
    enr_obj.calculate_enrichment()
    print('\nEnrichment scores calculated!\n')
    # Persist the whole object and the per-sample enrichment matrix.
    regulon_utils.ensure_dir(args.out_dir)
    regulon_utils.write_pickle(enr_obj, os.path.join(args.out_dir, '{}_enrichment.pkl'.format(args.cohort)))
    enr_obj.total_enrichment.to_csv(os.path.join(args.out_dir, '{}_regulon_enrichment.tsv'.format(args.cohort)), sep='\t')
    print('Complete')
if __name__ == "__main__":
main() | /regulon_enrichment-0.7a2-py3-none-any.whl/enricher/enrich.py | 0.73782 | 0.257625 | enrich.py | pypi |
import warnings
warnings.simplefilter("ignore", UserWarning)
import pandas as pd
import dill as pickle
import functools
import os
from sklearn.feature_selection import f_regression, mutual_info_regression
from sklearn.mixture import BayesianGaussianMixture as GMM
from scipy.stats import spearmanr, pearsonr
import scipy.stats as st
import numpy as np
from tqdm import tqdm
import timeit
def load_sif():
    """Read the Pathway Commons SIF file (module-level ``sif_file``) into a DataFrame
    with columns ['UpGene', 'Type', 'DownGene']."""
    columns = ['UpGene', 'Type', 'DownGene']
    return pd.read_csv(sif_file, header=None, sep='\t', names=columns)
def filter_sif(sif, intx_type='controls-expression-of'):
    """Return only the interactions whose ``Type`` equals *intx_type*."""
    type_mask = sif['Type'] == intx_type
    return sif[type_mask]
def load_secondary_itx_sif():
    """ Load precompiled secondary interaction sif

    Returns:
        (pandas.DataFrame): pandas.DataFrame obj of length: n interactions and
        columns: ['UpGene','Type',DownGene']
    """
    columns = ['UpGene', 'Type', 'DownGene']
    return pd.read_csv(sec_intx_file, header=None, sep='\t', names=columns)
def write_pickle(obj, relnm):
    """ Serialize object to pickle and write to disk at relnm

    Args:
        obj (`:obj:`) : Python object to be pickled
        relnm (str) : Relative name/path to pickle on disk

    Returns:
        'Serialized object to disk at {}'.format(relnm)
    """
    # protocol=-1 selects the highest protocol the pickler supports.
    with open(relnm, 'wb') as handle:
        pickle.dump(obj, handle, protocol=-1)
    return 'Serialized object to disk at {}'.format(relnm)
def read_pickle(relnm):
    """ Read serialized object from pickle on disk at relnm

    Args:
        relnm (str) : Relative name/path to pickled object

    Returns:
        obj (`:obj: unpickled object`)
    """
    with open(relnm, 'rb') as handle:
        loaded = pickle.load(handle)
    print('Loaded object from disk at {}'.format(relnm))
    return loaded
def ensure_dir(relnm):
    """ Accept relative filepath string, create it if it doesnt already exist
    return filepath string

    Args:
        relnm (str) : Relative name/path

    Returns:
        relnm (str)
    """
    target = os.path.join(os.getcwd(), relnm)
    # Guard clause: nothing to do when the directory already exists.
    if os.path.exists(target):
        return relnm
    print('--- path does not exist : {} ---'.format(target))
    print('--- constructing path : {} ---'.format(target))
    os.makedirs(target)
    return relnm
def traverse_interactions(regulator, filt_sif):
    """ Parse interaction network and add secondary interactions on a per regulator basis

    Args:
        regulator (str): Regulator to expand interaction network
        filt_sif (pandas.DataFrame): pandas.DataFrame obj of length: n interactions and
            columns: ['UpGene','Type',DownGene']

    Returns:
        comb_idx (pandas.DataFrame): pandas.DataFrame obj of length: n interactions + secondary interactions
            and columns: ['UpGene','Type',DownGene']
    """
    sub_reg = filt_sif[(filt_sif.UpGene == regulator)]
    down_genes = sub_reg.DownGene.unique()
    # .copy() so the reassignment below operates on an owned frame rather than a
    # boolean-mask slice of the caller's data (avoids SettingWithCopyWarning).
    secondary_itx = filt_sif[(filt_sif.UpGene.isin(down_genes))].copy()
    # Re-attribute each secondary interaction to the original regulator.
    secondary_itx.loc[:, 'UpGene'] = regulator
    comb_idx = pd.concat([sub_reg, secondary_itx])
    comb_idx.loc[:, 'Type'] = 'controls-expression-of'
    comb_idx = comb_idx.drop_duplicates()
    # Discard self-loops introduced by the expansion.
    comb_idx = comb_idx[(comb_idx.DownGene != regulator)]
    return comb_idx
def generate_expanded_regulon():
    """ Generates an expanded Pathway Commons regulon with secondary down-stream interactions for
    regulators that control the expression of other regulators

    Returns:
        Nothing - Generates a pickled pandas dataframe for future reference/use
    """
    print('--- Generating regulon with primary and secondary interactions ---')
    sif = load_sif()
    filt_sif = filter_sif(sif)
    regulators = filt_sif.UpGene.unique()
    # Expand each regulator's regulon with its targets' own downstream targets.
    regulon_list = list(map(functools.partial(traverse_interactions, filt_sif = filt_sif), regulators))
    regulon = pd.concat(regulon_list)
    # Round-trip through the index; leaves 'UpGene' as the leading column.
    regulon.set_index('UpGene', inplace = True)
    regulon.reset_index(inplace=True)
    print('---- Regulon constructed ---')
    # Serialized to a path relative to the working directory — see write_pickle.
    write_pickle(regulon, '../data/secondary_intx_regulon.pkl')
def bgm_moa(regul_weights):
    """ Fits regulon mode of activation weights to a bayesian gaussian mixture model with three components and computes
    the probability of the three distributions (repression, non-influential, activation) for each regulator

    Args:
        regul_weights (:obj: `pandas DataFrame`) : pandas DataFrame containing weight interactions between regulator and
        downstream members of its regulon of shape [len(Target), ['Regulator','Target','MoA','likelihood']

    Returns:
        Mode (:obj: `numpy array`) : signed, per-interaction score — activation share for
        positive-MoA interactions minus repression share for negative-MoA interactions
    """
    # Three mixture components model repression / non-influential / activation.
    g = GMM(n_components = 1000 if False else 3, max_iter = 1000) if False else GMM(n_components = 3, max_iter = 1000)
    sub_reg = regul_weights.copy()
    sub_reg_vals = sub_reg.MoA.values.reshape(-1, 1)
    g.fit(sub_reg_vals)
    mu = g.means_.flatten()
    sigma = np.sqrt(g.covariances_).flatten()
    # Sort components by mean: fit[0] = lowest (repression), fit[1] = middle
    # (non-influential), fit[2] = highest (activation).
    fit = sorted(list(zip(mu, sigma)))
    activation = (st.norm.cdf(sub_reg_vals, fit[2][0], fit[2][1]))
    repression = 1 - st.norm.cdf(sub_reg_vals, fit[0][0], fit[0][1])
    total_lower = 1 - st.norm.cdf(sub_reg_vals, fit[1][0], fit[1][1])
    total_upper = (st.norm.cdf(sub_reg_vals, fit[1][0], fit[1][1]))
    # Indicator columns for the sign of each interaction's MoA
    # (MoA == 0 sets both indicators).
    copy_target = sub_reg.copy()
    copy_target['up'] = 0
    copy_target['down'] = 0
    copy_target.loc[(copy_target.MoA >= 0), 'up'] = 1
    copy_target.loc[(copy_target.MoA <= 0), 'down'] = 1
    up_moa = copy_target.up.values.reshape(copy_target.shape[0], 1)
    down_moa = copy_target.down.values.reshape(copy_target.shape[0], 1)
    # Signed score: normalized activation probability for up interactions minus
    # normalized repression probability for down interactions.
    Mode = (activation / (repression + total_lower + activation) * up_moa) -\
           (repression / (repression + total_upper + activation) * down_moa)
    return Mode
def prune_regulon(expr, regulon, regulon_size):
    """ Prunes regulon with secondary interactions that do not meet the necessary number of downstream interactions
    metric {regulon_size}

    Args:
        expr (pandas DataFrame obj): pandas DataFrame of [n_samples, n_features]
        regulon (:obj: `pandas DataFrame`) : pandas DataFrame containing weight interactions between regulator and
        downstream members of its regulon of shape [len(Target), ['Regulator','Target','MoA','likelihood']
        regulon_size (int) : number of downstream interactions required for a given regulator in order to calculate
        enrichment score

    Returns:
        filtered_regulon (:obj: `pandas DataFrame`) : pandas DataFrame containing weight interactions between regulator
        and downstream members of its regulon of shape [len(Target), ['Regulator','Target','MoA','likelihood']
    """
    # .copy() so the in-place set_index below mutates an owned frame, not a
    # boolean-mask slice of the caller's regulon (avoids SettingWithCopyWarning).
    expr_filtered_regulon = regulon[((regulon.UpGene.isin(expr.columns)) &
                                     (regulon.DownGene.isin(expr.columns)))].copy()
    expr_filtered_regulon.set_index('UpGene', inplace=True)
    idx = (expr_filtered_regulon.index.value_counts() >= regulon_size)
    # Boolean-mask directly instead of comparing a boolean Series to True.
    filt_idx = idx[idx]
    filtered_regulon = expr_filtered_regulon.loc[filt_idx.index]
    # Non-inplace reset keeps us off the chained-assignment path as well.
    filtered_regulon = filtered_regulon.reset_index()
    return filtered_regulon
def regulon_weight_assignment(regulator, expr, filtered_regulon):
    """ Assigns probability and weights for regulator - target interactions

    Args:
        regulator (str): Regulator to expand interaction network
        expr (:obj: `pandas DataFrame`) : pandas DataFrame containing scaled expression data of
        shape [n_samples, n_features]
        filtered_regulon (:obj: `pandas DataFrame`) : pandas DataFrame containing weight interactions between regulator
        and downstream members of its regulon of shape [len(Target), ['Regulator','Target','MoA','likelihood']

    Returns:
        regul_weights (:obj: `pandas DataFrame`) : pandas DataFrame containing weight interactions between regulator and
        downstream members of its regulon of shape [len(Target), ['Regulator','Target','MoA','likelihood']
    """
    sub_reg = filtered_regulon[(filtered_regulon['UpGene'] == regulator)]
    # Target expression; dropna discards targets absent from expr after reindex.
    X = expr.reindex(sub_reg.DownGene.values, axis = 1).dropna(axis = 1)
    y = expr.reindex([regulator], axis = 1)
    # Column-wise spearman correlation of each target against the regulator.
    spr_results = X.apply(lambda col: spearmanr(col, y.iloc[:, 0]), axis = 0).apply(pd.Series)
    spr_result = spr_results[0]    # correlation coefficients
    spr_pvalues = spr_results[1]   # p-values, stored below as 'likelihood'
    f_test, _ = f_regression(X, y.values.ravel())
    weights = f_test
    # Weight = F-statistic + |rho|, carrying the sign of the correlation.
    weights_spr = weights + abs(spr_result)
    regul_weights = (weights_spr * np.sign(spr_result)).to_frame()
    regul_weights.columns = ['MoA']
    regul_weights.index.name = 'Target'
    regul_weights.reset_index(inplace = True)
    regul_weights['Regulator'] = regulator
    regul_weights['likelihood'] = spr_pvalues.values
    regul_weights = regul_weights.reindex(['Regulator', 'Target', 'MoA', 'likelihood'], axis = 1)
    regul_weights.set_index('Regulator', inplace = True)
    # Drop interactions whose weight overflowed to +/-inf.
    regul_weights = regul_weights[~np.isinf(regul_weights.MoA)]
    return regul_weights
def structure_weights(regulator, pruned_regulon, f_statistics, r_frame, p_frame):
    """ Calculates weights associated with regulators. Weights are the summation of the F-statistic and absolute
    spearman correlation coefficient. The weight retains the sign of the spearman correlation coefficient.

    Args:
        regulator (str): A feature to assign weights to downstream interactions
        pruned_regulon (:obj:`pd.DataFrame`, shape = [n_interactions, 3]
        f_statistics (dict) : Dictionary with key:{regulator} key and
        r_frame (:obj:`pd.DataFrame`), shape = [n_features, n_features]
        p_frame (:obj:`pd.DataFrame`), shape = [n_features, n_features]

    Returns:
        weights_ordered (:obj:`pd.DataFrame`), shape = [n_interactions, 3]
    """
    interactions = pruned_regulon[(pruned_regulon['UpGene'] == regulator)]
    targets = interactions.DownGene

    likelihood = p_frame.loc[targets, regulator]
    likelihood.name = 'likelihood'

    f_stat = f_statistics[regulator][0]
    rho = r_frame.loc[targets, regulator]

    # Weight = (F-statistic + |rho|), signed by the correlation direction.
    moa = (f_stat + abs(rho)) * np.sign(rho)
    moa.index.name = 'Target'
    moa.name = 'MoA'

    structured = moa.to_frame()
    structured['likelihood'] = likelihood
    structured['Regulator'] = regulator
    structured = structured.reset_index()
    structured = structured.reindex(['Regulator', 'Target', 'MoA', 'likelihood'], axis=1)
    return structured.set_index('Regulator')
def generate_bolstered_regulon(expr, cohort, regulon_size=15):
    """ Calculate weights for PC regulon and a dataset using mutual information, f-statistic to test for linear
    relationships, and the spearman correlation coefficient to determine the mode of regulation

    Args:
        expr (:obj: `pandas DataFrame`) : pandas DataFrame containing scaled expression data of
        shape [n_samples, n_features]
        cohort (str) : name of cohort to associate with compiled regulon
        regulon_size (int) : required number of downstream interactions for a give regulator

    Returns:
        regul_weights (:obj: `pandas DataFrame`) : pandas DataFrame containing weight interactions between regulator and
        downstream members of its regulon of shape [len(Target), ['Regulator','Target','MoA','likelihood']
    """
    # Cache location for this cohort's compiled regulon.
    bolstered_relnm = os.path.join(dirname, '../experiments/{0}/data/{0}_bolstered_regulon.pkl'.format(cohort))
    # Check to see if bolstered regulon exists
    if os.path.isfile(bolstered_relnm):
        print('--- loading context specific regulon ---')
        total_regulon = read_pickle(bolstered_relnm)
    else:
        # Load (or build, then load) the unfiltered secondary-interaction regulon.
        if os.path.isfile(sec_intx_file):
            print('--- loading unfiltered regulon ---')
            regulon = read_pickle(sec_intx_file)
        else:
            generate_expanded_regulon()
            regulon = read_pickle(sec_intx_file)
        print('--- pruning regulon ---')
        filtered_regulon = prune_regulon(expr, regulon, regulon_size)
        regulators = filtered_regulon.UpGene.unique()
        print('--- compiling regulon of {} regulators and {} interactions with a minimum of {} interactions ---'.
              format(len(regulators), filtered_regulon.shape[0], regulon_size))
        # Weight every regulator's interactions; tqdm shows progress.
        regulon_list = list(map(functools.partial(regulon_weight_assignment, expr=expr,
                                                  filtered_regulon = filtered_regulon), tqdm(regulators)))
        total_regulon = pd.concat(regulon_list)
        # Persist for future runs of this cohort.
        relnm = os.path.join(dirname, '../experiments/{0}/data'.format(cohort))
        ensure_dir(relnm)
        write_pickle(total_regulon, os.path.join(relnm, '{}_bolstered_regulon.pkl'.format(cohort)))
    return total_regulon
import datetime
import pandas as pd
import scipy.stats as st
import numpy as np
def assign_weights(lh):
    """ Generate normalized likelihood weights and assigns those weights to the absolute gene expression signature

    Args:
        lh (pandas DataFrame): sparse DataFrame indicating likelihood for transcription factors

    Returns:
        nes_wt (pandas series): weight associated for each regulator based on their absolute gene expression signature
        wts (pandas DataFrame): sparse DataFrame indicating likelihood for regulators
    """
    # Normalize likelihoods by each regulator's row maximum.
    norm_wts = lh.T.divide(lh.max(axis = 1)).T
    norm_wts = norm_wts.fillna(0.0)
    # Euclidean norm of each regulator's weight vector.
    nes_wt = pd.DataFrame(np.sqrt((norm_wts ** 2).sum(axis=1)))
    # Rescale so each regulator's weights sum to one.
    row_totals = norm_wts.T.sum()
    norm_wts = (norm_wts.T / row_totals).T
    return nes_wt, norm_wts
def ensure_overlap(lh, mor, expr):
    """ ensures label overlap with weights and expression matrix

    Args:
        lh (pandas DataFrame): sparse DataFrame indicating likelihood for regulators
        mor (pandas DataFrame): sparse DataFrame indicating mode or regulation for transcription factors
        expr (:obj: `pandas DataFrame`): pandas DataFrame of shape [n_feats, n_samps]

    Returns:
        expression, mor, lh (pandas DataFrame): expression, mode of regulation, and likelihood frames, respectfully,
        re-indexed to be concordant with weights associated with regulon
    """
    # Restrict expression rows to the regulon's target genes, then align the
    # regulon frames' columns to that gene ordering.
    expression = expr.reindex(mor.columns)
    aligned_mor = mor.reindex(expression.index, axis=1)
    aligned_lh = lh.reindex(expression.index, axis=1)
    return expression, aligned_mor, aligned_lh
def deconstruct_regulon(regulon):
    """Returns a pandas DataFrame for likelihood and mode of regulation inferred by compile_total_regulon

    Args:
        regulon (pandas DataFrame): Aracne-AP Regulon file in four column format i.e. Regulator,Target,MoA,likelihood

    Returns:
        lh, mor (pandas DataFrame): sparse DataFrame indicating likelihood and mode or
        regulation for transcription factors
    """
    # Pivot long-format interactions into regulator x target matrices; absent
    # pairs become 0.0.
    lh = regulon.pivot(index = 'Regulator', columns = 'Target', values = 'likelihood')
    mor = regulon.pivot(index = 'Regulator', columns = 'Target', values = 'MoA')
    return lh.fillna(0.0), mor.fillna(0.0)
def quantile_nes_score(regulon, expr):
    """ Generates quantile transformed rank position enrichment scores

    Args:
        regulon (:obj: `pandas DataFrame`): Aracne-AP Regulon file in four column format i.e.
        Regulator,Target,MoA,likelihood
        expr (:obj: `pandas DataFrame`): pandas DataFrame of shape [n_feats, n_samps]

    Returns:
        nes (:obj: `pandas DataFrame`): normalized enrichment scores per regulator
    """
    reset_regulon = regulon.reset_index()
    lh, mor = deconstruct_regulon(reset_regulon)
    # Align expression, mode-of-regulation and likelihood frames on shared genes.
    expression, mor, lh = ensure_overlap(lh, mor, expr)
    nes_wt, wts = assign_weights(lh)
    pos = expression.index.get_indexer_for(lh.columns)
    # Rank-transform each gene into (0, 1) and map through the normal inverse CDF.
    t2 = expression.rank() / (expression.shape[0] + 1)
    t2 = pd.DataFrame(st.norm.ppf(t2.iloc[pos, ], loc=0, scale=1), columns=t2.columns, index=t2.index)
    # Weighted sum of target z-scores per regulator, scaled by each regulator's
    # weight-vector norm.
    sum1 = (mor * wts).dot(t2)
    nes = pd.DataFrame(sum1.values * nes_wt.values, columns=sum1.columns, index=sum1.index)
    return nes
def load_quantile(regulon, expr, cohort):
    """ return pandas series of quantile enrichment scores for a given regulator

    Args:
        regulon (:obj: `pandas DataFrame`): Aracne-AP Regulon file in four column format i.e.
        Regulator,Target,MoA,likelihood
        expr (:obj: `pandas DataFrame`): pandas DataFrame of shape [n_feats, n_samps]
        cohort (str) : name of cohort to associate with compiled regulon

    Returns:
        nes (obj: pandas series): series of quantile enrichment scores for a give regulator
    """
    # NOTE(review): `os`, `dirname` and `read_pickle` are not imported in this
    # module's visible header — presumably provided elsewhere; verify at import time.
    quantile_nes = os.path.join(dirname, '../experiments/{0}/data/{0}_quantile_ranks.pkl'.format(cohort))
    # Reuse a cached score frame when one exists for this cohort.
    if os.path.isfile(quantile_nes):
        print('--- Loading quantile normalization scores ---')
        nes = read_pickle(quantile_nes)
    else:
        print('--- Generating quantile normalization scores ---')
        nes = quantile_nes_score(regulon, expr.T)
    return nes
def subset_regulon(regulator, regulon, expr):
    """ Subset expression frame by regulator targets expressed in expression frame and by mode of regulation

    Args:
        regulator (str) : Regulator to subset expression frame by
        regulon (:obj: `pandas DataFrame`) : pandas DataFrame of Regulator-Target interactions
        expr (:obj: `pandas DataFrame`): pandas DataFrame of shape [n_samps, n_feats]

    Returns:
        down_reg_sub (:obj: `pandas DataFrame`) : pandas DataFrame of down regulated targets regulator normed expression
        values
        up_reg_sub (:obj: `pandas DataFrame`) : pandas DataFrame of up regulated targets regulator normed expression
        values
    """
    targets = regulon.loc[regulator]
    target_expr = expr.reindex(targets.Target.values, axis = 1)
    # Split target expression by the sign of the mode of regulation.
    repressed = target_expr.loc[:, (targets.MoA < 0.0).values]
    activated = target_expr.loc[:, (targets.MoA > 0.0).values]
    return repressed, activated
def rank_and_order_total(expr_sub, regulator, regulon, ascending, expr):
    """ Rank and order transcription factor targets expression frame

    Args:
        expr_sub (:obj: `pandas DataFrame`) : pandas DataFrame of regulated targets regulator normed expression
        regulator (str) : Regulator to subset expression frame by
        regulon (:obj: `pandas DataFrame`): pandas DataFrame of regulon returned by compile_regulon
        with columns ['Target', 'MoA', 'likelihood']
        ascending (bool): Boolean flag to rank regulon gene set via ascending/descending manner
        expr (:obj: `pandas DataFrame`): pandas DataFrame of shape [n_samps, n_feats]

    Returns:
        rank_ordered (:obj: `pandas DataFrame`) : pandas DataFrame of regulated targets regulator normed expression
    """
    total_ranked = expr.rank(method = 'max', ascending = ascending, axis = 1)
    # Weight table for this regulator's targets present in expr_sub.
    moa_frame = regulon.loc[regulator, ].loc[regulon.loc[regulator, ].Target.isin(expr_sub.columns),
                                             ['Target', 'MoA', 'likelihood']].reset_index()
    moa_frame.index = moa_frame.Target
    moa_frame.likelihood = moa_frame.likelihood / moa_frame.likelihood.max()
    moa_frame['weights'] = moa_frame.MoA * moa_frame.likelihood
    # NOTE(review): the 'weights' column computed above is discarded by the next
    # line — ranks end up weighted by raw MoA only. Confirm whether the
    # likelihood-weighted version was intended.
    moa_frame = moa_frame.loc[:, 'MoA'].to_frame().T
    ranks = total_ranked.loc[:, expr_sub.columns]
    # weighted_ranks = pd.np.multiply(ranks, moa_frame).sum(axis = 1).to_frame()
    weighted_ranks = np.multiply(ranks, moa_frame).sum(axis = 1).to_frame()
    # Store minimum rank for samples - this value is equivalent to the total number of targets in expr_sub i.e. if all
    # genes for a particular sample rank first the rank_min = 1.0 * #genes
    # rank_min = 1.0 * expr_sub.shape[1]
    rank_min = weighted_ranks.min().values[0]
    # Store maximum rank for samples - this value is equivalent to the total number of targets in expr_sub i.e. if all
    # genes for a particular sample rank last the rank_min = #samples * #genes
    rank_max = weighted_ranks.max().values[0]
    weighted_ranks['min'] = rank_min
    weighted_ranks['max'] = rank_max
    return weighted_ranks
def format_nes_frame(down_reg_ordered, up_reg_ordered, regulator):
    """ Function to concatenate and sum down and up regulated z-score rankings

    Args:
        down_reg_ordered (:obj: `pandas DataFrame`) : pandas DataFrame of z-scores for down regulated
        targets of regulator
        up_reg_ordered (:obj: `pandas DataFrame`) : pandas DataFrame of z-scores for up regulated
        targets of regulator
        regulator (str) : Regulator that controls the activity of a regulon

    Returns:
        zframe (:obj: `pandas DataFrame`) : pandas DataFrame of average z-scores for up and down-regulated targets
    """
    # Column 0 holds the weighted rank sums produced by rank_and_order_total.
    down_normed = pd.DataFrame(down_reg_ordered.loc[:, 0].values, columns = ['down-regulated-targets'],
                               index = down_reg_ordered.index)
    down_normed = down_normed.fillna(0.0)
    up_normed = pd.DataFrame(up_reg_ordered.loc[:, 0].values, columns = ['up-regulated-targets'],
                             index = up_reg_ordered.index)
    up_normed = up_normed.fillna(0.0)
    join_r = pd.concat([down_normed, up_normed], axis = 1)
    join_r.columns = ['down-regulated-targets', 'up-regulated-targets']
    # Negate the down-regulated contribution so repression lowers the score.
    zframe = ((join_r['down-regulated-targets'] * -1) + join_r['up-regulated-targets']).to_frame()
    zframe.columns = [regulator]
    zframe[regulator] = st.zscore(zframe[regulator])
    # Re-center on the median and scale by the standard deviation.
    zframe = (zframe - zframe.median()) / zframe.std()
    return zframe
def format_delta(down_reg_sub, up_reg_sub):
    """ Take the mean difference from up/down - regulated targets and scale

    Args:
        down_reg_sub (:obj: `pandas DataFrame`) : pandas DataFrame of down regulated targets regulator normed expression
        up_reg_sub (:obj: `pandas DataFrame`) : pandas DataFrame of up regulated targets regulator normed expression

    Returns:
        delta (:obj: `pandas DataFrame`) : pandas DataFrame of the mean delta difference of regulator targets
        per sample
    """
    # Z-score up-regulated targets per sample (axis=1), re-center on the median.
    up_reg_sub = pd.DataFrame(st.zscore(up_reg_sub, axis = 1), columns = up_reg_sub.columns, index = up_reg_sub.index)
    up_reg_normed = up_reg_sub - up_reg_sub.median()
    up_reg_normed = (up_reg_normed.median(axis = 1) / up_reg_normed.median(axis = 1).max()).\
        fillna(0.0).replace([np.inf, -np.inf], 0.0)
    # NOTE(review): down-regulated targets are z-scored along scipy's default
    # axis=0, unlike the axis=1 used above — confirm the asymmetry is intentional.
    down_reg_sub = pd.DataFrame(st.zscore(down_reg_sub), columns = down_reg_sub.columns, index = down_reg_sub.index)
    down_reg_normed = down_reg_sub + down_reg_sub.median()
    down_reg_normed = (down_reg_normed.median(axis = 1) / down_reg_normed.median(axis = 1).max()).\
        fillna(0.0).replace([np.inf, -np.inf], 0.0)
    # Per-sample shift: positive when activated targets dominate repressed ones.
    delta = up_reg_normed - down_reg_normed
    delta = delta.to_frame()
    delta.columns = ['Delta']
    delta['Delta'] = st.zscore(delta)
    return delta
def score_enrichment(regulator, expr, regulon, quant_nes):
    """ Function to subset and generate regulator activity scores based
    on rank ordering of up-regulated and down-regulated targets

    Args:
        regulator (str) : Regulator to subset expression frame by
        expr (:obj: `pandas DataFrame`): pandas DataFrame of shape [n_samps, n_feats]
        regulon (:obj: `pandas DataFrame`): pandas DataFrame of regulon returned by compile_regulon
        with columns ['Target', 'MoA', 'likelihood']
        quant_nes (obj: `pandas DataFrame`): quantile enrichment scores for regulators

    Return:
        enrichment_score (:obj: `pandas DataFrame`): pandas DataFrame of activity scores for specified regulator
    """
    # Progress/debug output: one line per regulator scored.
    print(regulator)
    down_reg_sub, up_reg_sub = subset_regulon(regulator, regulon, expr)
    # Rank up and down regulated targets by z-scores. Sum rank values across rows
    # (Compute numerical data ranks [1 through n] along axis)
    # and sort samples lowest to highest summed rank score.
    down_reg_ordered = rank_and_order_total(down_reg_sub, regulator, regulon, ascending=False, expr=expr)
    up_reg_ordered = rank_and_order_total(up_reg_sub, regulator, regulon, ascending=True, expr=expr)
    zframe = format_nes_frame(down_reg_ordered, up_reg_ordered, regulator)
    delta = format_delta(down_reg_sub, up_reg_sub)
    delta_ = delta.copy()
    delta_.columns = [regulator]
    local_enrich = zframe.copy()
    # Final score = local rank z-score + delta shift + quantile NES baseline.
    zframe[regulator] = zframe.values + delta.values
    enrichment_score = zframe[regulator] + quant_nes.loc[regulator]
    return enrichment_score, local_enrich, delta_
def logger(**kwargs):
    """ Generates a log file of arguments passed to EnrichR.py

    Args:
        **kwargs: paired key word arguments

    Returns:
        None
    """
    cohort = kwargs['cohort']
    relnm = os.path.join(dirname, '../experiments/{0}/data'.format(cohort))
    now = datetime.datetime.now()
    ensure_dir(relnm)
    # `with` guarantees the handle is closed even if a write raises; the
    # original left the file open on any exception between open() and close().
    with open(os.path.join(relnm, '{}_kwargs.txt'.format(cohort)), 'w') as out_f:
        out_f.write("EnrichR generated regulon, enrichment scores and scaled expression data-set compiled on "
                    "{} with the following **kwargs \n".
                    format(now.strftime("%Y-%m-%d %H:%M")))
        for k, v in kwargs.items():
            out_f.write('* {} : {} \n'.format(k, v))
def generate_enrichment_scores(expr_f, cohort, norm_type = 'robust', feature = True, sample = False,
                               thresh_filter = 0.4, scale = True, regulon_size = 15):
    """ Runs expression and regulon_utils functions to generate cohort specific regulon and enrichment scores

    Args:
        expr_f (str): absolute path to tab delimited expression file of shape = [n_features, n_samples]
        cohort (str) : name of cohort to associate with compiled regulon and enrichment scores
        norm_type (str): Scaler to normalized features/samples by: standard | robust | minmax | quant
        feature (bool): Scale expression data by features
        sample (bool): Scale expression data by both features and samples
        thresh_filter (float): Prior to normalization remove features that do not have the mean unit of
        a feature (i.e. 1 tpm) is greater than {thresh_filter}
        scale (bool): optional arg to avoid scaling dataset if data set has been normalized prior to analysis
        regulon_size (int) : required number of downstream interactions for a give regulator

    Returns:
        None
    """
    # Snapshot of the call arguments, written out by logger() below.
    input_args = locals()
    total_enrichment_nes = os.path.join(dirname, '../experiments/{0}/data/{0}_total_enrichment.pkl'.format(cohort))
    if os.path.isfile(total_enrichment_nes):
        print('--- Regulon enrichment scores pre-computed ---')
        print(total_enrichment_nes)
    else:
        # NOTE(review): load_expr / load_scaled_expr / dirname / functools / tqdm
        # are not defined in this module's visible header — presumably imported
        # elsewhere; verify at import time.
        non_scaled_expr = load_expr(expr_f)
        expr = load_scaled_expr(non_scaled_expr, cohort = cohort, norm_type = norm_type, feature = feature,
                                sample = sample, thresh_filter = thresh_filter, scale = scale)
        regulon = generate_bolstered_regulon(expr, cohort, regulon_size = regulon_size)
        quant_nes = load_quantile(regulon, expr, cohort)
        regulators = regulon.index.unique()
        print('--- Calculating regulon enrichment scores ---')
        nes_list = list(map(functools.partial(score_enrichment, expr=expr, regulon = regulon, quant_nes=quant_nes),
                            tqdm(regulators)))
        total_enrichment = pd.concat(nes_list, axis=1)
        relnm = os.path.join(dirname, '../experiments/{0}/data'.format(cohort))
        ensure_dir(relnm)
        write_pickle(total_enrichment, os.path.join(relnm, '{}_total_enrichment.pkl'.format(cohort)))
    logger(**input_args)
import numpy as np
import os
from sklearn.preprocessing import RobustScaler, MinMaxScaler, StandardScaler, QuantileTransformer
import pandas as pd
def load_expr(expr_f):
    """Read a tab-delimited expression matrix from disk.

    Args:
        expr_f (str): absolute path to a tab-delimited expression file of
            shape = [n_features, n_samples]; the first column is used as
            the feature index.

    Returns:
        pandas.DataFrame: expression matrix indexed by feature name,
            one column per sample.
    """
    return pd.read_csv(expr_f, index_col=0, sep='\t')
def log_norm(expr):
    """Log-normalizes a dataset, usually RNA-seq expression.

    Puts a matrix of continuous values into log-space after adding
    a constant derived from the smallest non-zero value.

    Args:
        expr (:obj:`np.ndarray` or :obj:`pandas.DataFrame` of :obj:`float`,
            shape = [n_samples, n_features])

    Returns:
        Same type as ``expr``: the log2-transformed matrix. For DataFrame
        input, columns containing NaN are dropped.

    Examples:
        >>> norm_expr = log_norm(np.array([[1.0, 0], [2.0, 8.0]]))
        >>> print(norm_expr)
        [[ 0.5849625  -1.        ]
         [ 1.32192809  3.08746284]]
    """
    # Pseudo-count of half the smallest positive value keeps zeros finite
    # after the log transform.
    log_add = np.nanmin(expr[expr > 0]) * 0.5
    norm_mat = np.log2(expr + log_add)
    # BUG FIX: .dropna only exists on DataFrames; the documented ndarray
    # call path (see doctest) previously raised AttributeError here.
    if isinstance(norm_mat, pd.DataFrame):
        norm_mat = norm_mat.dropna(axis=1)
    return norm_mat
def fit_and_transform_array(expr, norm_type='robust', feature = True, sample = False, thresh_filter = 0.4, scale=True):
    """Fit and scale expression data with the requested scaler algorithm.

    Args:
        expr (pandas DataFrame obj): pandas DataFrame of [n_features, n_samples]
        norm_type (str): Scaler to normalized features/samples by: standard | robust | minmax | quant
        feature (bool): Scale expression data by features
        sample (bool): Scale expression data by both features and samples
        thresh_filter (float): Prior to normalization remove features whose mean
            expression is not greater than {thresh_filter}
        scale (bool): optional arg to avoid scaling dataset if data set has been
            normalized prior to analysis

    Returns:
        scaled_frame (:obj: `pandas DataFrame`) : scaled expression data of
            shape [n_samples, n_features]
    """
    scaler_classes = {'standard': StandardScaler, 'robust': RobustScaler, 'minmax': MinMaxScaler,
                      'quant': QuantileTransformer}
    print('--- setting {} as scaler to normalize features|samples by ---'.format(norm_type))
    feature_scaler = scaler_classes[norm_type]()

    if not scale:
        print('--- expression dataset will not be scaled ---')
        return expr

    # Collapse duplicate feature rows, drop low-mean features, then transpose
    # so rows are samples (the orientation scalers and ML algorithms expect).
    collapsed = expr.groupby(expr.index).mean()
    oriented = collapsed[(collapsed.mean(axis=1) > thresh_filter)].T
    normed = log_norm(oriented)
    print('--- log2 normalization ---')

    if feature and sample:
        # Scale once per feature, transpose, then once per sample.
        print('--- scaling by feature and sample ---')
        sample_scaler = scaler_classes[norm_type]()
        by_feature = pd.DataFrame(feature_scaler.fit_transform(normed),
                                  index = normed.index, columns = normed.columns).T
        scaled_frame = pd.DataFrame(sample_scaler.fit_transform(by_feature),
                                    index = by_feature.index, columns = by_feature.columns).T
    elif feature:
        print('--- scaling by feature ---')
        scaled_frame = pd.DataFrame(feature_scaler.fit_transform(normed),
                                    index = normed.index, columns = normed.columns)
    else:
        print('--- expression dataset will not be scaled ---')
        scaled_frame = normed

    return scaled_frame
def load_scaled_expr(expr, cohort, norm_type='robust', feature = True, sample = False, thresh_filter = 0.4, scale=True):
    """Load a cached scaled expression frame, generating and caching it if absent.

    Args:
        expr (pandas DataFrame obj): pandas DataFrame of [n_features, n_samples]
        cohort (str) : name of cohort to associate with compiled regulon
        norm_type (str): Scaler to normalized features/samples by: standard | robust | minmax | quant
        feature (bool): Scale expression data by features
        sample (bool): Scale expression data by both features and samples
        thresh_filter (float): Prior to normalization remove features whose mean
            expression is not greater than {thresh_filter}
        scale (bool): optional arg to avoid scaling dataset if data set has been
            normalized prior to analysis

    Returns:
        scaled_frame (:obj: `pandas DataFrame`) : scaled expression data of
            shape [n_samples, n_features]
    """
    # Cache key encodes every normalization parameter so differing settings
    # never collide on disk.
    cache_path = os.path.join(
        dirname,
        '../experiments/{cohort}/data/{cohort}_{norm_type}_{feature}_{sample}_{thresh_filter}_{scale}_frame.pkl'.
        format(cohort = cohort, norm_type = norm_type, feature = feature,
               sample = sample, thresh_filter = thresh_filter, scale = scale))

    if not os.path.isfile(cache_path):
        print('--- Generating scaled expression data ---')
        nes = fit_and_transform_array(expr = expr, norm_type = norm_type, feature = feature,
                                      sample = sample, thresh_filter = thresh_filter, scale = scale)
        write_pickle(nes, cache_path)
    else:
        print('--- Loading scaled expression data ---')
        nes = read_pickle(cache_path)
    return nes
import math
import matplotlib.pyplot as plt
from .Generaldistribution import Distribution
class Gaussian(Distribution):
    """ Gaussian distribution class for calculating and
    visualizing a Gaussian distribution.

    Attributes:
        mean (float) representing the mean value of the distribution
        stdev (float) representing the standard deviation of the distribution
        data_list (list of floats) a list of floats extracted from the data file
    """

    def __init__(self, mu=0, sigma=1):
        Distribution.__init__(self, mu, sigma)

    def calculate_mean(self):
        """Function to calculate the mean of the data set.

        Args:
            None

        Returns:
            float: mean of the data set
        """
        avg = 1.0 * sum(self.data) / len(self.data)
        self.mean = avg
        return self.mean

    def calculate_stdev(self, sample=True):
        """Function to calculate the standard deviation of the data set.

        Args:
            sample (bool): whether the data represents a sample or population

        Returns:
            float: standard deviation of the data set
        """
        # Bessel's correction (n - 1) when the data is a sample.
        n = len(self.data) - 1 if sample else len(self.data)
        mean = self.calculate_mean()
        sigma = math.sqrt(sum((d - mean) ** 2 for d in self.data) / n)
        self.stdev = sigma
        return self.stdev

    def plot_histogram(self):
        """Function to output a histogram of the instance variable data using
        matplotlib pyplot library.

        Args:
            None

        Returns:
            None
        """
        plt.hist(self.data)
        plt.title('Histogram of Data')
        plt.xlabel('data')
        plt.ylabel('count')

    def pdf(self, x):
        """Probability density function calculator for the gaussian distribution.

        Args:
            x (float): point for calculating the probability density function

        Returns:
            float: probability density function output
        """
        return (1.0 / (self.stdev * math.sqrt(2*math.pi))) * math.exp(-0.5*((x - self.mean) / self.stdev) ** 2)

    def plot_histogram_pdf(self, n_spaces = 50):
        """Function to plot the normalized histogram of the data and a plot of the
        probability density function along the same range

        Args:
            n_spaces (int): number of data points

        Returns:
            list: x values for the pdf plot
            list: y values for the pdf plot
        """
        min_range = min(self.data)
        max_range = max(self.data)

        # calculates the interval between x values
        interval = 1.0 * (max_range - min_range) / n_spaces

        # calculate the x values to visualize
        x = [min_range + interval * i for i in range(n_spaces)]
        y = [self.pdf(tmp) for tmp in x]

        # make the plots
        fig, axes = plt.subplots(2, sharex=True)
        fig.subplots_adjust(hspace=.5)
        axes[0].hist(self.data, density=True)
        axes[0].set_title('Normed Histogram of Data')
        axes[0].set_ylabel('Density')
        axes[1].plot(x, y)
        axes[1].set_title('Normal Distribution for \n Sample Mean and Sample Standard Deviation')
        # BUG FIX: this y-label was previously set on axes[0] a second time
        # (overwriting its label) and the pdf subplot was left unlabeled.
        axes[1].set_ylabel('Density')
        plt.show()

        return x, y

    def __add__(self, other):
        """Function to add together two Gaussian distributions

        Args:
            other (Gaussian): Gaussian instance

        Returns:
            Gaussian: Gaussian distribution
        """
        # Sum of independent Gaussians: means add, variances add.
        result = Gaussian()
        result.mean = self.mean + other.mean
        result.stdev = math.sqrt(self.stdev ** 2 + other.stdev ** 2)
        return result

    def __repr__(self):
        """Function to output the characteristics of the Gaussian instance

        Args:
            None

        Returns:
            string: characteristics of the Gaussian
        """
        return "mean {}, standard deviation {}".format(self.mean, self.stdev)
from medspacy.target_matcher import TargetRule
# Target concept rules for the medspaCy TargetMatcher. Each TargetRule maps a
# literal phrase (or a token-level `pattern`, which takes precedence over the
# literal when present) to an entity category label.
rules = [
    # --- Pattern-based residence/residing cues ---
    # Title-cased proper-noun run ending in "apartment*" (e.g. a named complex).
    TargetRule(literal="apartment", category="RESIDENCE",
               pattern=[{'IS_TITLE': True, 'OP': '+'}, {'LOWER': {'REGEX': 'apartment'}}],
               ),
    # Lemma variants of residing verbs, optionally followed by "in"/"at".
    TargetRule(literal="<RESIDES>", category="RESIDES", pattern=[{'LEMMA': {'IN': ['reside', 'stay', 'live', 'sleep']}},
                                                                {'LOWER': {'IN': ['in', 'at']}, 'OP': '?'}],
               attributes=None),
    TargetRule(literal="move in", category="RESIDES", pattern=[{'LEMMA': 'move'}, {'LOWER': 'in'}],
               ),
    TargetRule(literal="current living situation:", category="RESIDES"),
    # --- Patient references (veteran/patient terms and first-person pronouns) ---
    TargetRule(literal="veteran", category="PATIENT",
               pattern=[{'LOWER': {'REGEX': '^vet(eran)?'}}, {'LOWER': "'s", 'OP': '?'}],
               ),
    TargetRule(literal="patient", category="PATIENT",
               pattern=[{'LOWER': {'IN': ['patient', 'pt', 'pt.']}}, {'LOWER': "'s", 'OP': '?'}],
               ),
    TargetRule(literal="patient", category="PATIENT", pattern=[{'LOWER': {'IN': ['my', 'me']}}],
               ),
    # --- Employment / homelessness keyword patterns ---
    TargetRule(literal="<DET> job", category="EMPLOYMENT", pattern=[{'POS': 'DET'}, {'LOWER': 'job'}],
               ),
    # Any token beginning with "homeless" (homeless, homelessness, ...).
    TargetRule(literal="...Homeless", category="HOMELESSNESS", pattern=[{'LOWER': {'REGEX': 'homeless'}}],
               attributes=None),
    # --- Family and social-contact literals (singular/plural spelled out) ---
    TargetRule(literal="father", category="FAMILY"),
    TargetRule(literal="fathers", category="FAMILY"),
    TargetRule(literal="girlfriend", category="FAMILY"),
    TargetRule(literal="boyfriend", category="FAMILY"),
    TargetRule(literal="fiance", category="FAMILY"),
    TargetRule(literal="fiancee", category="FAMILY"),
    TargetRule(literal="mother", category="FAMILY"),
    TargetRule(literal="mothers", category="FAMILY"),
    TargetRule(literal="brother", category="FAMILY"),
    TargetRule(literal="brothers", category="FAMILY"),
    TargetRule(literal="sister", category="FAMILY"),
    TargetRule(literal="sisters", category="FAMILY"),
    TargetRule(literal="friend", category="FAMILY"),
    TargetRule(literal="buddy", category="FAMILY"),
    TargetRule(literal="buddies", category="FAMILY"),
    TargetRule(literal="friends", category="FAMILY"),
    TargetRule(literal="daughter", category="FAMILY"),
    TargetRule(literal="daughters", category="FAMILY"),
    TargetRule(literal="son", category="FAMILY"),
    TargetRule(literal="sons", category="FAMILY"),
    TargetRule(literal="grandson", category="FAMILY"),
    TargetRule(literal="grandsons", category="FAMILY"),
    TargetRule(literal="grandparent", category="FAMILY"),
    TargetRule(literal="grandparents", category="FAMILY"),
    TargetRule(literal="grandfather", category="FAMILY"),
    TargetRule(literal="grandfathers", category="FAMILY"),
    TargetRule(literal="grandmother", category="FAMILY"),
    TargetRule(literal="grandmothers", category="FAMILY"),
    TargetRule(literal="granddaughter", category="FAMILY"),
    TargetRule(literal="granddaughters", category="FAMILY"),
    TargetRule(literal="relative", category="FAMILY"),
    TargetRule(literal="relatives", category="FAMILY"),
    TargetRule(literal="non relative", category="FAMILY"),
    TargetRule(literal="non relatives", category="FAMILY"),
    TargetRule(literal="non-relative", category="FAMILY"),
    TargetRule(literal="non-relatives", category="FAMILY"),
    TargetRule(literal="niece", category="FAMILY"),
    TargetRule(literal="nieces", category="FAMILY"),
    TargetRule(literal="nephew", category="FAMILY"),
    TargetRule(literal="nephews", category="FAMILY"),
    TargetRule(literal="uncle", category="FAMILY"),
    TargetRule(literal="uncles", category="FAMILY"),
    TargetRule(literal="aunt", category="FAMILY"),
    TargetRule(literal="aunts", category="FAMILY"),
    TargetRule(literal="parent", category="FAMILY"),
    TargetRule(literal="parents", category="FAMILY"),
    # --- Temporary/transitional housing facilities ---
    TargetRule(literal="homeless shelter", category="TEMPORARY_HOUSING"),
    TargetRule(literal="homeless shelters", category="TEMPORARY_HOUSING"),
    TargetRule(literal="shelter", category="TEMPORARY_HOUSING"),
    TargetRule(literal="shelters", category="TEMPORARY_HOUSING"),
    TargetRule(literal="YMCA", category="TEMPORARY_HOUSING"),
    TargetRule(literal="Salvation Army", category="TEMPORARY_HOUSING"),
    # --- Locations associated with being unhoused ---
    TargetRule(literal="the streets", category="HOMELESS_LOCATION"),
    TargetRule(literal="the street", category="HOMELESS_LOCATION"),
    TargetRule(literal="motel", category="HOMELESS_LOCATION"),
    TargetRule(literal="motels", category="HOMELESS_LOCATION"),
    TargetRule(literal="hotel", category="HOMELESS_LOCATION"),
    TargetRule(literal="park", category="HOMELESS_LOCATION"),
    TargetRule(literal="parks", category="HOMELESS_LOCATION"),
    TargetRule(literal="hotels", category="HOMELESS_LOCATION"),
    TargetRule(literal="woods", category="HOMELESS_LOCATION"),
    TargetRule(literal="car", category="HOMELESS_LOCATION"),
    TargetRule(literal="vehicle", category="HOMELESS_LOCATION"),
    TargetRule(literal="literally homeless", category="HOMELESSNESS"),
    # --- Stable-residence terms ---
    TargetRule(literal="house", category="RESIDENCE"),
    TargetRule(literal="housing", category="RESIDENCE"),
    TargetRule(literal="home", category="RESIDENCE"),
    TargetRule(literal="apt", category="RESIDENCE"),
    TargetRule(literal="apt.", category="RESIDENCE"),
    TargetRule(literal="apartment", category="RESIDENCE"),
    TargetRule(literal="condo", category="RESIDENCE"),
    TargetRule(literal="stable housing", category="RESIDENCE"),
    TargetRule(literal="unit", category="RESIDENCE"),
    TargetRule(literal="place", category="RESIDENCE")
    ]
from medspacy.context import ConTextRule
from .. import constants
from . import callbacks
context_rules = [
# Negation - these values modifiers can change "homeless" to "exit from homeless"
ConTextRule("without", "NEGATED_EXISTENCE", direction="forward", max_scope=5),
ConTextRule("overcame", "NEGATED_EXISTENCE", direction="forward", max_scope=5,
pattern=[{"LOWER": {"REGEX": "overc[oa]me"}}]),
ConTextRule("not currently", "NEGATED_EXISTENCE", direction="forward", max_scope=5),
ConTextRule("not", "NEGATED_EXISTENCE", direction="forward", max_scope=5),
ConTextRule("denies", "NEGATED_EXISTENCE", direction="forward", max_scope=5,
pattern=[{"LOWER": {"IN": ["denied", "denies"]}}]),
ConTextRule("denial", "NEGATED_EXISTENCE", direction="forward", max_scope=5),
ConTextRule("denies risk", "NEGATED_EXISTENCE", direction="forward", max_scope=5,
pattern=[{"LOWER": {"REGEX": r"denie[d|s]"}}, {"LOWER": "risk"}, {"LOWER": "of", "OP": "?"}]),
ConTextRule(": denied", "NEGATED_EXISTENCE", direction="BACKWARD", max_scope=5,
pattern=[{"LOWER": ":"}, {"LOWER": {"REGEX": r"denie[ds]"}}]),
ConTextRule("declined", "NEGATED_EXISTENCE", direction="BIDIRECTIONAL",
pattern=[{"LOWER": {"IN": ["decline", "declining"]}}],
max_scope=5, allowed_types={"TEMPORARY_HOUSING", "EVIDENCE_OF_HOUSING"}),
ConTextRule("has no", "NEGATED_EXISTENCE", direction="forward", max_scope=5),
ConTextRule("resolved", "NEGATED_EXISTENCE", direction="backward", max_scope=5),
ConTextRule("no", "NEGATED_EXISTENCE", direction="FORWARD", max_scope=5),
ConTextRule("not have", "NEGATED_EXISTENCE", direction="FORWARD", max_scope=5, allowed_types={"EVIDENCE_OF_HOUSING"}),
ConTextRule(": no", "NEGATED_EXISTENCE", direction="BACKWARD", max_scope=5),
ConTextRule("not eligible for", "NEGATED_EXISTENCE", direction="forward",
allowed_types={"EVIDENCE_OF_HOUSING"}),
ConTextRule("not interested in", "NEGATED_EXISTENCE", direction="forward"),
ConTextRule("unable to", "NEGATED_EXISTENCE", direction="forward", allowed_types={"EVIDENCE_OF_HOUSING", "TEMPORARY_HOUSING"}),
ConTextRule("not want", "NEGATED_EXISTENCE", direction="forward",
excluded_types={"EVIDENCE_OF_HOMELESSNESS"}),
ConTextRule("refuse", "NEGATED_EXISTENCE", direction="forward",
pattern=[{"LEMMA": "refuse"}],
allowed_types={"EVIDENCE_OF_HOUSING", "TEMPORARY_HOUSING"}),
ConTextRule("until <THEY> have/find", "HYPOTHETICAL", direction="FORWARD",
pattern=[
{"LOWER": "until"},
{"LOWER": {"IN": ["he", "she"]}},
{"LEMMA": {"IN": ["find", "have"]}}
],
allowed_types={"EVIDENCE_OF_HOUSING", "TEMPORARY_HOUSING"},
max_scope=4),
ConTextRule("intent to pay", "HYPOTHETICAL"),
ConTextRule("has chosen", "HYPOTHETICAL", on_modifies=callbacks.has_chosen),
ConTextRule("waiting for", "HYPOTHETICAL", direction="FORWARD",
pattern=[
{"LOWER": {"REGEX": "wait(ing)?$"}},
{"LOWER": {"IN": ["for", "on"]}}
]),
ConTextRule("awaiting", "HYPOTHETICAL", direction="forward"),
ConTextRule("prepare", "HYPOTHETICAL", direction="forward",
pattern=[{"LEMMA": "prepare"}, {"LOWER": "to", "OP": "?"}]),
ConTextRule("not pay", "NEGATED_EXISTENCE", direction="FORWARD",
pattern=[{"LOWER": "not"}, {"LEMMA": "pay"}],
allowed_types={"EVIDENCE_OF_HOUSING"}, on_modifies=callbacks.on_modifies_pay),
ConTextRule("inability to pay", "NEGATED_EXISTENCE", direction="FORWARD",
on_modifies=callbacks.on_modifies_pay, allowed_types={"EVIDENCE_OF_HOUSING", }),
ConTextRule("fell through", "NEGATED_EXISTENCE", direction="BACKWARD",
pattern=[
{"LEMMA": "fall"},
{"LOWER": "through"},
],
allowed_types={"EVIDENCE_OF_HOUSING", "TEMPORARY_HOUSING"}),
ConTextRule("lacks", "NEGATED_EXISTENCE", "FORWARD", allowed_types={"EVIDENCE_OF_HOUSING", "TEMPORARY_HOUSING"}),
ConTextRule("[]", "NOT_RELEVANT", direction="forward", # Empty checkmark can mark the adjacent item to be ignored
max_scope=None),
ConTextRule("()", "NOT_RELEVANT", direction="forward", # Empty checkmark can mark the adjacent item to be ignored
max_scope=None),
ConTextRule("phone number", "NOT_RELEVANT", direction="BACKWARD", pattern=[{"LOWER": {"REGEX": "phone"}}, {"LOWER": {"IN": ["num", "no", "no.", "number"]}}]),
ConTextRule(": Yes [ ] No [X]", "NEGATED_EXISTENCE", direction="BACKWARD"),
ConTextRule(": None", "NEGATED_EXISTENCE", direction="BACKWARD",
pattern=[{"LOWER": {"IN": ["):"]}}, {"LOWER": "None"}]),
ConTextRule("___", "NOT_RELEVANT", direction="FORWARD", max_scope=2),
ConTextRule("not report", "NOT_RELEVANT", "FORWARD", max_scope=5), # "not report being homeless"
ConTextRule("formerly", "HISTORICAL", direction="forward"),
ConTextRule("previously", "HISTORICAL", direction="forward"),
ConTextRule("prior to", "HISTORICAL", direction="forward"),
ConTextRule("after", "HISTORICAL", direction="forward"),
ConTextRule("resolved on", "HISTORICAL", direction="BACKWARD"),
ConTextRule("move out", "HISTORICAL", direction="FORWARD", pattern=[{"LEMMA": "move"}, {"LOWER": "out"}]),
ConTextRule("before this", "HISTORICAL", direction="forward",
pattern=[{"LOWER": "before"},
{"LOWER": {"IN": ["this", "that"]}}]),
ConTextRule("on and off", "HISTORICAL", direction="bidirectional"),
ConTextRule("history of", "HISTORICAL", direction="forward", allowed_types={"EVIDENCE_OF_HOMELESSNESS"},
pattern=[{"LOWER": {"IN": ["history", "hx"]}}, {"LOWER": "of"}]),
ConTextRule("past medical history of", "HISTORICAL", direction="FORWARD", allowed_types={"EVIDENCE_OF_HOMELESSNESS"},
pattern=[
{"LOWER": "past"},
{"LOWER": "medical"},
{"LOWER": {"IN": ["history", "hx"]}},
{"LOWER": "of", 'OP': "?"}
]),
ConTextRule("pmhx of", "HISTORICAL", direction="FORWARD", allowed_types={"EVIDENCE_OF_HOMELESSNESS"},
pattern=[
{"LOWER": {"IN": ["pmh", "pmhx"]}},
{"LOWER": "of", 'OP': "?"}
]),
ConTextRule("within the last N years", "HISTORICAL",
pattern=[
{"LOWER": "within"},
{"LOWER": "the", "OP": "?"},
{"LOWER": "last"},
{"LIKE_NUM": True},
{"LOWER": "years"}
]),
ConTextRule("for <TIME>", "HISTORICAL",
pattern=[
{"LOWER": "for"},
{"OP": "?"},
{"LIKE_NUM": True, "OP": "+"},
{"LEMMA": {"IN": ["day", "week", "month", "year"]}}
],
on_match=callbacks.preceded_by_was
),
ConTextRule("within the last year", "HISTORICAL"),
ConTextRule("in the past", "HISTORICAL", max_scope=5),
ConTextRule("the last X months", "HISTORICAL", direction="BIDIRECTIONAL",
pattern=[
{"LOWER": {"IN": ["last", "past"]}},
{"OP": "?"},
{"LEMMA": "month"}
]),
ConTextRule("he left", "HISTORICAL", pattern=[{"LOWER": {"IN": ["he", "she"]}}, {"LOWER": "left"}],
allowed_types={"EVIDENCE_OF_HOUSING", "TEMPORARY_HOUSING"}),
# ConTextRule("was", "HISTORICAL", "FORWARD", max_scope=2, allowed_types={"EVIDENCE_OF_HOMELESSNESS", "TEMPORARY_HOUSING"}),
ConTextRule("in the past N days", "CURRENT",
pattern=[
{"LOWER": "in"},
{"LOWER": "the", "OP": "?"},
{"LOWER": "past"},
{"LIKE_NUM": True},
{"LOWER": "days"}
]
),
ConTextRule("in the past few days", "CURRENT",
pattern=[
{"LOWER": "in"},
{"LOWER": "the", "OP": "?"},
{"LOWER": "past"},
{"LOWER": "few"},
{"LOWER": "days"}
]
),
ConTextRule("recently", "CURRENT"),
ConTextRule("again", "CURRENT", max_scope=2),
ConTextRule("currently", "CURRENT"),
ConTextRule("maintain", "CURRENT", allowed_types={"EVIDENCE_OF_HOUSING"}),
ConTextRule("current episode", "CURRENT", allowed_types={"EVIDENCE_OF_HOMELESSNESS"}, direction="FORWARD"),
ConTextRule("worried about", "HYPOTHETICAL", direction="forward"),
ConTextRule("avoid", "HYPOTHETICAL", direction="forward", allowed_types={"EVIDENCE_OF_HOMELESSNESS"}),
ConTextRule("looking for", "HYPOTHETICAL", direction="forward",
pattern=[{"LEMMA": "look"}, {"LOWER": {"IN": ["for", "at", "in"]}}],
allowed_types={"EVIDENCE_OF_HOUSING"}),
ConTextRule("lead on", "HYPOTHETICAL", direction="forward"),
ConTextRule("options for", "HYPOTHETICAL", direction="forward", max_scope=5, allowed_types={"EVIDENCE_OF_HOUSING"}),
ConTextRule("apply for", "HYPOTHETICAL", direction="forward",
pattern=[{"LOWER": {"REGEX": "appl(y|ied|ication)"}}, {"LOWER": {"IN": ["for", "to", "into"]}}],
allowed_types=["EVIDENCE_OF_HOUSING", "TEMPORARY_HOUSING"]),
ConTextRule("submitted application", "HYPOTHETICAL", direction="BIDIRECTIONAL",
pattern=[{"LEMMA": "submit"}, {"OP": "?"}, {"LEMMA": "application"}]),
ConTextRule("put in an application", "HYPOTHETICAL", direction="BIDIRECTIONAL",
allowed_types=["EVIDENCE_OF_HOUSING", "TEMPORARY_HOUSING"],
pattern=[
{"LOWER": "put"},
{"LOWER": "in"},
{"OP": "?"},
{"LOWER": "application"}
]),
ConTextRule("qualified", "HYPOTHETICAL", direction="BIDIRECTIONAL",
allowed_types=["EVIDENCE_OF_HOUSING", "TEMPORARY_HOUSING"],
pattern=[
{"LOWER": {"REGEX": "qualif"}},
{"LOWER": "for"},
]),
ConTextRule("eligible for", "HYPOTHETICAL", direction="forward", allowed_types=["EVIDENCE_OF_HOUSING", "HOMELESSNESS_HEALTHCARE_SERVICE"]),
ConTextRule("in need of", "HYPOTHETICAL", direction="forward", allowed_types={"EVIDNECE_OF_HOUSING", "TEMPORARY_HOUSING"}),
ConTextRule("need", "HYPOTHETICAL", direction="forward", pattern=[{"LEMMA": "need"}], allowed_types={"EVIDENCE_OF_HOUSING", "TEMPORARY_HOUSING"}),
ConTextRule("will be", "HYPOTHETICAL", direction="forward", excluded_types={"RISK_OF_HOMELESSNESS"}),
ConTextRule("proposed", "HYPOTHETICAL", direction="forward"),
ConTextRule("propose", "HYPOTHETICAL", direction="forward"),
ConTextRule("become", "HYPOTHETICAL", direction="forward", allowed_types={"EVIDENCE_OF_HOMELESSNESS", "RISK_OF_HOMELESSNESS"},
pattern=[{"LOWER": {"IN": ["become", "becomes"]}}],
max_scope=5),
ConTextRule("find", "HYPOTHETICAL", direction="forward", pattern=[{"LOWER": {"REGEX": "find"}}],
allowed_types={"EVIDENCE_OF_HOUSING"}),
ConTextRule("goal", "HYPOTHETICAL", direction="BIDIRECTIONAL", pattern=[{"LOWER": {"REGEX": "^goal"}}], allowed_types={"EVIDENCE_OF_HOUSING", "TEMPORARY_HOUSING"}),
ConTextRule("get", "HYPOTHETICAL", direction="forward",
pattern=[{"LOWER": {"IN": ["get", "gets", "obtain", "obtains"]}}],
allowed_types=["EVIDENCE_OF_HOUSING"],
max_scope=3), # ie., "get his own apartment"
ConTextRule("forms for", "HYPOTHETICAL", direction="forward"),
ConTextRule("wants to move", "HYPOTHETICAL", direction="forward"),
ConTextRule("affordable", "HYPOTHETICAL", direction="bidirectional",
allowed_types={"EVIDENCE_OF_HOUSING"}),
ConTextRule("request assistance", "HYPOTHETICAL", direction="forward"),
ConTextRule("assisting with", "HYPOTHETICAL", direction="FORWARD", allowed_types={"EVIDENCE_OF_HOUSING", "TEMPORARY_HOUSING"},
pattern=[{"LEMMA": "assist"}, {"OP": "?"}, {"LOWER": "with"}]),
ConTextRule("request", "HYPOTHETICAL", direction="forward", pattern=[{"LEMMA": "request"}]),
ConTextRule("eventually get", "HYPOTHETICAL", direction="forward"),
ConTextRule("future", "HYPOTHETICAL", direction="forward", max_scope=5),
ConTextRule("in the future", "HYPOTHETICAL", direction="BIDIRECTIONAL",
pattern=[
{"LOWER": "in"},
{"LOWER": "the"},
{"POS": "ADJ", "OP": "*"},
{"LOWER": "future"}
]
),
ConTextRule("near future", "HYPOTHETICAL", direction="BACKWARD", max_scope=5,
pattern=[
{"LOWER": "near"},
{"LOWER": "future"}
]
),
ConTextRule("qualify for", "HYPOTHETICAL", direction="forward"),
ConTextRule("able to", "HYPOTHETICAL", direction="forward", max_scope=5, allowed_types={"EVIDENCE_OF_HOUSING", "TEMPORARY_HOUSING"}),
ConTextRule("will provide", "HYPOTHETICAL", direction="forward"),
ConTextRule("once he/she", "HYPOTHETICAL", direction="forward", allowed_types={"EVIDENCE_OF_HOUSING"},
pattern=[{"LOWER": "once"}, {"LOWER": {"IN": ["he", "she"]}}]),
ConTextRule("will provide transport", "PSEUDO", direction="PSEUDO",
pattern=[
{"LOWER": "will"},
{"LOWER": "provide"},
{"LOWER": {"REGEX": "^transport"}}
]),
ConTextRule("preference:", "HYPOTHETICAL", direction="forward",
pattern=[{"LOWER": "preference"}, {"LOWER": {"IN": [":", "for"]}}],
allowed_types={"EVIDENCE_OF_HOUSING"}),
ConTextRule("would prefer", "HYPOTHETICAL", "FORWARD", allowed_types={"EVIDENCE_OF_HOUSING"}),
ConTextRule("afford", "HYPOTHETICAL", direction="forward", allowed_types={"EVIDENCE_OF_HOUSING"},
pattern=[{"LOWER": {"REGEX": "^afford"}, "POS": "VERB"}]),
ConTextRule("suggest", "HYPOTHETICAL", direction="forward", allowed_types={"EVIDENCE_OF_HOUSING"}),
ConTextRule("will obtain", "HYPOTHETICAL", direction="forward", allowed_types={"EVIDENCE_OF_HOUSING"},
pattern=[{"LEMMA": "will", "OP": "?"}, {"LOWER": "obtain"}]),
ConTextRule("discuss", "HYPOTHETICAL", direction="forward", allowed_types={"EVIDENCE_OF_HOUSING", "TEMPORARY_HOUSING"},
pattern=[{"LEMMA": "discuss"}],
max_scope=5),
ConTextRule("seeking", "HYPOTHETICAL", direction="forward",
pattern=[{"LOWER": {"REGEX": "seek"}}]
, allowed_types={"EVIDENCE_OF_HOUSING"}),
ConTextRule("searching for", "HYPOTHETICAL", direction="forward",
allowed_types={"EVIDENCE_OF_HOUSING"}),
ConTextRule("search", "HYPOTHETICAL", direction="BIDIRECTIONAL",
allowed_types={"EVIDENCE_OF_HOUSING"}),
ConTextRule("search options", "HYPOTHETICAL", direction="forward",
allowed_types={"EVIDENCE_OF_HOUSING"}),
# ConTextRule("found", "HYPOTHETICAL", allowed_types={"EVIDENCE_OF_HOUSING"}),
ConTextRule("look for", "HYPOTHETICAL", direction="forward",
pattern=[{"LOWER": {"REGEX": "look"}}, {"LOWER": "for"}]
, allowed_types={"EVIDENCE_OF_HOUSING", "TEMPORARY_HOUSING"}),
ConTextRule("hunt for", "HYPOTHETICAL", direction="forward",
pattern=[{"LOWER": {"REGEX": "^hunt"}}, {"LOWER": "for"}]
, allowed_types={"EVIDENCE_OF_HOUSING", "TEMPORARY_HOUSING"}),
ConTextRule("view a X", "HYPOTHETICAL", direction="forward",
pattern=[{"LOWER": {"IN": ["view", "see"]}}, {"POS": "DET"}],
max_scope=1,
allowed_types={"EVIDENCE_OF_HOUSING"}),
ConTextRule("locate", "HYPOTHETICAL", direction="forward",
pattern=[{"LOWER": {"IN": ["locate", "located", "locating"]}}]
, allowed_types={"EVIDENCE_OF_HOUSING", "TEMPORARY_HOUSING"}),
ConTextRule("secure", "HYPOTHETICAL", direction="forward",
pattern=[{"LOWER": {"IN": ["secure", "secures", "securing"]}}]),
ConTextRule("offered", "HYPOTHETICAL", direction="forward"),
ConTextRule("show", "HYPOTHETICAL", direction="forward", pattern=[{"LEMMA": "show"}], max_scope=5),
ConTextRule("pending", "HYPOTHETICAL", direction="BIDIRECTIONAL", max_scope=4),
ConTextRule("eventually", "HYPOTHETICAL", direction="BIDIRECTIONAL", max_scope=6),
ConTextRule("identified", "HYPOTHETICAL", direction="BIDIRECTIONAL", max_scope=4,
allowed_types={"EVIDENCE_OF_HOUSING", "TEMPORARY_HOUSING"}),
ConTextRule("want", "HYPOTHETICAL", direction="forward",
pattern=[{"LEMMA": "want"}, {"LOWER": "to", "OP": "?"}],
allowed_types={"EVIDENCE_OF_HOUSING",}),
ConTextRule("would like", "HYPOTHETICAL", direction="forward",
allowed_types=constants.HOUSING_LABELS),
ConTextRule("hopes", "HYPOTHETICAL", direction="forward",
pattern=[{"LOWER": {"IN": ["hope", "hopes", "hopeful"]}}],
on_modifies=callbacks.hopes_on_modifies,
allowed_types=constants.HOUSING_LABELS),
ConTextRule("consider", "HYPOTHETICAL", direction="forward", #max_scope=5,
allowed_types=constants.HOUSING_LABELS,
pattern=[{"LOWER": {"REGEX": "consider"}}]),
ConTextRule("interested in", "HYPOTHETICAL", direction="forward", max_scope=5,
allowed_types=constants.HOUSING_LABELS),
ConTextRule("inquire", "HYPOTHETICAL", direction="forward", max_scope=5,
allowed_types=constants.HOUSING_LABELS),
ConTextRule("look into", "HYPOTHETICAL", direction="FORWARD", max_scope=5,
pattern=[{"LOWER": {"REGEX": "look"}}, {"LOWER": "into"}]),
ConTextRule("does not want to", "HYPOTHETICAL", direction="forward",
pattern=[{"LOWER": {"IN": ["does", "do"]}},
{"LOWER": "not"},
{"LOWER": "want"},
{"LOWER": "to", "OP": "?"}]),
ConTextRule("prevent", "HYPOTHETICAL", direction="forward",
allowed_types={"EVIDENCE_OF_HOUSING", "TEMPORARY_HOUSING"}),
ConTextRule("planing", "HYPOTHETICAL", direction="forward",
pattern=[{"LOWER": {"REGEX": "plan"}}, {"LOWER": "to", "OP": "?"}],
allowed_types={"EVIDENCE_OF_HOUSING"}),
ConTextRule("planning", "HYPOTHETICAL", direction="FORWARD",
pattern=[{"LOWER": "has"}, {"LEMMA": "plan"}],
allowed_types={"EVIDENCE_OF_HOUSING"}),
ConTextRule("(housing) plan", "HYPOTHETICAL", direction="BACKWARD",
pattern=[{"LOWER": "plan"}],
allowed_types={"EVIDENCE_OF_HOUSING"},
max_scope=1,
on_modifies=callbacks.on_modifies_housing_plan),
ConTextRule("to be", "HYPOTHETICAL", direction="forward", # ie., "to be homeless"
max_scope=5),
ConTextRule("desire to be", "HYPOTHETICAL", direction="forward", # ie., "to be homeless"
max_scope=5),
ConTextRule("information regarding", "HYPOTHETICAL", direction="forward"),
ConTextRule("potential", "HYPOTHETICAL", direction="forward"),
ConTextRule("available", "HYPOTHETICAL", direction="BACKWARD", allowed_types={"EVIDENCE_OF_HOUSING", "TEMPORARY_HOUSING"},
max_scope=2),
ConTextRule("visit", "HYPOTHETICAL", direction="FORWARD", allowed_types={"EVIDENCE_OF_HOUSING",},
pattern=[{"LOWER": {"IN": ["visit", "visited"]}}],
on_match=callbacks.visit_on_match
# TODO: maybe this should only match specific phrases
),
ConTextRule("move forward", "HYPOTHETICAL", direction="FORWARD", allowed_types={"EVIDENCE_OF_HOUSING", "TEMPORARY_HOUSING"},
pattern=[{"LEMMA": "move"}, {"LOWER": "forward"}],),
ConTextRule("screened for admission", "HYPOTHETICAL", direction="FORWARD", allowed_types={"EVIDENCE_OF_HOUSING", "TEMPORARY_HOUSING"},
pattern=[{"LOWER": "screened"}, {"LOWER": "for"}, {"POS": "ADJ", "OP": "?"}, {"LOWER": "admission"}]),
ConTextRule("screening for", "HYPOTHETICAL", direction="FORWARD",
allowed_types={"EVIDENCE_OF_HOUSING", "EVIDENCE_OF_HOMELESSNESS", "TEMPORARY_HOUSING"},
pattern=[
{"LOWER": {"IN": ["screen", "screened", "screening"]}},
{"LOWER": "interview", "OP": "?"},
{"LOWER": "for"}
]
),
ConTextRule("contact with", "HYPOTHETICAL", direction="FORWARD", on_modifies=callbacks.contact_with),
ConTextRule("should be approved", "HYPOTHETICAL", direction="FORWARD"),
# TODO: Keep an eye on this, may be too aggressive
ConTextRule("?", "NOT_RELEVANT", direction="BACKWARD", max_scope=None, on_match=callbacks.disambiguate_question_mark),
ConTextRule("working definition", "NOT_RELEVANT", direction="BIDIRECTIONAL", max_scope=None), # " as defined by HUD's working definition:"
ConTextRule("<NUMBERED_LIST>", "LIST", direction="BIDIRECTIONAL", pattern=r"\n *[\d]{1,2}\.\s+[A-Z]"),
# Risk
ConTextRule("vulnerable to", "AT_RISK", direction="forward",
pattern=[{"LOWER": {"REGEX": "vulnerab"}}, {"LOWER": "to"}]),
ConTextRule("at risk", "AT_RISK", direction="forward"),
ConTextRule("will become", "HYPOTHETICAL", direction="forward", allowed_types={"EVIDENCE_OF_HOMELESSNESS"}),
ConTextRule("cannot afford", "AT_RISK", direction="forward",
pattern=r"can('t|not) (afford|pay)",
allowed_types={"EVIDENCE_OF_HOUSING"}),
ConTextRule("fail inspection", "AT_RISK", pattern=[{"LEMMA": "fail"}, {"OP": "?"}, {"LOWER": "inspection"}]),
ConTextRule("not pass inspection", "AT_RISK"),
ConTextRule("kicked out", "AT_RISK", direction="BIDIRECTIONAL",
pattern=[{"LEMMA": "kick"}, {"OP": "?"}, {"LOWER": "out"}]),
ConTextRule("need for", "AT_RISK", direction="FORWARD"),
ConTextRule("not found", "AT_RISK", direction="FORWARD"),
ConTextRule("in jeopardy", "AT_RISK", direction="BIDIRECTIONAL", allowed_types={"EVIDENCE_OF_HOUSING", "TEMPORARY_HOUSING"}),
ConTextRule("late", "AT_RISK", direction="BIDIRECTIONAL",
allowed_types={"EVIDENCE_OF_HOUSING"},
on_modifies=callbacks.on_modifies_pay),
ConTextRule("AXIS IV", "AT_RISK", "FORWARD"),
ConTextRule("stressors", "AT_RISK", "FORWARD"),
ConTextRule("loss of", "AT_RISK", "FORWARD"),
ConTextRule("loss", "AT_RISK", "BACKWARD", allowed_types={"EVIDENCE_OF_HOUSING", "TEMPORARY_HOUSING"}),
ConTextRule("lost", "AT_RISK", "BIDIRECTIONAL", allowed_types={"EVIDENCE_OF_HOUSING", "TEMPORARY_HOUSING"}),
ConTextRule("losing", "AT_RISK", "BIDIRECTIONAL", allowed_types={"EVIDENCE_OF_HOUSING", "TEMPORARY_HOUSING"}),
# Terminate
# TODO: I'm not sold on this one
    ConTextRule("as he", "TERMINATE", direction="TERMINATE", # 'not homeless as he has his own apartment'
pattern=[{"LOWER": "as"}, {"POS": "PRON"}]),
ConTextRule("but", "CONJ", direction="TERMINATE"),
# Ignore
ConTextRule("plan:", "IGNORE", direction="TERMINATE"),
# Other
# ConTextRule("eligible for", "ELIGIBILITY", direction="FORWARD", allowed_types={"HOMELESSNESS_HEALTHCARE_SERVICE"},),
ConTextRule("resides in", "RESIDES_IN", direction="FORWARD",
pattern=[
{"LOWER": {"IN": ["lives", "living", "resides", "currently", ]}},
{"LOWER": "in"},
],
allowed_types={"EVIDENCE_OF_HOUSING", "EVIDENCE_OF_HOMELESSNESS", "TEMPORARY_HOUSING", "VA_HOUSING"},
on_modifies=callbacks.resides_in_on_modifies),
ConTextRule("was residing in", "PSEUDO", direction="PSEUDO",
pattern=[
{"LOWER": "was"},
{"LOWER": {"IN": ["residing", "living"]}},
{"LOWER": "in"},
],
allowed_types={"EVIDENCE_OF_HOUSING", "EVIDENCE_OF_HOMELESSNESS", "TEMPORARY_HOUSING", "VA_HOUSING"},
),
ConTextRule("had been residing in", "PSEUDO", direction="PSEUDO",
pattern=[
{"LOWER": "had"},
{"LOWER": {"IN": ["been"]}, "OP": "?"},
{"LOWER": {"IN": ["living", "residing", ]}},
{"LOWER": "in"},
],
allowed_types={"EVIDENCE_OF_HOUSING", "EVIDENCE_OF_HOMELESSNESS", "TEMPORARY_HOUSING", "VA_HOUSING"},
),
ConTextRule("resides in", "RESIDES_IN", direction="FORWARD",
pattern=[
{"_": {"concept_tag": "RESIDES"}, "OP": "+"},
],
on_modifies=callbacks.resides_in_on_modifies,
allowed_types={"EVIDENCE_OF_HOUSING", "EVIDENCE_OF_HOMELESSNESS", "TEMPORARY_HOUSING", "VA_HOUSING"}),
ConTextRule("continues to reside in", "RESIDES_IN", # This may be a high-precision modifier
pattern=[
{"LOWER": "continues"},
{"LOWER": "to"},
{"LOWER": {"IN": ["reside", "live"]}},
]),
ConTextRule("moving into", "RESIDES_IN", direction="FORWARD",
pattern=[
{"LOWER": {"IN": ["moved", "moving", "transition", "transitioning"]}},
{"LOWER": {"IN": ["to", "into"]}}
],
allowed_types={"EVIDENCE_OF_HOUSING", "EVIDENCE_OF_HOMELESSNESS", "TEMPORARY_HOUSING", "VA_HOUSING"}),
ConTextRule("sleep in", "SLEEPS_IN",
pattern=[
{"LOWER": {"IN": ["sleep", "sleeps"]}},
{"LOWER": {"IN": ["in", "at"]}},
]
),
# TODO: Careful that this doesn't mess up instances of living with his family
# Meant to match "his mother is living in stable housing"
ConTextRule("<PRON> <FAMILY>", "FAMILY", direction="FORWARD",
pattern=[
{"LOWER": {"IN": ["his", "her"]}},
{"_": {"concept_tag": "FAMILY"}, "OP": "+"},
]),
ConTextRule("was accepted", "ACCEPTED", pattern=[{"LOWER": {"IN": ["was", "been"]}, "OP": "?"}, {"LOWER": "accepted"}, {"LOWER": {"IN": ["to", "into"]}, "OP": "?"}]),
ConTextRule("approved", "ACCEPTED", direction="FORWARD", pattern=[{"LOWER": "approved"}, {"LOWER": "for", "OP": "?"},]),
ConTextRule("could be approved", "HYPOTHETICAL", direction="FORWARD"),
ConTextRule("enrolled", "ENROLLMENT", direction="FORWARD",
pattern=[
{"LOWER": {"IN": ["enroll", "enrolled"]}},
{"LOWER": {"IN": ["in", "into", "participant"]}, "OP": "?"}
]),
ConTextRule("denies any concerns", "POSITIVE_HOUSING", direction="FORWARD",
pattern=[
{"LEMMA": "deny"},
{"LOWER": "any"},
{"LOWER": {"IN": ["concerns", "issues", "problems"]}},
]),
ConTextRule("is able to afford", "POSITIVE_HOUSING", direction="FORWARD",
pattern=[
{"LEMMA": "be"},
{"LOWER": "able"},
{"LOWER": "to"},
{"LOWER": "afford"},
],
allowed_types={"EVIDENCE_OF_HOUSING",}
),
ConTextRule("no issues with", "POSITIVE_HOUSING", direction="FORWARD",
pattern=[
{"LOWER": {"IN": ["no", "any"]}},
{"LOWER": "issues"},
{"LOWER": "with"},
]),
ConTextRule("once he is able to afford", "HYPOTHETICAL", direction="FORWARD",
pattern=[
{"LOWER": {"IN": ["once", "when"]}},
{"OP": "?"},
{"LEMMA": "be", "OP": "?"},
{"LOWER": "able"},
{"LOWER": "to"},
{"LOWER": "afford"},
],
allowed_types={"EVIDENCE_OF_HOUSING",}
),
ConTextRule("pay", "PAYMENT", pattern=[{"LOWER": {"IN": ["pay", "paid", "pays"]}}]),
ConTextRule("not in danger", "PSEUDO", direction="PSEUDO"),
ConTextRule("not provided", "PSEUDO", direction="PSEUDO"),
] | /rehoused_nlp-0.0.1.0.tar.gz/rehoused_nlp-0.0.1.0/rehoused_nlp/resources/context_rules.py | 0.453746 | 0.398758 | context_rules.py | pypi |
0.8.0
=====
* Split high-level Reikna and Cluda?
* FEATURE (docs): extend starting page (quick links to guides, list of algorithms, quick example)
Interface simplifications
-------------------------
Currently computations and transformations are hard to write, read and debug. There is a number of things that can improve matters significantly:
Computation plan:
* Use assignment syntax instead of mutated arrays (that will only work in a plan, of course)
* Use assignment syntax instead of attaching transformations via parameters.
* Give kernels and computations human-friendly names, so that one could run a computation in the timed mode getting info on how much time each kernel takes (possibly accumulating that data over several runs) and displaying it as a call tree.
* Need some easy way to check the intermediate results of the computation (for debugging purposes). In the essense, we need to derive the computation signature automatically from declared inputs/returns instead of specifying it explicitly.
* Need more clear separation of the preparation and compilation stages. Either by the means of API, or at least some guidelines.
Other:
* Support "dynamic" array parameters: size (perhaps only over a particular axis), strides and offset. The user can choose whether to compile in the parameter, or pass it on invocation. This will allow one to pass different views of the same array without recompiling the kernel, for instance. It will be easier with strides and offsets, but the change of shape may require a different kernel grid/block sizes, so may not be feasible.
* Export all the user-level stuff from the top level of ``reikna`` module, and all the computation-writing stuff from ``reikna.core`` or something.
Quality of life
---------------
* A "debug mode" for computation compilation, where it prints which kernels from which computations it is compiling and with which parameters. Will help debugging issues happening on compilation stage (e.g. when the requested number of threads is too high).
* FEATURE (core): check correctness of types in Computation.__call__() if _debug is on
* Expose temporary array in computations via ``__tempalloc__``.
Note: they already are exposed. Need to just add a usage example (and see if the issue about it is still open)
* Add a simpler version of ``_build_plan`` for the cases where one does not need to amend the plan.
* ?FEATURE (core): create "fallback" when if _build_plan() does not catch OutOfResources,
it is called again with reduced local size.
Is it really necessary? Different computations have different ways to handle OutOfResources
(e.g. Reduce divides the block size by 2, while MatrixMul requires block size to be a square).
Generic reduction of maximum block size will lead to a lot of unnecessary compilations
(which will be extremely slow on a CUDA platform).
* FEATURE (core): take not only CLUDA Thread as a parameter for computation ``compile``, but also CommandQueue, opencl Context, CUDA Stream and so on.
* FEATURE (CLUDA): add ``Thread.fork()`` which creates another Thread with the same context and device but different queue.
Also, how do we create a ``Thread`` with the same context, but different device?
Or how do we create and use a ``Thread`` with several devices?
* FEATURE: add debug mode, where every array passed to a computation is checked for shape, dtype etc with the respective computation parameters.
* FIX (core): 'io' parameters proved to be a source of errors and confusion and do not seem to be needed anywhere.
Remove them altogether?
* ?FEATURE (core): Some mechanism to merge together two successive Computation calls. Will require an API to tell reikna that certain computations are executed together, plus some way to determine if the computation is local and pure parallel (otherwise the connection will require the change of code).
* ?API (computations): move some of the functionality to the top level of ``reikna`` module?
* ?FEATURE (core): add ability to connect several transformation parameters to one node.
Currently it is impossible because of the chosen interface (kwds do not allow repetitions).
This can be actually still achieved by connecting additional identity transformations.
* ?FEATURE (core): check for errors in load/stores/param usage when connecting transformations?
Alternatively, return more meaningful errors when accessing load/store/parameter with the wrong number.
* ?FEATURE (core): check for errors in load/stores/param usage in kernels?
Need to see what errors look like in this case.
* ?FEATURE (core): check that types of arrays in the computation signature are supported by GPU (eliminates the need to check it in every computation)
* ?FEATURE (core): allow output parameters to be returned as results of a computation, like in numpy ufuncs.
1) What do we do with 'io' parameters? Moreover, do we even need them?
2) Some mechanics for returning an array of size 1 as a scalar copied to CPU?
New features
------------
* FEATURE (cluda): add a mechanism to select the best local size based on occupancy
* ?FEATURE (cluda): add support for rational numbers (based on int2)
* FEATURE (core): add group identifier to temporary allocations, with the guarantee that the allocations with different groups are not packed.
This may be used to ensure that buffers to be used on different devices are not packed,
which may be bad since OpenCL tries to preemptively move allocations from device to device.
It'll help when Threads start to support several devices.
Interface changes
-----------------
* ?API (core): make ``device_params`` an attribute of plan or plan factory?
* ?API (cluda): make dtypes.result_type() and dtypes.min_scalar_type() depend on device?
Internals
---------
* Integer arguments (shapes, strides, offsets, sizes) can be a ``numpy`` integer type instead of ``int``, which leads to problems with calls to C++ backend (see e.g. issue 50). Normalize everything integer in every possible function?
* ?API (cluda): having a reikna Array superclassing PyCUDA/PyOpenCL arrays is dangerous for seamless usage of Reikna computations with existing PyCUDA/PyOpenCL code.
Currently the only addition it has is the ``thread`` attribute, which is needed only
by ``reikna_integrator``.
Can we get rid of it and return to using normal arrays?
Note: now Array does some offset/base_data magic to make PyCUDA/PyOpenCL arrays behave the same.
* FIX (core): When we connect a transformation, difference in strides between arrays in the connection can be ignored (and probably the transformation's signature changed too; at least we need to decide which strides to use in the exposed node).
Proposal: leave it as is; make existing transformations "propagate" strides to results; and create a special transformation that only changes strides (or make it a parameter to the identity one).
Currently strides are not supported by PyCUDA or PyOpenCL, so this will wait.
  Idea: strides can be passed to compile() (in form of actual arrays, as a dictionary).
* ?FIX (core): investigate if the strides-to-flat-index algorithm requires updating to support strides which are not multiples of ``dtype.itemsize`` (see ``flat_index_expr()``).
* ?FIX (cluda): currently ``ctype_module()`` will throw an error if dtype is not aligned properly.
This guarantees that there's no disagreement between a dtype on numpy side and a struct on device side.
But the error thrown may be somewhere deep in the hierarchy and not at the point when a user supplies this dtype.
Perhaps add some ``check_alignment()`` function and use it, e.g. in ``reduce.Predicate``?
* ?FIX (cluda): we'll see what numpy folks say about struct alignment.
Perhaps ``dtypes._find_alignment()`` will not be necessary anymore.
* ?FIX (core): perhaps we should memoize parametrized modules too: for example, FFT produces dozens of modules for load and store (because it calls them in a loop).
* ?FEATURE: Need to cache the results of Computation.compile().
Even inside a single thread it can give a performance boost (e.g. code generation for FFT is especially slow).
* ?FIX (core): PureParallel.from_trf() relies on the implementation of transformations: it defines 'idx' variables so that the transformation's load_same()/store_same() could use them.
  Now if a user calls these in his custom computation they'll display a cryptic compilation error, since 'idx' variables are not defined.
We need to either make PureParallel rely only on the public API, or define 'idx' variables in every static kernel so that load_same()/store_same() could be used anywhere.
* ?FIX (cluda): Is there a way to get number of shared memory banks and warp size from AMD device?
* ?FIX (cluda): find a way to get ``min_mem_coalesce_width`` for OpenCL
* ?FIX (cluda): what are we going to do with OpenCL platforms that do not support intra-block interaction?
(for example, Apple's implementation)
Currently we have a ``ValueError`` there.
Perhaps the best solution is to write specialized 'CPU' versions of the computations?
* ?FEATURE (core): Some mechanism to detect when two transformations are reading from the same node at the same index, and only read the global memory once. This can be done by storing node results in kernel-global variables instead of chaining functions like it's done now. The problem is that we have to be able to distinguish between several loads from the same node at different indices.
Scan
----
* Split struct fields for the local memory operations the same as in Reduce. Since Scan uses optimized bank access, it will increase speed for structure types.
* Refactor plan building code.
* Add dynamic workgroup size selection (in case the maximum one uses too many registers)
* Review the code of Reduce and make it resemble that of Scan (after all, they are very similar).
FFT
---
* FEATURE: test and possibly port FFT code from https://github.com/clMathLibraries/clFFT. It's probably faster than the Apple's code currently used.
CBRNG
-----
* FEATURE (computations): use dtypes for custom structures to pass a counter in CBRNG if the sampler is deterministic.
New computations
----------------
* FEATURE (computations): add matrix-vector and vector-vector multiplication (the latter can probably be implemented just as a specialized ``Reduce``)
* FEATURE (computations): add better block width finder for small matrices in matrixmul
* FEATURE (computations): add bitonic sort
* FEATURE (computations): add filter
* FEATURE (computations): add radix-3,5,7 for FFT
* FEATURE (computations): commonly required linalg functions: diagonalisation, inversion, decomposition, determinant of matrices, linalg.norm
* FEATURE (computations): median of an array:
1) brute force: sort over an axis and slice;
2) O(N): median of medians algorithm (need to investigate whether it is effective on GPU)
Long-term plans
===============
Kernel DSL
----------
A lot of interface problems to solve. See py2c project.
* allows one to do everything the C code can (defining and using structure types, creating variables and arrays on the stack etc)
* allows one to do everything Mako can (e.g. unroll loops)
* avoids explicit numeric function specifications (that is, can propagate types)
* kernels and internal functions can be executed as-is for debug purposes (or even in a special mode checking array bounds, bank conflicts or global memory access coalescing)
* note that the transformation DSL may be different from the kernel DSL (namely, more limited)
Correlations
------------
It is possible to define for any kernel and transformation which pairs of arrays are accessed in a correlated manner, i.e. something like:
\begin{definition}
Data-independent computation (DIC) is a function $F :: ThreadId -> [(MemId, Operation, Index)]$,
where $ThreadId = Int$, $MemId = Int$, $Index = Int$, $Operation = Input | Output$.
\end{definition}
\begin{definition}
DIC is said to have a decorrelation for buffers $m, n \in MemId$ and block size $b$, if
$\exists t_1, t_2 \in ThreadID, i \in Index |
block(t_1) \ne block(t_2),
(m, Input or Output, i) \in F(t_1) and (n, Output, i) \in F(t_2)$.
\end{definition}
\begin{theorem}
If, and only if a DIC has a dependency for buffers $m, n$,
then there exists an index $i$ such that
the order of operations accessing it in buffers $m, n$ is undefined,
and at least one of these operations is $Output$.
\end{theorem}
\begin{definition}
DIC is said to have a writing inconsistency for buffers $m, n$, if
$\exists i \in Index, t1, t2 \in ThreadId |
(m, Output, i) \in F(t) and (n, Output, i) \in F(t)$.
In other words, it does not rewrite the data.
\end{definition}
Simply put, if input and output are correlated, one can supply the same array for both parameters.
Then, when transformations are connected to kernels, we can propagate correlations (i.e. if A and B are correlated, and transformation B->B' is correlated, then A->B' are correlated) and derive correlations for the resulting kernel.
This is the correlation of access, and only depends on array shapes.
In practice there are all sorts of problems:
* correlation does not allow inplace operation if two arrays have different strides
* one needs to formally justify the propagation through attached transformation
* ... including cases when, say, output transformation reads from another array
* ... or if an array is padded and then unpadded - does the correlation still work? does it work for other arrays involved in this transformation?
* ... does it depend on the order and type of access (read/write)?
* how is end user supposed to take advantage of this knowledge?
It is doubtful that a user will call some methods of the computation to check whether he can use it inplace; he will rather look into documentation.
Variant: it may be used for error checking; i.e. to test that same array was not passed to decorrelated parameters.
* we cannot use it to pack temporary arrays, because even identically typed arrays are not guaranteed to start at the same physical memory, therefore "inplace" is meaningless for them
So for now I'm ditching this feature.
Temporary memory is still packed, but only taking into account its appearance in kernel arguments.
Computation provider
--------------------
Library that by request (perhaps, from other languages) returns kernels and call signatures for algorithms, using Python as a templating engine.
Namely, it returns:
1. A list of kernels to be executed in turn.
2. Signatures for each of the kernels (with named parameters and their types).
3. Call parameters for each of the kernels (grid, block, shared memory).
4. List of memory blocks to allocate and their names (which will be used to pass them to kernels when necessary according to signatures).
Problems:
1. More involved algorithms cannot be passed between languages this way (the ones that require synchronization in the middle, like adaptive-step ODE solver, for example).
2. Need to somehow pass device/context to this library from the caller. The kernels will have to be compiled in order to find out the register usage.
3. How to pass type derivation lambdas? Possible solution: limit derivations to <same_as>(x), <definite_type>, <complex_for>(x), <real_for>(x) and create some micro-DSL for passing these as strings.
Transformation DSL
------------------
Currently transformation code is quite difficult to read and write.
Perhaps some DSL can be devised to make it easier?
Even better, if that DSL could be applied to kernels too.
Take a look at:
* Copperhead (Python-based DSL for GPGPU)
* CodePy (Python -> AST transformer)
* Clyther (subset of Python -> OpenCL code)
* https://github.com/mdipierro/mdpcl (same)
* Parakeet: https://github.com/iskandr/parakeet
| /reikna-0.8.0.tar.gz/reikna-0.8.0/TODO.rst | 0.866161 | 0.814754 | TODO.rst | pypi |
.. _tutorial-modules:
******************************
Tutorial: modules and snippets
******************************
Modules and snippets are important primitives in CLUDA which are used in the rest of ``reikna``, although mostly internally.
Even if you do not write modules yourself, you will most likely use operations from the :py:mod:`~reikna.cluda.functions` module, or common transformations from the :py:mod:`~reikna.transformations` module, which are essentially snippet and module factories (callables returning :py:class:`~reikna.cluda.Snippet` and :py:class:`~reikna.cluda.Module` objects).
Therefore it helps if you know how they work under the hood.
Snippets
========
Snippets are ``Mako`` template defs (essentially functions returning rendered text) with the associated dictionary of render keywords.
Some computations which are parametrized by custom code (for example, :py:class:`~reikna.algorithms.PureParallel`) require this code to be provided in form of a snippet with a certain call signature.
When a snippet is used in a template, the result is quite straightforward: its template function is called, rendering and returning its contents, just as a normal ``Mako`` def.
Let us demonstrate it with a simple example.
Consider the following snippet:
::
add = Snippet("""
<%def name="add(varname)">
${varname} + ${num}
</%def>
""",
render_kwds=dict(num=1))
Now we can compile a template which uses this snippet:
::
program = thr.compile("""
KERNEL void test(int *arr)
{
const SIZE_T idx = get_global_id(0);
int a = arr[idx];
arr[idx] = ${add('x')};
}
""",
render_kwds=dict(add=add))
As a result, the code that gets compiled is
::
KERNEL void test(int *arr)
{
const SIZE_T idx = get_global_id(0);
int a = arr[idx];
arr[idx] = x + 1;
}
If the snippet is used without parentheses (e.g. ``${add}``), it is equivalent to calling it without arguments (``${add()}``).
The root code that gets passed to :py:meth:`~reikna.cluda.api.Thread.compile` can be viewed as a snippet with an empty signature.
Modules
=======
Modules are quite similar to snippets in a sense that they are also ``Mako`` defs with an associated dictionary of render keywords.
The difference lies in the way they are processed.
Consider a module containing a single function:
::
add = Module("""
<%def name="add(prefix, arg)">
WITHIN_KERNEL int ${prefix}(int x)
{
return x + ${num} + ${arg};
}
</%def>
""",
render_kwds=dict(num=1))
Modules contain complete C entities (function, macros, structures) and get rendered in the root level of the source file.
In order to avoid name clashes, their def gets a string as a first argument, which it has to use to prefix these entities' names.
If the module contains only one entity that is supposed to be used by the parent code, it is a good idea to set its name to ``prefix`` only, to simplify its usage.
Let us now create a kernel that uses this module:
::
program = thr.compile("""
KERNEL void test(int *arr)
{
const SIZE_T idx = get_global_id(0);
int a = arr[idx];
arr[idx] = ${add(2)}(x);
}
""",
render_kwds=dict(add=add))
Before the compilation render keywords are inspected, and if a module object is encountered, the following things happen:
1. This object's ``render_kwds`` are inspected recursively and any modules there are rendered in the same way as described here, producing a source file.
2. The module itself gets assigned a new prefix and its template function is rendered with this prefix as the first argument, with the positional arguments given following it.
The result is attached to the source file.
3. The corresponding value in the current ``render_kwds`` is replaced by the newly assigned prefix.
With the code above, the rendered module will produce the code
::
WITHIN_KERNEL int _module0_(int x)
{
return x + 1 + 2;
}
and the ``add`` keyword in the ``render_kwds`` gets its value changed to ``_module0_``.
Then the main code is rendered and appended to the previously rendered parts, giving
::
WITHIN_KERNEL int _module0_(int x)
{
        return x + 1 + 2;
}
KERNEL void test(int *arr)
{
const SIZE_T idx = get_global_id(0);
int a = arr[idx];
arr[idx] = _module0_(x);
}
which is then passed to the compiler.
If your module's template def does not take any arguments except for ``prefix``, you can call it in the parent template just as ``${add}`` (without empty parentheses).
.. warning::
Note that ``add`` in this case is not a string, it is an object that has ``__str__()`` defined.
If you want to concatenate a module prefix with some other string, you have to either call ``str()`` explicitly (``str(add) + "abc"``), or concatenate it inside a template (``${add} abc``).
Modules can reference snippets in their ``render_kwds``, which, in turn, can reference other modules.
This produces a tree-like structure with the snippet made from the code passed by user at the root.
When it is rendered, it is traversed depth-first, modules are extracted from it and arranged in a flat list in the order of appearance.
Their positions in ``render_kwds`` are replaced by assigned prefixes.
This flat list is then rendered, producing a single source file being fed to the compiler.
Note that if the same module object was used without arguments in several other modules or in the kernel itself, it will only be rendered once.
Therefore one can create a "root" module with the data structure declaration and then use that structure in other modules without producing type errors on compilation.
Shortcuts
=========
The amount of boilerplate code can be somewhat reduced by using :py:meth:`Snippet.create <reikna.cluda.Snippet.create>` and :py:meth:`Module.create <reikna.cluda.Module.create>` constructors.
For the snippet above it would look like:
::
add = Snippet.create(
lambda varname: "${varname} + ${num}",
render_kwds=dict(num=1))
Note that the lambda here serves only to provide the information about the ``Mako`` def's signature.
Therefore it should return the template code regardless of the actual arguments passed.
If the argument list is created dynamically, you can use :py:func:`~reikna.helpers.template_def` with a normal constructor:
::
argnames = ['varname']
add = Snippet(
template_def(argnames, "${varname} + ${num}"),
render_kwds=dict(num=1))
Modules have a similar shortcut constructor.
The only difference is that by default the resulting template def has one positional argument called ``prefix``.
If you provide your own signature, its first positional argument will receive the prefix value.
::
add = Module.create("""
WITHIN_KERNEL int ${prefix}(int x)
{
return x + ${num};
}
""",
render_kwds=dict(num=1))
Of course, both :py:class:`~reikna.cluda.Snippet` and :py:class:`~reikna.cluda.Module` constructors can take already created ``Mako`` defs, which is convenient if you keep templates in a separate file.
Module and snippet discovery
============================
Sometimes you may want to pass a module or a snippet inside a template as an attribute of a custom object.
In order for CLUDA to be able to discover and process it without modifying your original object, you need to make your object comply to a discovery protocol.
The protocol method takes a processing function and is expected to return a **new object** of the same class with the processing function applied to all the attributes that may contain a module or a snippet.
By default, objects of type ``tuple``, ``list``, and ``dict`` are discoverable.
For example:
::
class MyClass:
def __init__(self, coeff, mul_module, div_module):
self.coeff = coeff
self.mul = mul_module
self.div = div_module
def __process_modules__(self, process):
return MyClass(self.coeff, process(self.mul), process(self.div))
Nontrivial example
==================
Modules were introduced to help split big kernels into small reusable pieces which in ``CUDA`` or ``OpenCL`` program would be put into different source or header files.
For example, a random number generator may be assembled from a function generating random integers, a function transforming these integers into random numbers with a certain distribution, and a :py:class:`~reikna.algorithms.PureParallel` computation calling these functions and saving results to global memory.
These two functions can be extracted into separate modules, so that a user could call them from some custom kernel if he does not need to store the intermediate results.
Going further with this example, one notices that functions that produce randoms with sophisticated distributions are often based on simpler distributions.
For instance, the commonly used Marsaglia algorithm for generating Gamma-distributed random numbers requires several uniformly and normally distributed randoms.
Normally distributed randoms, in turn, require several uniformly distributed randoms --- with the range which differs from the one for uniformly distributed randoms used by the initial Gamma distribution.
Instead of copy-pasting the function or setting its parameters dynamically (which in more complicated cases may affect the performance), one just specifies the dependencies between modules and lets the underlying system handle things.
The final render tree may look like:
::
Snippet(
PureParallel,
render_kwds = {
base_rng -> Snippet(...)
gamma -> Snippet(
} Gamma,
render_kwds = {
uniform -> Snippet(...)
normal -> Snippet(
} Normal,
) render_kwds = {
uniform -> Snippet(...)
}
)
| /reikna-0.8.0.tar.gz/reikna-0.8.0/doc/source/tutorial-modules.rst | 0.868506 | 0.730314 | tutorial-modules.rst | pypi |
************
Introduction
************
This section contains a brief illustration of what ``reikna`` does.
For more details see :ref:`basic <tutorial-basic>` and :ref:`advanced <tutorial-advanced>` tutorials.
CLUDA
=====
CLUDA is an abstraction layer on top of PyCUDA/PyOpenCL.
Its main purpose is to separate the rest of ``reikna`` from the difference in their APIs, but it can be used by itself too for some simple tasks.
Consider the following example, which is very similar to the one from the index page on PyCUDA documentation:
.. testcode:: cluda_simple_example
import numpy
import reikna.cluda as cluda
N = 256
api = cluda.ocl_api()
thr = api.Thread.create()
program = thr.compile("""
KERNEL void multiply_them(
GLOBAL_MEM float *dest,
GLOBAL_MEM float *a,
GLOBAL_MEM float *b)
{
const SIZE_T i = get_local_id(0);
dest[i] = a[i] * b[i];
}
""")
multiply_them = program.multiply_them
a = numpy.random.randn(N).astype(numpy.float32)
b = numpy.random.randn(N).astype(numpy.float32)
a_dev = thr.to_device(a)
b_dev = thr.to_device(b)
dest_dev = thr.empty_like(a_dev)
multiply_them(dest_dev, a_dev, b_dev, local_size=N, global_size=N)
print((dest_dev.get() - a * b == 0).all())
.. testoutput:: cluda_simple_example
:hide:
True
If you are familiar with ``PyCUDA`` or ``PyOpenCL``, you will easily understand all the steps we have made here.
The ``cluda.ocl_api()`` call is the only place where OpenCL is mentioned, and if you replace it with ``cluda.cuda_api()`` it will be enough to make the code use CUDA.
The abstraction is achieved by using generic API module on the Python side, and special macros (:c:macro:`KERNEL`, :c:macro:`GLOBAL_MEM`, and others) on the kernel side.
The argument of :py:meth:`~reikna.cluda.api.Thread.compile` method can also be a template, which is quite useful for metaprogramming, and also used to compensate for the lack of complex number operations in CUDA and OpenCL.
Let us illustrate both scenarios by making the initial example multiply complex arrays.
The template engine of choice in ``reikna`` is `Mako <http://www.makotemplates.org>`_, and you are encouraged to read about it as it is quite useful. For the purpose of this example all we need to know is that ``${python_expression()}`` is a syntax construct which renders the expression result.
.. testcode:: cluda_template_example
import numpy
from numpy.linalg import norm
from reikna import cluda
from reikna.cluda import functions, dtypes
N = 256
dtype = numpy.complex64
api = cluda.ocl_api()
thr = api.Thread.create()
program = thr.compile("""
KERNEL void multiply_them(
GLOBAL_MEM ${ctype} *dest,
GLOBAL_MEM ${ctype} *a,
GLOBAL_MEM ${ctype} *b)
{
const SIZE_T i = get_local_id(0);
dest[i] = ${mul}(a[i], b[i]);
}
""", render_kwds=dict(
ctype=dtypes.ctype(dtype),
mul=functions.mul(dtype, dtype)))
multiply_them = program.multiply_them
r1 = numpy.random.randn(N).astype(numpy.float32)
r2 = numpy.random.randn(N).astype(numpy.float32)
a = r1 + 1j * r2
b = r1 - 1j * r2
a_dev = thr.to_device(a)
b_dev = thr.to_device(b)
dest_dev = thr.empty_like(a_dev)
multiply_them(dest_dev, a_dev, b_dev, local_size=N, global_size=N)
print(norm(dest_dev.get() - a * b) / norm(a * b) <= 1e-6)
.. testoutput:: cluda_template_example
:hide:
True
Note that CLUDA ``Thread`` is created by means of a static method and not using the constructor.
The constructor is reserved for more probable scenario, where we want to include some ``reikna`` functionality in a larger program, and we want it to use the existing context and stream/queue (see the :py:class:`~reikna.cluda.api.Thread` constructor).
In this case all further operations with the thread will be performed using the objects provided.
Here we have passed two values to the template: ``ctype`` (a string with C type name), and ``mul`` which is a :py:class:`~reikna.cluda.Module` object containing a single multiplication function.
The object is created by a function :py:func:`~reikna.cluda.functions.mul` which takes data types being multiplied and returns a module that was parametrized accordingly.
Inside the template the variable ``mul`` is essentially the prefix for all the global C objects (functions, structures, macros etc) from the module.
If there is only one public object in the module (which is recommended), it is a common practice to give it the name consisting just of the prefix, so that it could be called easily from the parent code.
For more information on modules, see :ref:`tutorial-modules`; the complete list of things available in CLUDA can be found in :ref:`CLUDA reference <api-cluda>`.
Computations
============
Now it's time for the main part of the functionality.
``reikna`` provides GPGPU algorithms in the form of :py:class:`~reikna.core.Computation`-based cores and :py:class:`~reikna.core.Transformation`-based plug-ins.
Computations contain the algorithm itself; examples are matrix multiplication, reduction, sorting and so on.
Transformations are parallel operations on inputs or outputs of computations, used for scaling, typecast and other auxiliary purposes.
Transformations are compiled into the main computation kernel and are therefore quite cheap in terms of performance.
As an example, we will consider the matrix multiplication.
.. testcode:: matrixmul_example
import numpy
from numpy.linalg import norm
import reikna.cluda as cluda
from reikna.linalg import MatrixMul
api = cluda.ocl_api()
thr = api.Thread.create()
shape1 = (100, 200)
shape2 = (200, 100)
a = numpy.random.randn(*shape1).astype(numpy.float32)
b = numpy.random.randn(*shape2).astype(numpy.float32)
a_dev = thr.to_device(a)
b_dev = thr.to_device(b)
res_dev = thr.array((shape1[0], shape2[1]), dtype=numpy.float32)
dot = MatrixMul(a_dev, b_dev, out_arr=res_dev)
dotc = dot.compile(thr)
dotc(res_dev, a_dev, b_dev)
res_reference = numpy.dot(a, b)
print(norm(res_dev.get() - res_reference) / norm(res_reference) < 1e-6)
.. testoutput:: matrixmul_example
:hide:
True
Most of the code above should be already familiar, with the exception of the creation of :py:class:`~reikna.linalg.MatrixMul` object.
The computation constructor takes two array-like objects, representing arrays that will participate in the computation.
After that the computation object has to be compiled.
The :py:meth:`~reikna.core.Computation.compile` method requires a :py:class:`~reikna.cluda.api.Thread` object, which serves as a source of data about the target API and device, and provides an execution queue.
Transformations
===============
Now imagine that you want to multiply complex matrices, but real and imaginary parts of your data are kept in separate arrays.
You could create additional kernels that would join your data into arrays of complex values, but this would require additional storage and additional calls to GPU.
Transformation API allows you to connect these transformations to the core computation --- matrix multiplication --- effectively adding the code into the main computation kernel and changing its signature.
Let us change the previous example and connect transformations to it.
.. testcode:: transformation_example
import numpy
from numpy.linalg import norm
import reikna.cluda as cluda
from reikna.core import Type
from reikna.linalg import MatrixMul
from reikna.transformations import combine_complex
api = cluda.ocl_api()
thr = api.Thread.create()
shape1 = (100, 200)
shape2 = (200, 100)
a_re = numpy.random.randn(*shape1).astype(numpy.float32)
a_im = numpy.random.randn(*shape1).astype(numpy.float32)
b_re = numpy.random.randn(*shape2).astype(numpy.float32)
b_im = numpy.random.randn(*shape2).astype(numpy.float32)
arrays = [thr.to_device(x) for x in [a_re, a_im, b_re, b_im]]
a_re_dev, a_im_dev, b_re_dev, b_im_dev = arrays
a_type = Type(numpy.complex64, shape=shape1)
b_type = Type(numpy.complex64, shape=shape2)
res_dev = thr.array((shape1[0], shape2[1]), dtype=numpy.complex64)
dot = MatrixMul(a_type, b_type, out_arr=res_dev)
combine_a = combine_complex(a_type)
combine_b = combine_complex(b_type)
dot.parameter.matrix_a.connect(
combine_a, combine_a.output, a_re=combine_a.real, a_im=combine_a.imag)
dot.parameter.matrix_b.connect(
combine_b, combine_b.output, b_re=combine_b.real, b_im=combine_b.imag)
dotc = dot.compile(thr)
dotc(res_dev, a_re_dev, a_im_dev, b_re_dev, b_im_dev)
res_reference = numpy.dot(a_re + 1j * a_im, b_re + 1j * b_im)
print(norm(res_dev.get() - res_reference) / norm(res_reference) < 1e-6)
.. testoutput:: transformation_example
:hide:
True
We have used a pre-created transformation :py:func:`~reikna.transformations.combine_complex` from :py:mod:`reikna.transformations` for simplicity; developing a custom transformation is also possible and described in :ref:`tutorial-advanced-transformation`.
From the documentation we know that it transforms two inputs into one output; therefore we need to attach it to one of the inputs of ``dot`` (identified by its name), and provide names for two new inputs.
Names to attach to are obtained from the documentation for the particular computation; for :py:class:`~reikna.linalg.MatrixMul` these are ``out``, ``a`` and ``b``.
In the current example we have attached the transformations to both inputs.
Note that the computation has a new signature now, and the compiled ``dot`` object now works with split complex numbers.
| /reikna-0.8.0.tar.gz/reikna-0.8.0/doc/source/introduction.rst | 0.900991 | 0.727794 | introduction.rst | pypi |
.. _tutorial-basic:
****************
Tutorial: basics
****************
Usage of computations
=====================
All ``reikna`` computation classes are derived from the :py:class:`~reikna.core.Computation` class and therefore share the same API and behavior.
A computation object is an opaque typed function-like object containing all the information necessary to generate GPU kernels that implement some algorithm, along with necessary internal temporary and persistent memory buffers.
Before use it needs to be compiled by calling :py:meth:`~reikna.core.Computation.compile` for a given :py:class:`~reikna.cluda.api.Thread` (thus using its associated device and queue).
This method returns a :py:class:`~reikna.core.computation.ComputationCallable` object which takes GPU arrays and scalar parameters and calls its internal kernels.
Computations and transformations
================================
One often needs to perform some simple processing of the input or output values of a computation.
This can be scaling, splitting complex values into components, padding, and so on.
Some of these operations require additional memory to store intermediate results, and all of them involve additional overhead of calling the kernel, and passing values to and from the device memory.
``Reikna`` provides an API to define such transformations and attach them to "core" computations, effectively compiling the transformation code into the main kernel(s), thus avoiding all these drawbacks.
Transformation tree
===================
Before talking about transformations themselves, we need to take a closer look at the computation signatures.
Every :py:class:`~reikna.core.Computation` object has a :py:attr:`~reikna.core.Computation.signature` attribute containing ``funcsigs.Signature`` object.
It is the same signature object as can be extracted from any Python function using ``funcsigs.signature`` function (or ``inspect.signature`` from the standard library for Python >= 3.3).
When the computation object is compiled, the resulting callable will have this exact signature.
The base signature for any computation can be found in its documentation (and, sometimes, can depend on the arguments passed to its constructor --- see, for example, :py:class:`~reikna.algorithms.PureParallel`).
The signature can change if a user connects transformations to some parameter via :py:meth:`~reikna.core.Computation.connect`; in this case the :py:attr:`~reikna.core.Computation.signature` attribute will change accordingly.
All attached transformations form a tree with roots being the base parameters computation has right after creation, and leaves forming the user-visible signature, which the compiled :py:class:`~reikna.core.computation.ComputationCallable` will have.
As an example, let us consider a pure parallel computation object with one output, two inputs and a scalar parameter, which performs the calculation ``out = in1 + in2 + param``:
.. testcode:: transformation_example
from __future__ import print_function
import numpy
from reikna import cluda
from reikna.cluda import Snippet
from reikna.core import Transformation, Type, Annotation, Parameter
from reikna.algorithms import PureParallel
import reikna.transformations as transformations
arr_t = Type(numpy.float32, shape=128)
carr_t = Type(numpy.complex64, shape=128)
comp = PureParallel(
[Parameter('out', Annotation(carr_t, 'o')),
Parameter('in1', Annotation(carr_t, 'i')),
Parameter('in2', Annotation(carr_t, 'i')),
Parameter('param', Annotation(numpy.float32))],
"""
VSIZE_T idx = ${idxs[0]};
${out.store_idx}(
idx, ${in1.load_idx}(idx) + ${in2.load_idx}(idx) + ${param});
""")
The details of creating the computation itself are not important for this example; they are provided here just for the sake of completeness.
The initial transformation tree of ``comp`` object looks like:
::
| out | >>
>> | in1 |
>> | in2 |
>> | param |
Here the insides of ``||`` are the base computation (the one defined by the developer), and ``>>`` denote inputs and outputs provided by the user.
The computation signature is:
.. doctest:: transformation_example
>>> for param in comp.signature.parameters.values():
... print(param.name + ":" + repr(param.annotation))
out:Annotation(Type(complex64, shape=(128,), strides=(8,)), role='o')
in1:Annotation(Type(complex64, shape=(128,), strides=(8,)), role='i')
in2:Annotation(Type(complex64, shape=(128,), strides=(8,)), role='i')
param:Annotation(float32)
Now let us attach the transformation to the output which will split it into two halves: ``out1 = out / 2``, ``out2 = out / 2``:
.. testcode:: transformation_example
tr = transformations.split_complex(comp.parameter.out)
comp.parameter.out.connect(tr, tr.input, out1=tr.real, out2=tr.imag)
We have used the pre-created transformation here for simplicity; writing custom transformations is described in :ref:`tutorial-advanced-transformation`.
In addition, we want ``in2`` to be scaled before being passed to the main computation.
To achieve this, we connect the scaling transformation to it:
.. testcode:: transformation_example
tr = transformations.mul_param(comp.parameter.in2, numpy.float32)
comp.parameter.in2.connect(tr, tr.output, in2_prime=tr.input, param2=tr.param)
The transformation tree now looks like:
::
| out | ----> out1 >>
| | \-> out2 >>
>> | in1 |
>> in2_prime ------> | in2 |
>> param2 ----/ | |
| param |
As can be seen, nothing has changed from the base computation's point of view: it still gets the same inputs and outputs to the same array.
But user-supplied parameters (``>>``) have changed, which can be also seen in the value of the :py:attr:`~reikna.core.Computation.signature`:
.. doctest:: transformation_example
>>> for param in comp.signature.parameters.values():
... print(param.name + ":" + repr(param.annotation))
out1:Annotation(Type(float32, shape=(128,), strides=(4,)), role='o')
out2:Annotation(Type(float32, shape=(128,), strides=(4,)), role='o')
in1:Annotation(Type(complex64, shape=(128,), strides=(8,)), role='i')
in2_prime:Annotation(Type(complex64, shape=(128,), strides=(8,)), role='i')
param2:Annotation(float32)
param:Annotation(float32)
Notice that the order of the final signature is obtained by traversing the transformation tree depth-first, starting from the base parameters.
For more details see the note in the documentation for :py:meth:`~reikna.core.Computation.connect`.
The resulting computation returns the value ``in1 + (in2_prime * param2) + param`` split in half.
In order to run it, we have to compile it first.
When the computation is compiled, the data types and shapes of the supplied arguments will be propagated to the roots and used to prepare the original computation.
.. testcode:: transformation_example
api = cluda.ocl_api()
thr = api.Thread.create()
in1_t = comp.parameter.in1
in2p_t = comp.parameter.in2_prime
out1 = thr.empty_like(comp.parameter.out1)
out2 = thr.empty_like(comp.parameter.out2)
in1 = thr.to_device(numpy.ones(in1_t.shape, in1_t.dtype))
in2_prime = thr.to_device(numpy.ones(in2p_t.shape, in2p_t.dtype))
c_comp = comp.compile(thr)
c_comp(out1, out2, in1, in2_prime, 4, 3)
Transformation restrictions
===========================
There are some limitations of the transformation mechanics:
#. Transformations are purely parallel, that is they cannot use local memory.
In fact, they are very much like :py:class:`~reikna.algorithms.PureParallel` computations,
except that the indices they use are defined by the main computation,
and not set by the GPU driver.
#. External endpoints of the output transformations cannot point to existing nodes in the transformation tree.
This is the direct consequence of the first limitation --- it would unavoidably create races between memory writes from different branches.
On the other hand, input transformations can be safely connected to existing nodes, including base nodes (although note that inputs are not cached; so even if you load twice from the same index of the same input node, the global memory will be queried twice).
| /reikna-0.8.0.tar.gz/reikna-0.8.0/doc/source/tutorial-basic.rst | 0.964212 | 0.922796 | tutorial-basic.rst | pypi |
.. _tutorial-advanced:
*************************
Tutorial: advanced topics
*************************
This tutorial goes into more detail about the internals of computations and transformations, describing how to write them.
Mako basics
===========
``Reikna`` uses `Mako <http://makotemplates.org>`_ extensively as a templating engine for transformations and computations.
For the purpose of this tutorial you only need to know several things about the syntax:
* Most of Mako syntax is plain Python, with the set of global variables specified externally by the code doing the template rendering
* ``${expr}`` evaluates Python expression ``expr``, calls ``str()`` on the result and puts it into the text
* a pair of ``<%`` and ``%>`` executes Python code inside, which may introduce some local variables
* a pair of ``<%def name="func(a, b)">`` and ``</%def>`` defines a template function, which actually becomes a Python function which can be called as ``func(a, b)`` from the other part of the template and returns a rendered string
.. _tutorial-advanced-transformation:
Writing a transformation
========================
Some common transformations are already available from :py:mod:`~reikna.transformations` module.
But you can create a custom one if you need to.
Transformations are based on the class :py:class:`~reikna.core.Transformation`, and are very similar to :py:class:`~reikna.algorithms.PureParallel` instances, with some additional limitations.
Let us consider a (not very useful, but quite involved) example:
::
tr = Transformation(
[
Parameter('out1', Annotation(Type(numpy.float32, shape=100), 'o')),
Parameter('out2', Annotation(Type(numpy.float32, shape=80), 'o')),
Parameter('in1', Annotation(Type(numpy.float32, shape=100), 'i')),
Parameter('in2', Annotation(Type(numpy.float32, shape=100), 'i')),
Parameter('param', Annotation(Type(numpy.float32))),
],
"""
VSIZE_T idx = ${idxs[0]};
float i1 = ${in1.load_same};
float i2 = ${in2.load_idx}(100 - idx) * ${param};
${out1.store_same}(i1);
if (idx < 80)
${out2.store_same}(i2);
""",
connectors=['in1', 'out1'])
**Connectors.**
A transformation gets activated when the main computation attempts to load some value from some index in global memory, or store one to some index.
This index is passed to the transformation attached to the corresponding parameter, and used to invoke loads/stores either without changes (to perform strictly elementwise operations), or, possibly, with some changes (as the example illustrates).
If some parameter is only queried once, and only using ``load_same`` or ``store_same``, it is called a *connector*, which means that it can be used to attach the transformation to a computation.
Currently connectors cannot be detected automatically, so it is the responsibility of the user to provide a list of them to the constructor.
By default all parameters are considered to be connectors.
**Shape changing.**
Parameters in transformations are typed, and it is possible to change data type or shape of a parameter the transformation is attached to.
In our example ``out2`` has length 80, so the current index is checked before the output to make sure there is no out of bounds access.
**Parameter objects.**
The transformation example above has some hardcoded stuff, for example the type of parameters (``float``), or their shapes (``100`` and ``80``).
These can be accessed from argument objects ``out1``, ``in1`` etc; they all have the type :py:class:`~reikna.core.transformation.KernelParameter`.
In addition, the transformation code gets an :py:class:`~reikna.core.Indices` object with the name ``idxs``, which allows one to manipulate index names directly.
.. _tutorial-advanced-computation:
Writing a computation
=====================
A computation must derive :py:class:`~reikna.core.Computation`.
As an example, let us create a computation which calculates ``output = input1 + input2 * param``.
Defining a class:
::
import numpy
from reikna.helpers import *
from reikna.core import *
class TestComputation(Computation):
Each computation class has to define the constructor, and the plan building callback.
**Constructor.**
:py:class:`~reikna.core.Computation` constructor takes a list of computation parameters, which the deriving class constructor has to create according to arguments passed to it.
You will often need :py:class:`~reikna.core.Type` objects, which can be extracted from arrays, scalars or other :py:class:`~reikna.core.Type` objects with the help of :py:meth:`~reikna.core.Type.from_value` (or they can be passed straight to :py:class:`~reikna.core.Annotation`) which does the same thing.
::
def __init__(self, arr, coeff):
assert len(arr.shape) == 1
Computation.__init__(self, [
Parameter('output', Annotation(arr, 'o')),
Parameter('input1', Annotation(arr, 'i')),
Parameter('input2', Annotation(arr, 'i')),
Parameter('param', Annotation(coeff))])
In addition to that, the constructor can create some internal state which will be used by the plan builder.
**Plan builder.**
The second method is called when the computation is being compiled, and has to fill and return the computation plan --- a sequence of kernel calls, plus maybe some temporary or persistent internal allocations its kernels use.
In addition, the plan can include calls to nested computations.
The method takes two predefined positional parameters, plus :py:class:`~reikna.core.computation.KernelArgument` objects corresponding to computation parameters.
The ``plan_factory`` is a callable that creates a new :py:class:`~reikna.core.computation.ComputationPlan` object (in some cases you may want to recreate the plan, for example, if the workgroup size you were using turned out to be too big), and ``device_params`` is a :py:class:`~reikna.cluda.api.DeviceParameters` object, which is used to optimize the computation for the specific device.
The method must return a filled :py:class:`~reikna.core.computation.ComputationPlan` object.
For our example we only need one action, which is the execution of an elementwise kernel:
::
def _build_plan(self, plan_factory, device_params, output, input1, input2, param):
plan = plan_factory()
template = template_from(
"""
<%def name='testcomp(kernel_declaration, k_output, k_input1, k_input2, k_param)'>
${kernel_declaration}
{
VIRTUAL_SKIP_THREADS;
const VSIZE_T idx = virtual_global_id(0);
${k_output.ctype} result =
${k_input1.load_idx}(idx) +
${mul}(${k_input2.load_idx}(idx), ${k_param});
${k_output.store_idx}(idx, result);
}
</%def>
""")
plan.kernel_call(
template.get_def('testcomp'),
[output, input1, input2, param],
global_size=output.shape,
render_kwds=dict(mul=functions.mul(input2.dtype, param.dtype)))
return plan
Every kernel call is based on the separate ``Mako`` template def.
The template can be specified as a string using :py:func:`~reikna.helpers.template_def`, or loaded as a separate file.
Usual pattern in this case is to call the template file same as the file where the computation class is defined (for example, ``testcomp.mako`` for ``testcomp.py``), and store it in some variable on module load using :py:func:`~reikna.helpers.template_for` as ``TEMPLATE = template_for(__file__)``.
The template function should take the same number of positional arguments as the kernel plus one; you can view ``<%def ... >`` part as an actual kernel definition, but with the arguments being :py:class:`~reikna.core.transformation.KernelParameter` objects containing parameter metadata.
The first argument will contain the string with the kernel declaration.
Also, depending on whether the corresponding argument is an output array, an input array or a scalar parameter, the object can be used as ``${obj.store_idx}(index, val)``, ``${obj.load_idx}(index)`` or ``${obj}``.
This will produce the corresponding request to the global memory or kernel arguments.
If you need additional device functions, they have to be specified between ``<%def ... >`` and ``${kernel_declaration}``.
Obviously, these functions can still use ``dtype`` and ``ctype`` object properties, although ``store_idx`` and ``load_idx`` will most likely result in compilation error (since they are rendered as macros using main kernel arguments).
Since kernel call parameters (``global_size`` and ``local_size``) are specified on creation, all kernel calls are rendered as CLUDA static kernels (see :py:meth:`~reikna.cluda.api.Thread.compile_static`) and therefore can use all the corresponding macros and functions (like :c:func:`virtual_global_id` in our kernel).
Also, they must have :c:macro:`VIRTUAL_SKIP_THREADS` at the beginning of the kernel, which skips the remainder threads (these can be present, for example, if the global size is not a multiple of the workgroup size).
| /reikna-0.8.0.tar.gz/reikna-0.8.0/doc/source/tutorial-advanced.rst | 0.929895 | 0.91118 | tutorial-advanced.rst | pypi |
import numpy
from reikna.fft import FFT
import reikna.helpers as helpers
from reikna.cluda import dtypes, any_api
from reikna.core import Computation, Parameter, Annotation, Type, Transformation
import reikna.cluda.functions as functions
from reikna.algorithms import Reduce, Scan, predicate_sum
# Mako template containing the kernel defs used by the computations below
# (loaded from the .mako file accompanying this module).
TEMPLATE = helpers.template_for(__file__)
# Reference functions
def rfft_reference(a):
    """Reference real-to-complex FFT of a 1D array with an even number of samples.

    Numpy already has a dedicated ``rfft()``; this version spells out the
    packed half-size complex FFT algorithm used by the RFFT computation.
    """
    assert a.size % 2 == 0
    assert a.ndim == 1

    full_size = a.size
    half = full_size // 2

    # Twiddle factors and the split coefficients.
    twiddles = numpy.exp(-2j * numpy.pi * numpy.arange(half) / full_size)
    coeff_a = 0.5 * (1 - 1j * twiddles)
    coeff_b = 0.5 * (1 + 1j * twiddles)

    # Pack even/odd samples into one complex signal and run a half-size FFT.
    packed = a[::2] + 1j * a[1::2]
    spectrum = numpy.fft.fft(packed)

    result = numpy.empty(half + 1, numpy.complex128)
    # conj(spectrum[(half - k) % half]), realized as reverse + roll.
    reflected = numpy.roll(spectrum[half - 1::-1], 1).conj()
    result[:half] = spectrum * coeff_a + reflected * coeff_b
    # Nyquist component.
    result[half] = spectrum[0].real - spectrum[0].imag
    return result
def irfft_reference(a):
    """Reference complex-to-real inverse FFT of a 1D array of odd length N/2+1.

    Numpy already has a dedicated ``irfft()``; this version spells out the
    packed half-size inverse FFT algorithm used by the IRFFT computation.

    NOTE: the imaginary parts of the first and last elements of ``a`` are
    zeroed in place (matching ``numpy.fft.irfft()``, which ignores them).
    """
    assert a.size % 2 == 1
    assert a.ndim == 1

    full_size = (a.size - 1) * 2
    half = full_size // 2

    # numpy.fft.irfft() ignores these imaginary parts as well.
    a[0] = a[0].real
    a[-1] = a[-1].real

    # Conjugated split coefficients of the forward algorithm.
    twiddles = numpy.exp(-2j * numpy.pi * numpy.arange(half) / full_size)
    coeff_a = (0.5 * (1 - 1j * twiddles)).conj()
    coeff_b = (0.5 * (1 + 1j * twiddles)).conj()

    # Recombine into the packed spectrum and invert with a half-size FFT.
    packed_spectrum = a[:-1] * coeff_a + a[half:0:-1].conj() * coeff_b
    packed = numpy.fft.ifft(packed_spectrum)

    # Unpack: real parts are the even samples, imaginary parts the odd ones.
    result = numpy.empty(full_size, numpy.float64)
    result[::2] = packed.real
    result[1::2] = packed.imag
    return result
def aprfft_reference(x):
    """Reference FFT of a real antiperiodic signal (x[k] = -x[N/2+k]).

    x : real N/2-array (the first half of the full signal)
    returns: complex N/4-array holding the odd harmonics.
    """
    assert x.size % 2 == 0
    assert x.ndim == 1

    full_size = x.size * 2
    angles = 2 * numpy.pi * numpy.arange(full_size // 2) / full_size

    # rfft of the sine-windowed signal encodes the differences between
    # consecutive odd harmonics.
    harmonics = numpy.fft.rfft(x * (4 * numpy.sin(angles)))

    # Real part of the first odd harmonic, computed directly.
    # (builtin sum is used deliberately to match the original accumulation order)
    first_re = 2 * sum(x * numpy.cos(angles))

    # Rotate, fix up the first element, and integrate the differences.
    harmonics = harmonics * (-1j)
    harmonics[0] = harmonics[0] / 2 + first_re
    return numpy.cumsum(harmonics[:-1])
def iaprfft_reference(X):
    """Reference IFFT of a real antiperiodic signal (x[k] = -x[N/2+k]).

    X : complex N/4-array of odd harmonics
    returns: real N/2-array (the first half of the signal).
    """
    assert X.ndim == 1

    full_size = X.size * 4
    quarter = full_size // 4

    # Rebuild the spectrum of the sine-windowed signal: interior elements
    # are 1j times the differences of consecutive odd harmonics, the
    # endpoints come from the imaginary parts of the edge harmonics.
    # (Endpoints are written directly, so no uninitialized values are read.)
    spectrum = numpy.empty(quarter + 1, numpy.complex128)
    spectrum[1:-1] = 1j * (X[1:] - X[:-1])
    spectrum[0] = -2 * X[0].imag
    spectrum[-1] = 2 * X[-1].imag

    windowed = numpy.fft.irfft(spectrum)

    result = numpy.empty(full_size // 2, numpy.float64)
    # Undo the sine window; index 0 (where the window vanishes) is
    # recovered from the sum of the harmonics instead.
    result[1:] = windowed[1:] / 4 / numpy.sin(
        2 * numpy.pi * numpy.arange(1, full_size // 2) / full_size)
    result[0] = X.real.sum() / (full_size / 2)
    return result
# GPU computations
def prepare_rfft_input(arr):
    """
    Returns a transformation packing a real array into a complex array of
    half the last-dimension size: even-indexed samples become the real
    parts, odd-indexed samples the imaginary parts.
    """
    res = Type(dtypes.complex_for(arr.dtype), arr.shape[:-1] + (arr.shape[-1] // 2,))
    return Transformation(
        [
            Parameter('output', Annotation(res, 'o')),
            Parameter('input', Annotation(arr, 'i')),
        ],
        """
        <%
            batch_idxs = " ".join((idx + ", ") for idx in idxs[:-1])
        %>
        ${input.ctype} re = ${input.load_idx}(${batch_idxs} ${idxs[-1]} * 2);
        ${input.ctype} im = ${input.load_idx}(${batch_idxs} ${idxs[-1]} * 2 + 1);
        ${output.store_same}(COMPLEX_CTR(${output.ctype})(re, im));
        """,
        connectors=['output'])
class RFFT(Computation):
    """
    Real-to-complex FFT over the last axis of ``arr_t``.

    The output is complex with last dimension N/2+1 (or N/2 when
    ``dont_store_last`` is set, which drops the last harmonic).
    The algorithm assumes an even last dimension N — cf. rfft_reference().
    """

    def __init__(self, arr_t, dont_store_last=False):
        # Whether to omit the last harmonic from the output.
        self._dont_store_last = dont_store_last
        output_size = arr_t.shape[-1] // 2 + (0 if dont_store_last else 1)
        out_arr = Type(
            dtypes.complex_for(arr_t.dtype),
            arr_t.shape[:-1] + (output_size,))
        Computation.__init__(self, [
            Parameter('output', Annotation(out_arr, 'o')),
            Parameter('input', Annotation(arr_t, 'i'))])

    def _build_plan(self, plan_factory, device_params, output, input_):
        plan = plan_factory()
        N = input_.shape[-1]

        # Coefficients recombining the half-size complex FFT into the
        # real FFT (see rfft_reference() for the underlying algorithm).
        WNmk = numpy.exp(-2j * numpy.pi * numpy.arange(N//2) / N)
        A = 0.5 * (1 - 1j * WNmk)
        B = 0.5 * (1 + 1j * WNmk)
        A_arr = plan.persistent_array(A)
        B_arr = plan.persistent_array(B)

        # Half-size complex FFT with the real input packed on the fly.
        cfft_arr = Type(output.dtype, input_.shape[:-1] + (input_.shape[-1] // 2,))
        cfft = FFT(cfft_arr, axes=(len(input_.shape) - 1,))
        prepare_input = prepare_rfft_input(input_)
        cfft.parameter.input.connect(
            prepare_input, prepare_input.output, real_input=prepare_input.input)

        temp = plan.temp_array_like(cfft.parameter.output)
        batch_size = helpers.product(output.shape[:-1])

        plan.computation_call(cfft, temp, input_)
        # Post-processing kernel combining the half spectrum with A and B.
        plan.kernel_call(
            TEMPLATE.get_def('prepare_rfft_output'),
            [output, temp, A_arr, B_arr],
            global_size=(batch_size, N // 2),
            render_kwds=dict(
                slices=(len(input_.shape) - 1, 1),
                N=N,
                mul=functions.mul(output.dtype, output.dtype),
                conj=functions.conj(output.dtype),
                dont_store_last=self._dont_store_last))
        return plan
def prepare_irfft_output(arr):
    """
    Returns a transformation unpacking a complex array into a real array of
    twice the last-dimension size: real parts go to even indices,
    imaginary parts to odd indices.
    """
    res = Type(dtypes.real_for(arr.dtype), arr.shape[:-1] + (arr.shape[-1] * 2,))
    return Transformation(
        [
            Parameter('output', Annotation(res, 'o')),
            Parameter('input', Annotation(arr, 'i')),
        ],
        """
        <%
            batch_idxs = " ".join((idx + ", ") for idx in idxs[:-1])
        %>
        ${input.ctype} x = ${input.load_same};
        ${output.store_idx}(${batch_idxs} ${idxs[-1]} * 2, x.x);
        ${output.store_idx}(${batch_idxs} ${idxs[-1]} * 2 + 1, x.y);
        """,
        connectors=['output'])
class IRFFT(Computation):
    """
    Complex-to-real inverse FFT over the last axis of ``arr_t``.

    The input holds N/2+1 harmonics; the output is real with last
    dimension N. Cf. irfft_reference() for the underlying algorithm.
    """

    def __init__(self, arr_t):
        output_size = (arr_t.shape[-1] - 1) * 2
        out_arr = Type(
            dtypes.real_for(arr_t.dtype),
            arr_t.shape[:-1] + (output_size,))
        Computation.__init__(self, [
            Parameter('output', Annotation(out_arr, 'o')),
            Parameter('input', Annotation(arr_t, 'i'))])

    def _build_plan(self, plan_factory, device_params, output, input_):
        plan = plan_factory()
        N = (input_.shape[-1] - 1) * 2

        # Conjugated coefficients of the forward split
        # (see irfft_reference() for the underlying algorithm).
        WNmk = numpy.exp(-2j * numpy.pi * numpy.arange(N//2) / N)
        A = 0.5 * (1 - 1j * WNmk)
        B = 0.5 * (1 + 1j * WNmk)
        A_arr = plan.persistent_array(A.conj())
        B_arr = plan.persistent_array(B.conj())

        # Half-size inverse complex FFT whose output is unpacked into
        # the real result on the fly.
        cfft_arr = Type(input_.dtype, input_.shape[:-1] + (N // 2,))
        cfft = FFT(cfft_arr, axes=(len(input_.shape) - 1,))
        prepare_output = prepare_irfft_output(cfft.parameter.output)
        cfft.parameter.output.connect(
            prepare_output, prepare_output.input, real_output=prepare_output.output)

        temp = plan.temp_array_like(cfft.parameter.input)
        batch_size = helpers.product(output.shape[:-1])

        # Pre-processing kernel packing the spectrum with A and B.
        plan.kernel_call(
            TEMPLATE.get_def('prepare_irfft_input'),
            [temp, input_, A_arr, B_arr],
            global_size=(batch_size, N // 2),
            render_kwds=dict(
                slices=(len(input_.shape) - 1, 1),
                N=N,
                mul=functions.mul(input_.dtype, input_.dtype),
                conj=functions.conj(input_.dtype)))
        plan.computation_call(cfft, output, temp, inverse=True)
        return plan
def get_multiply(output):
    """
    Returns a transformation performing an elementwise product where the
    second factor is a 1D array indexed along the last axis:
    ``output = a * b[last_index]``.
    """
    last_axis_t = Type(output.dtype, (output.shape[-1],))
    product = functions.mul(output.dtype, output.dtype)
    return Transformation(
        [
            Parameter('output', Annotation(output, 'o')),
            Parameter('a', Annotation(output, 'i')),
            Parameter('b', Annotation(last_axis_t, 'i'))
        ],
        """
        ${output.store_same}(${mul}(${a.load_same}, ${b.load_idx}(${idxs[-1]})));
        """,
        connectors=['output', 'a'],
        render_kwds=dict(mul=product)
    )
def get_prepare_prfft_scan(output):
    """
    Returns a transformation preparing the rfft result for the cumulative
    sum in APRFFT: ``Y -> -1j * Y``, with element 0 additionally halved and
    shifted by ``re_X_0`` (cf. aprfft_reference()).
    """
    return Transformation(
        [
            Parameter('output', Annotation(output, 'o')),
            Parameter('Y', Annotation(output, 'i')),
            Parameter('re_X_0', Annotation(
                Type(dtypes.real_for(output.dtype), output.shape[:-1]), 'i'))
        ],
        """
        ${Y.ctype} Y = ${Y.load_same};
        Y = COMPLEX_CTR(${Y.ctype})(Y.y, -Y.x);
        if (${idxs[-1]} == 0)
        {
            Y.x = Y.x / 2 + ${re_X_0.load_idx}(${", ".join(idxs[:-1])});
            Y.y /= 2;
        }
        ${output.store_same}(Y);
        """,
        connectors=['output', 'Y'],
        )
class APRFFT(Computation):
    """
    FFT of a real antiperiodic signal (x[k] = -x[N/2+k]).

    The input holds the first half of the signal (N/2 real values per
    batch); the output holds N/4 complex harmonics.
    See aprfft_reference() for the underlying algorithm.
    """

    def __init__(self, arr_t):
        out_arr = Type(
            dtypes.complex_for(arr_t.dtype),
            arr_t.shape[:-1] + (arr_t.shape[-1] // 2,))
        Computation.__init__(self, [
            Parameter('output', Annotation(out_arr, 'o')),
            Parameter('input', Annotation(arr_t, 'i'))])

    def _build_plan(self, plan_factory, device_params, output, input_):
        plan = plan_factory()

        N = input_.shape[-1] * 2
        batch_shape = input_.shape[:-1]
        batch_size = helpers.product(batch_shape)

        # Sine window and cosine coefficients (see aprfft_reference()).
        coeffs1 = 4 * numpy.sin(2 * numpy.pi * numpy.arange(N//2) / N)
        coeffs2 = 2 * numpy.cos(2 * numpy.pi * numpy.arange(N//2) / N)
        c1_arr = plan.persistent_array(coeffs1)
        c2_arr = plan.persistent_array(coeffs2)

        multiply = get_multiply(input_)

        # re_X_1 = sum(x * coeffs2) --- the real part of the first odd
        # harmonic (note: the holding array below is named re_X_0).
        t = plan.temp_array_like(input_)
        rd = Reduce(t, predicate_sum(input_.dtype), axes=(len(input_.shape)-1,))
        rd.parameter.input.connect(
            multiply, multiply.output, x=multiply.a, c2=multiply.b)
        re_X_0 = plan.temp_array_like(rd.parameter.output)
        plan.computation_call(rd, re_X_0, input_, c2_arr)

        # Y = numpy.fft.rfft(x * coeffs1)
        rfft = RFFT(input_, dont_store_last=True)
        rfft.parameter.input.connect(
            multiply, multiply.output, x=multiply.a, c1=multiply.b)
        Y = plan.temp_array_like(rfft.parameter.output)
        plan.computation_call(rfft, Y, input_, c1_arr)

        # Y *= -1j
        # Y[0] /= 2
        # Y[0] += re_X_1
        # res = numpy.cumsum(Y[:-1])
        prepare_prfft_scan = get_prepare_prfft_scan(Y)
        sc = Scan(Y, predicate_sum(Y.dtype), axes=(-1,), exclusive=False)
        sc.parameter.input.connect(
            prepare_prfft_scan, prepare_prfft_scan.output,
            Y=prepare_prfft_scan.Y, re_X_0=prepare_prfft_scan.re_X_0)
        plan.computation_call(sc, output, Y, re_X_0)

        return plan
def get_prepare_iprfft_input(X):
    """
    Build a transformation preparing the input spectrum for the IRFFT
    used inside IAPRFFT.

    Input: complex array of size ``N//4`` (odd harmonics, ``N = X.shape[-1] * 4``).
    Output: complex array of size ``N//4 + 1``.  The two boundary elements are
    made purely real from the imaginary parts of the first/last input elements;
    interior elements are differences of neighboring inputs multiplied by ``1j``
    (``(a, b) -> (-b, a)``).
    """
    N = X.shape[-1] * 4
    Y = Type(X.dtype, X.shape[:-1] + (N // 4 + 1,))
    return Transformation(
        [
            Parameter('Y', Annotation(Y, 'o')),
            Parameter('X', Annotation(X, 'i')),
        ],
        """
        <%
            batch_idxs = " ".join((idx + ", ") for idx in idxs[:-1])
        %>
        ${Y.ctype} Y;
        if (${idxs[-1]} == 0)
        {
            ${X.ctype} X = ${X.load_idx}(${batch_idxs} 0);
            Y = COMPLEX_CTR(${Y.ctype})(-2 * X.y, 0);
        }
        else if (${idxs[-1]} == ${N//4})
        {
            ${X.ctype} X = ${X.load_idx}(${batch_idxs} ${N//4-1});
            Y = COMPLEX_CTR(${Y.ctype})(2 * X.y, 0);
        }
        else
        {
            ${X.ctype} X = ${X.load_idx}(${batch_idxs} ${idxs[-1]});
            ${X.ctype} X_prev = ${X.load_idx}(${batch_idxs} ${idxs[-1]} - 1);
            ${X.ctype} diff = X - X_prev;
            Y = COMPLEX_CTR(${Y.ctype})(-diff.y, diff.x);
        }
        ${Y.store_same}(Y);
        """,
        connectors=['Y'],
        render_kwds=dict(N=N)
        )
def get_prepare_iprfft_output(y):
    """
    Build a transformation post-processing the IRFFT output inside IAPRFFT.

    ``x`` and ``y`` have the same shape; with ``N = y.shape[-1] * 2``, the
    ``coeffs`` array has size ``N//2 == y.shape[-1]``.  Every element is
    scaled by the corresponding coefficient, except the first one along the
    last axis, which is replaced by the correction term ``x0 / (N//2)``.
    """
    N = y.shape[-1] * 2
    return Transformation(
        [
            Parameter('x', Annotation(y, 'o')),
            Parameter('y', Annotation(y, 'i')),
            Parameter('x0', Annotation(Type(y.dtype, y.shape[:-1]), 'i')),
            Parameter('coeffs', Annotation(Type(y.dtype, (N//2,)), 'i')),
        ],
        """
        ${y.ctype} y = ${y.load_same};
        ${coeffs.ctype} coeff = ${coeffs.load_idx}(${idxs[-1]});
        ${x.ctype} x;
        if (${idxs[-1]} == 0)
        {
            ${x0.ctype} x0 = ${x0.load_idx}(${", ".join(idxs[:-1])});
            x = x0 / ${N // 2};
        }
        else
        {
            x = y * coeff;
        }
        ${x.store_same}(x);
        """,
        connectors=['y'],
        render_kwds=dict(N=N)
        )
class IAPRFFT(Computation):
    """
    IFFT of a real antiperiodic signal (x[k] = -x[N/2+k]).

    Takes the odd harmonics of the spectrum (shape ``(..., N/4)``, complex)
    and returns the first half of the reconstructed signal
    (shape ``(..., N/2)``, real).
    """
    def __init__(self, arr_t):
        # Output: real, same batch shape, twice the last dimension of the input.
        out_arr = Type(
            dtypes.real_for(arr_t.dtype),
            arr_t.shape[:-1] + (arr_t.shape[-1] * 2,))
        Computation.__init__(self, [
            Parameter('output', Annotation(out_arr, 'o')),
            Parameter('input', Annotation(arr_t, 'i'))])
    def _build_plan(self, plan_factory, device_params, output, input_):
        plan = plan_factory()
        N = input_.shape[-1] * 4
        batch_shape = input_.shape[:-1]
        batch_size = helpers.product(batch_shape)
        # Scaling coefficients applied to the IRFFT output.
        # The first element is unused
        coeffs = numpy.concatenate(
            [[0], 1 / (4 * numpy.sin(2 * numpy.pi * numpy.arange(1, N//2) / N))])
        coeffs_arr = plan.persistent_array(coeffs)
        # Transformations attached to the IRFFT: input preparation and
        # output scaling/correction.
        prepare_iprfft_input = get_prepare_iprfft_input(input_)
        prepare_iprfft_output = get_prepare_iprfft_output(output)
        irfft = IRFFT(prepare_iprfft_input.Y)
        irfft.parameter.input.connect(
            prepare_iprfft_input, prepare_iprfft_input.Y,
            X=prepare_iprfft_input.X)
        irfft.parameter.output.connect(
            prepare_iprfft_output, prepare_iprfft_output.y,
            x=prepare_iprfft_output.x,
            x0=prepare_iprfft_output.x0, coeffs=prepare_iprfft_output.coeffs)
        # x0 = sum(Re(input)): the correction term for the first output element.
        real = Transformation(
            [
                Parameter('output', Annotation(Type(dtypes.real_for(input_.dtype), input_.shape), 'o')),
                Parameter('input', Annotation(input_, 'i')),
            ],
            """
            ${output.store_same}((${input.load_same}).x);
            """,
            connectors=['output']
            )
        rd_t = Type(output.dtype, input_.shape)
        rd = Reduce(rd_t, predicate_sum(rd_t.dtype), axes=(len(input_.shape)-1,))
        rd.parameter.input.connect(real, real.output, X=real.input)
        x0 = plan.temp_array_like(rd.parameter.output)
        plan.computation_call(rd, x0, input_)
        plan.computation_call(irfft, output, x0, coeffs_arr, input_)
        return plan
# Tests
def test_rfft(thr):
    """Check RFFT against numpy.fft.rfft and the reference implementation."""
    size = 1024
    signal = numpy.random.normal(size=size)
    expected = numpy.fft.rfft(signal)
    reference = rfft_reference(signal)
    computation = RFFT(signal).compile(thr)
    signal_dev = thr.to_device(signal)
    result_dev = thr.empty_like(computation.parameter.output)
    computation(result_dev, signal_dev)
    assert numpy.allclose(expected, reference)
    assert numpy.allclose(expected, result_dev.get())
def test_irfft(thr):
    """Check IRFFT against numpy.fft.irfft and the reference implementation."""
    size = 1024
    spectrum = (
        numpy.random.normal(size=size//2+1)
        + 1j * numpy.random.normal(size=size//2+1))
    expected = numpy.fft.irfft(spectrum)
    reference = irfft_reference(spectrum)
    computation = IRFFT(spectrum).compile(thr)
    spectrum_dev = thr.to_device(spectrum)
    result_dev = thr.empty_like(computation.parameter.output)
    computation(result_dev, spectrum_dev)
    assert numpy.allclose(expected, reference)
    assert numpy.allclose(expected, result_dev.get())
def test_aprfft(thr):
    """Check APRFFT against the odd harmonics of numpy.fft.rfft."""
    size = 1024
    first_half = numpy.random.normal(size=size//2)
    full_signal = numpy.concatenate([first_half, -first_half])
    expected = numpy.fft.rfft(full_signal)[1::2]
    reference = aprfft_reference(first_half)
    computation = APRFFT(first_half).compile(thr)
    input_dev = thr.to_device(first_half)
    result_dev = thr.empty_like(computation.parameter.output)
    computation(result_dev, input_dev)
    assert numpy.allclose(expected, reference)
    assert numpy.allclose(expected, result_dev.get())
def test_iaprfft(thr):
    """Check IAPRFFT against numpy.fft.irfft on a spectrum with only odd harmonics."""
    size = 1024
    odd_harmonics = (
        numpy.random.normal(size=size//4)
        + 1j * numpy.random.normal(size=size//4))
    spectrum = numpy.zeros(size//2+1, odd_harmonics.dtype)
    spectrum[1::2] = odd_harmonics
    expected = numpy.fft.irfft(spectrum)[:size//2]
    reference = iaprfft_reference(odd_harmonics)
    computation = IAPRFFT(odd_harmonics).compile(thr)
    input_dev = thr.to_device(odd_harmonics)
    result_dev = thr.empty_like(computation.parameter.output)
    computation(result_dev, input_dev)
    assert numpy.allclose(expected, reference)
    assert numpy.allclose(expected, result_dev.get())
if __name__ == '__main__':
    # Pick a GPGPU device interactively and run all the tests on it.
    api = any_api()
    thr = api.Thread.create(interactive=True)
    test_rfft(thr)
    test_irfft(thr)
    test_aprfft(thr)
    test_iaprfft(thr)
import numpy
from reikna.cluda import dtypes, any_api
from reikna.algorithms import Reduce, Predicate
from reikna.cluda import Snippet
from reikna.core import Annotation, Type, Transformation, Parameter

# Pick the first available GPGPU API and make a Thread on it.
api = any_api()
thr = api.Thread.create()

# Minmax data type and the corresponding structure.
# Note that the dtype must be aligned before use on a GPU.
mmc_dtype = dtypes.align(numpy.dtype([
    ("cur_min", numpy.int32),
    ("cur_max", numpy.int32),
    ("pad", numpy.int32),
    ]))
mmc_c_decl = dtypes.ctype_module(mmc_dtype)

# Create the "empty" element for our minmax monoid, that is
# x `minmax` empty == empty `minmax` x == x.
# (numpy.empty(1, ...)[0] produces a single structured record.)
empty = numpy.empty(1, mmc_dtype)[0]
empty["cur_min"] = 1 << 30
empty["cur_max"] = -(1 << 30)

# Reduction predicate for the minmax.
# v1 and v2 get the names of two variables to be processed.
predicate = Predicate(
    Snippet.create(lambda v1, v2: """
        ${ctype} result = ${v1};
        if (${v2}.cur_min < result.cur_min)
            result.cur_min = ${v2}.cur_min;
        if (${v2}.cur_max > result.cur_max)
            result.cur_max = ${v2}.cur_max;
        return result;
        """,
        render_kwds=dict(ctype=mmc_c_decl)),
    empty)

# Test array
arr = numpy.random.randint(0, 10**6, 20000)

# A transformation that creates initial minmax structures for the given array of integers
to_mmc = Transformation(
    [Parameter('output', Annotation(Type(mmc_dtype, arr.shape), 'o')),
    Parameter('input', Annotation(arr, 'i'))],
    """
    ${output.ctype} res;
    res.cur_min = ${input.load_same};
    res.cur_max = ${input.load_same};
    ${output.store_same}(res);
    """)

# Create the reduction computation and attach the transformation above to its input.
reduction = Reduce(to_mmc.output, predicate)
reduction.parameter.input.connect(to_mmc, to_mmc.output, new_input=to_mmc.input)
creduction = reduction.compile(thr)

# Run the computation
arr_dev = thr.to_device(arr)
res_dev = thr.empty_like(reduction.parameter.output)
creduction(res_dev, arr_dev)
minmax = res_dev.get()

# Check the GPU result against numpy.
assert minmax["cur_min"] == arr.min()
assert minmax["cur_max"] == arr.max()
import time
import numpy
from reikna.cluda import any_api
from reikna.core import Computation, Parameter, Annotation
from reikna.fft import FFT
from reikna.algorithms import Transpose
class FFTWithTranspose(Computation):
    """
    FFT over arbitrary axes, implemented as a sequence of 1D batched FFTs
    over the innermost axis, interleaved with transpositions that bring
    each target axis to the innermost position.

    Parameters: ``output``, ``input`` (same type) and an ``inverse`` flag
    (0 for the forward transform).
    """
    def __init__(self, arr_t, axes=None):
        Computation.__init__(self, [
            Parameter('output', Annotation(arr_t, 'o')),
            Parameter('input', Annotation(arr_t, 'i')),
            Parameter('inverse', Annotation(numpy.int32), default=0)])
        # Default: transform over all axes.
        if axes is None:
            axes = range(len(arr_t.shape))
        self._axes = tuple(sorted(axes))
    def _build_plan(self, plan_factory, device_params, output, input_, inverse):
        plan = plan_factory()
        num_axes = len(input_.shape)
        # current_axes[i] is the original axis currently located at position i.
        current_axes = list(range(num_axes))
        current_input = input_
        # Iterate over all the axes we need to FFT over
        for i, initial_axis in enumerate(self._axes):
            # Find out where the target axis is currently located
            current_axis = current_axes.index(initial_axis)
            # If it is not the innermost one, we will transpose the array
            # to bring it to the end.
            if current_axis != len(current_axes) - 1:
                local_axes = list(range(num_axes))
                # The `Transpose` computation is most efficient when we ask it
                # to swap two innermost parts of the axes list, e.g.
                # [0, 1, 2, 3, 4, 5] to [0, 1, 4, 5, 2, 3]
                # This way the transposition requires only one kernel call.
                # Since we do not care where the other axes go as long as the target one
                # becomes the innermost one, it is easy to follow this guideline.
                # That's the transposition that we will need to perform
                # on the current array
                local_axes = (
                    local_axes[:current_axis]
                    + local_axes[current_axis+1:]
                    + [local_axes[current_axis]])
                # That's the corresponding permutation of the original axes
                # (we need to keep track of it)
                current_axes = (
                    current_axes[:current_axis]
                    + current_axes[current_axis+1:]
                    + [current_axes[current_axis]])
                # Transpose the array, saving the result in a temporary buffer
                tr = Transpose(current_input, axes=local_axes)
                temp = plan.temp_array_like(tr.parameter.output)
                plan.computation_call(tr, temp, current_input)
                # Now the target axis is the innermost one
                current_axis = len(current_axes) - 1
                current_input = temp
            # If it is the last FFT to perform, and there is no final transposition required,
            # the FFT output is pointed to the output array instead of a temporary buffer.
            if i == len(self._axes) - 1 and current_axes == list(range(num_axes)):
                current_output = output
            else:
                current_output = current_input
            fft = FFT(current_input, axes=(current_axis,))
            plan.computation_call(fft, current_output, current_input, inverse)
        # If the axes are not in the original order, there is one last transposition required
        if current_axes != list(range(num_axes)):
            # Invert the accumulated permutation to restore the original layout.
            pairs = list(zip(current_axes, list(range(num_axes))))
            tr_axes = [local_axis for _, local_axis in sorted(pairs)]
            tr = Transpose(current_output, tr_axes)
            plan.computation_call(tr, output, current_output)
        return plan
if __name__ == '__main__':
    api = any_api()
    thr = api.Thread.create()

    dtype = numpy.complex128
    shape = (1024, 16, 16, 16)
    axes = (1, 2, 3)

    data = numpy.random.normal(size=shape) + 1j * numpy.random.normal(size=shape)
    data = data.astype(dtype)

    # Compile both the regular FFT and the transposition-based variant.
    fft = FFT(data, axes=axes)
    fftc = fft.compile(thr)
    fft2 = FFTWithTranspose(data, axes=axes)
    fft2c = fft2.compile(thr)

    data_dev = thr.to_device(data)
    res_dev = thr.empty_like(data_dev)

    # Time both computations and verify their results against numpy.
    for comp, tag in [(fftc, "original FFT"), (fft2c, "transposition-based FFT")]:
        attempts = 10
        ts = []
        for i in range(attempts):
            t1 = time.time()
            comp(res_dev, data_dev)
            thr.synchronize()
            t2 = time.time()
            ts.append(t2 - t1)
        fwd_ref = numpy.fft.fftn(data, axes=axes).astype(dtype)
        assert numpy.allclose(res_dev.get(), fwd_ref)
        # Report the best time out of all attempts.
        print(tag, min(ts), "s")
import numpy
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
from matplotlib.mlab import specgram, window_hanning
from reikna.cluda import any_api
from reikna.cluda import dtypes, functions
from reikna.core import Computation, Transformation, Parameter, Annotation, Type
from reikna.fft import FFT
from reikna.algorithms import Transpose
import reikna.transformations as transformations
def get_data():
    """Generate the demo signal: two sine tones (one gated in time) plus noise.

    Returns the 1D signal and a dict of spectrogram parameters.
    """
    sample_step = 0.0005
    times = numpy.arange(0.0, 20.0, sample_step)
    tone_low = numpy.sin(2 * numpy.pi * 100 * times)
    tone_high = 2 * numpy.sin(2 * numpy.pi * 400 * times)
    # Gate the high tone so it only sounds between t=10s and t=12s (a transient "chirp").
    gate = numpy.where((times > 10) & (times < 12), 1.0, 0.0)
    tone_high = tone_high * gate
    # Mix in a little white noise.
    noise = 0.01 * numpy.random.randn(len(times))
    signal = tone_low + tone_high + noise
    window_length = 1024  # the length of the windowing segments
    sampling_rate = int(1.0 / sample_step)
    return signal, dict(NFFT=window_length, Fs=sampling_rate, noverlap=900, pad_to=2048)
def hanning_window(arr, NFFT):
    """
    Applies the von Hann window to the rows of a 2D array.
    To account for zero padding (which we do not want to window), NFFT is
    provided separately: only the first NFFT elements of each row are
    windowed, the padded tail (columns >= NFFT) is passed through unchanged.
    """
    # The window coefficients are real even for complex input.
    if dtypes.is_complex(arr.dtype):
        coeff_dtype = dtypes.real_for(arr.dtype)
    else:
        coeff_dtype = arr.dtype
    return Transformation(
        [
            Parameter('output', Annotation(arr, 'o')),
            Parameter('input', Annotation(arr, 'i')),
        ],
        # BUGFIX: the pass-through branch must be compiled in when the row
        # *width* (shape[1]) exceeds NFFT — the padding is along axis 1,
        # as in rolling_frame(). The original checked shape[0] (frame count).
        """
        ${dtypes.ctype(coeff_dtype)} coeff;
        %if NFFT != output.shape[1]:
        if (${idxs[1]} >= ${NFFT})
        {
            coeff = 1;
        }
        else
        %endif
        {
            coeff = 0.5 * (1 - cos(2 * ${numpy.pi} * ${idxs[-1]} / (${NFFT} - 1)));
        }
        ${output.store_same}(${mul}(${input.load_same}, coeff));
        """,
        render_kwds=dict(
            coeff_dtype=coeff_dtype, NFFT=NFFT,
            mul=functions.mul(arr.dtype, coeff_dtype)))
def rolling_frame(arr, NFFT, noverlap, pad_to):
    """
    Transforms a 1D array to a 2D array whose rows are
    partially overlapped parts of the initial array.

    Each row starts ``NFFT - noverlap`` elements after the previous one and
    contains ``NFFT`` samples, zero-padded on the right up to ``pad_to``
    (if ``pad_to`` is not None).
    """
    frame_step = NFFT - noverlap
    frame_num = (arr.size - noverlap) // frame_step
    frame_size = NFFT if pad_to is None else pad_to
    result_arr = Type(arr.dtype, (frame_num, frame_size))
    return Transformation(
        [
            Parameter('output', Annotation(result_arr, 'o')),
            Parameter('input', Annotation(arr, 'i')),
        ],
        """
        %if NFFT != output.shape[1]:
        if (${idxs[1]} >= ${NFFT})
        {
            ${output.store_same}(0);
        }
        else
        %endif
        {
            ${output.store_same}(${input.load_idx}(${idxs[0]} * ${frame_step} + ${idxs[1]}));
        }
        """,
        render_kwds=dict(frame_step=frame_step, NFFT=NFFT),
        # note that only the "store_same"-using argument can serve as a connector!
        connectors=['output'])
def crop_frequencies(arr):
    """
    Crop a 2D array whose columns represent frequencies to only leave the
    frequencies with different absolute values (the first
    ``shape[1] // 2 + 1`` columns).
    """
    result_arr = Type(arr.dtype, (arr.shape[0], arr.shape[1] // 2 + 1))
    return Transformation(
        [
            Parameter('output', Annotation(result_arr, 'o')),
            Parameter('input', Annotation(arr, 'i')),
        ],
        """
        if (${idxs[1]} < ${input.shape[1] // 2 + 1})
            ${output.store_idx}(${idxs[0]}, ${idxs[1]}, ${input.load_same});
        """,
        # note that only the "load_same"-using argument can serve as a connector!
        connectors=['input'])
class Spectrogram(Computation):
    """
    GPU spectrogram computation.

    The demo below checks it against :py:func:`matplotlib.mlab.specgram`
    with ``mode='magnitude'`` (up to a window normalization factor).
    The output has shape (frequencies, times).
    """
    def __init__(self, x, NFFT=256, noverlap=128, pad_to=None, window=hanning_window):
        assert dtypes.is_real(x.dtype)
        assert x.ndim == 1
        rolling_frame_trf = rolling_frame(x, NFFT, noverlap, pad_to)
        complex_dtype = dtypes.complex_for(x.dtype)
        fft_arr = Type(complex_dtype, rolling_frame_trf.output.shape)
        real_fft_arr = Type(x.dtype, rolling_frame_trf.output.shape)
        window_trf = window(real_fft_arr, NFFT)
        broadcast_zero_trf = transformations.broadcast_const(real_fft_arr, 0)
        to_complex_trf = transformations.combine_complex(fft_arr)
        amplitude_trf = transformations.norm_const(fft_arr, 1)
        crop_trf = crop_frequencies(amplitude_trf.output)
        # Input chain attached to the FFT:
        # flat signal -> overlapping frames -> windowed frames -> complex values
        # (imaginary part broadcast to zero).
        fft = FFT(fft_arr, axes=(1,))
        fft.parameter.input.connect(
            to_complex_trf, to_complex_trf.output,
            input_real=to_complex_trf.real, input_imag=to_complex_trf.imag)
        fft.parameter.input_imag.connect(
            broadcast_zero_trf, broadcast_zero_trf.output)
        fft.parameter.input_real.connect(
            window_trf, window_trf.output, unwindowed_input=window_trf.input)
        fft.parameter.unwindowed_input.connect(
            rolling_frame_trf, rolling_frame_trf.output, flat_input=rolling_frame_trf.input)
        # Output chain: complex FFT -> magnitudes -> cropped to unique frequencies.
        fft.parameter.output.connect(
            amplitude_trf, amplitude_trf.input, amplitude=amplitude_trf.output)
        fft.parameter.amplitude.connect(
            crop_trf, crop_trf.input, cropped_amplitude=crop_trf.output)
        self._fft = fft
        # Transpose (times, frequencies) -> (frequencies, times).
        self._transpose = Transpose(fft.parameter.cropped_amplitude)
        Computation.__init__(self,
            [Parameter('output', Annotation(self._transpose.parameter.output, 'o')),
            Parameter('input', Annotation(fft.parameter.flat_input, 'i'))])
    def _build_plan(self, plan_factory, device_params, output, input_):
        plan = plan_factory()
        temp = plan.temp_array_like(self._fft.parameter.cropped_amplitude)
        plan.computation_call(self._fft, temp, input_)
        plan.computation_call(self._transpose, output, temp)
        return plan
if __name__ == '__main__':
    numpy.random.seed(125)
    x, params = get_data()
    fig = plt.figure()

    # Reference spectrogram computed by matplotlib.
    s = fig.add_subplot(2, 1, 1)
    spectre, freqs, ts = specgram(x, mode='magnitude', **params)
    # Renormalize to match the computation
    spectre *= window_hanning(numpy.ones(params['NFFT'])).sum()
    s.imshow(
        numpy.log10(spectre),
        extent=(ts[0], ts[-1], freqs[0], freqs[-1]),
        aspect='auto',
        origin='lower')

    # The same spectrogram computed on a GPU with reikna.
    api = any_api()
    thr = api.Thread.create()
    specgram_reikna = Spectrogram(
        x, NFFT=params['NFFT'], noverlap=params['noverlap'], pad_to=params['pad_to']).compile(thr)
    x_dev = thr.to_device(x)
    spectre_dev = thr.empty_like(specgram_reikna.parameter.output)
    specgram_reikna(spectre_dev, x_dev)
    spectre_reikna = spectre_dev.get()
    assert numpy.allclose(spectre, spectre_reikna)

    s = fig.add_subplot(2, 1, 2)
    im=s.imshow(
        numpy.log10(spectre_reikna),
        extent=(ts[0], ts[-1], freqs[0], freqs[-1]),
        aspect='auto',
        origin='lower')
    fig.savefig('demo_specgram.png')
# TorchUtils
> Some handy utilities for pytorch
```
#| default_exp torchutils
#| hide
from nbdev.showdoc import *
#| export
from torch.utils.data import Dataset
import torch
#| export
def device_by_name(name: str) -> torch.device:
    ''' Return a reference to a cuda device by using part of its name

    Args:
        name: part of the cuda device name (should be distinct)

    Return:
        Reference to the cuda device

    Updated: Yuval 12/10/19
    '''
    assert torch.cuda.is_available(), "No cuda device"
    device = None
    # Scan all visible cuda devices and pick the first whose name contains `name`.
    for i in range(torch.cuda.device_count()):
        dv = torch.device("cuda:{}".format(i))
        if name in torch.cuda.get_device_name(dv):
            device = dv
            break
    assert device, "device {} not found".format(name)
    return device
show_doc(device_by_name)
```
#### How to use
```
#| eval: false
device_by_name("Tesla")
```
If the device doesn't exist we should get an error
```
#|eval: false
error = False
try:
device_by_name("fff")
except AssertionError:
error = True
assert error
#| export
class DatasetCat(Dataset):
    '''
    Concatenate datasets for a Pytorch dataloader

    The normal pytorch implementation concatenates only rows; this is a
    "column" implementation: item `i` of the result is the flattened tuple
    of item `i` from every input dataset.

    Args:
        datasets: list of datasets, all of the same length

    Updated: Yuval 12/10/2019
    '''
    def __init__(self, *datasets):
        '''
        Args: datasets - an iterable containing the datasets
        '''
        super(DatasetCat, self).__init__()
        self.datasets=datasets
        assert len(self.datasets)>0
        for dataset in datasets:
            assert len(self.datasets[0])==len(dataset),"Datasets length should be equal"
    def __len__(self):
        # All datasets have equal length (checked in __init__).
        return len(self.datasets[0])
    def __getitem__(self, idx):
        # Fetch item `idx` from every dataset (a dataset entry may itself be a
        # tuple of datasets, which is flattened one level first) ...
        outputs = tuple(dataset.__getitem__(idx) for i in self.datasets for dataset in (i if isinstance(i, tuple) else (i,)))
        # ... then flatten the per-dataset outputs into a single flat tuple.
        return tuple(output for i in outputs for output in (i if isinstance(i, tuple) else (i,)))
show_doc(DatasetCat)
```
### How to use
This is one dataset
```
dataset1=torch.utils.data.TensorDataset(torch.ones(5,1),torch.randn(5,1))
print(len(dataset1))
print (dataset1.__getitem__(0))
```
This is the 2nd
```
dataset2=torch.utils.data.TensorDataset(torch.zeros(5,1),torch.randn(5,1))
print(len(dataset2))
print (dataset2.__getitem__(0))
```
And we will concat them row wise
```
dataset3 = DatasetCat(dataset1,dataset2)
print(len(dataset3))
print (dataset3.__getitem__(0))
assert dataset3.__getitem__(3) == (*dataset1.__getitem__(3),*dataset2.__getitem__(3))
assert len(dataset3) == len(dataset1)
#| hide
import nbdev; nbdev.nbdev_export()
```
| /reinautils-0.0.3.tar.gz/reinautils-0.0.3/nbs/01_TorchUtils.ipynb | 0.779238 | 0.883939 | 01_TorchUtils.ipynb | pypi |
# Parameters
> Define a special class which is easy to use for configuration/parameters
```
#| default_exp parameters
#| hide
from nbdev.showdoc import *
#| export
import json
#| export
class Parameters:
    '''A parameter container whose entries can be accessed both as
    attributes (``p.key``) and as dictionary items (``p["key"]``).

    Nested dicts are converted to nested ``Parameters`` recursively.
    '''
    def __init__(self, **kargs):
        self.add_attr(**kargs)

    def __call__(self, param, value=None):
        '''Read a parameter; if ``value`` is given (not None), set it first.'''
        if value is not None:
            setattr(self, param, value)
        return self.__getattribute__(param)

    def __repr__(self):
        text = f'{self.__class__}'
        for name, value in self.__dict__.items():
            if isinstance(value, Parameters):
                text += f'\n {name} : ' + '\n '.join(repr(value).split('\n'))
            else:
                text += f'\n {name} : {repr(value)}'
        return text

    def __str__(self):
        text = 'Parameters:'
        for name, value in self.__dict__.items():
            if isinstance(value, Parameters):
                text += f'\n {name} : ' + '\n '.join(str(value).split('\n'))
            else:
                text += f'\n {name} : {value}'
        return text

    def add_attr(self, **kargs):
        '''Add attributes to this object; dict values become nested Parameters.'''
        for name, value in kargs.items():
            setattr(self, name, Parameters(**value) if isinstance(value, dict) else value)

    def to_dict(self):
        '''Convert the parameters (recursively) back to a plain dict.'''
        result = {}
        for name, value in self.__dict__.items():
            result[name] = value.to_dict() if isinstance(value, Parameters) else value
        return result

    def __len__(self) -> int:
        return len(self.__dict__)

    def __getitem__(self, key):
        return self.__getattribute__(key)

    def __setitem__(self, key, value):
        self.add_attr(**{key: value})

    def __delitem__(self, key):
        delattr(self, key)

    def from_json(self, json_file_name: str):
        '''Read a json file and add its contents as parameters.'''
        with open(json_file_name) as json_data_file:
            self.add_attr(**json.load(json_data_file))
        return self
show_doc(Parameters)
show_doc(Parameters.add_attr,title_level=4)
show_doc(Parameters.to_dict,title_level=4)
show_doc(Parameters.from_json,title_level=4)
```
## Use cases
We can instantiate a Parameters class and immediately add attributes
```
params=Parameters(first=1,second='A')
assert (params.first==1) & (params.second=='A')
```
Attributes can be added later
```
params.added = 'I am new'
assert params['added'] == 'I am new'
```
And they can also be added recursively
```
params.add_attr(file_name='test.ini', paths = {'path1':'hello_world', 'path2':'http2'})
assert params.file_name == 'test.ini'
assert params.paths.path2 == 'http2'
```
You can see we can reference the attribute directly, as in a dict
```
assert params.paths.path1 == params['paths']['path1']
params['stam'] = 'no'
assert params.stam == 'no'
assert params['paths'].path2 == 'http2'
```
And can be deleted
```
del params['stam']
assert not hasattr(params,'stam')
```
The Parameters class can be printed and can be converted recursively to dict
```
print(params)
print(params.to_dict())
```
The parameters can also be populated using a json file
```
params2=Parameters().from_json('config_demo.json')
print(params2)
assert params2.platform == 'myserver'
#| hide
import nbdev; nbdev.nbdev_export()
```
| /reinautils-0.0.3.tar.gz/reinautils-0.0.3/nbs/00_Parameters.ipynb | 0.444806 | 0.607605 | 00_Parameters.ipynb | pypi |
from __future__ import print_function
import random
import sys
# pylint: disable=C0413
import pyximport; pyximport.install() # pylint: disable=C0321
import xo_fast # pylint: disable=E0611
from . import rl
EMPTY = '.'  # unoccupied board cell
X = 'x'
O = 'o'

# Relative markers used when a board is viewed from one player's perspective.
US = 'us'
THEM = 'them'

# All index triples that form a winning line on the flat 3x3 board
# (three rows, three columns, two diagonals).
WINNING_COMBOS = (
    (0, 1, 2),
    (3, 4, 5),
    (6, 7, 8),
    (0, 3, 6),
    (1, 4, 7),
    (2, 5, 8),
    (0, 4, 8),
    (2, 4, 6),
)
def transform_board(b):
    """Return all 8 symmetric variants of a flat 3x3 board.

    The result contains the original board, the two mirror images, the
    three non-trivial rotations, and the two diagonal transpositions.
    """
    variants = [b]
    rows = expand_board(b)
    # Mirror along the vertical axis (flip each row).
    variants.append(flatten_board([list(reversed(row)) for row in rows]))
    # Mirror along the horizontal axis (flip the row order).
    variants.append(flatten_board([list(row) for row in reversed(rows)]))
    # Rotations, each derived from the previous one; see
    # http://stackoverflow.com/q/42519/247542 for the zip(*b[::-1]) trick.
    rotated_90 = [list(col) for col in zip(*rows[::-1])]
    variants.append(flatten_board(rotated_90))
    rotated_180 = [list(col) for col in zip(*rotated_90[::-1])]
    variants.append(flatten_board(rotated_180))
    rotated_270 = [list(col) for col in zip(*rotated_180[::-1])]
    variants.append(flatten_board(rotated_270))
    # Transpose across the main (top-left to bottom-right) diagonal.
    variants.append(flatten_board([list(col) for col in zip(*rows)]))
    # Transpose across the anti-diagonal (top-right to bottom-left).
    variants.append(flatten_board([list(row) for row in reversed(rotated_90)]))
    return variants
def flatten_board(b):
    """
    Converts a 3x3 nested-list board into a flat 9-element list.
    """
    assert len(b) == 3 and len(b[0]) == 3
    top, middle, bottom = b
    return top + middle + bottom
def expand_board(b):
    """
    Converts a flat 9-element board into a 3x3 nested list.
    """
    assert len(b) == 9
    return [b[start:start + 3] for start in range(0, 9, 3)]
class Game(rl.Domain):
    """
    Organizes a Tic-Tac-Toe match between two players.

    The board is a flat list of 9 cells. `run()` drives the agent-environment
    loop: it asks each player for a move, applies it and delivers
    reinforcement feedback (+1 win, 0 draw/ongoing, -1 loss).
    """
    def __init__(self, players=None, player1=None, player2=None, *args, **kwargs):
        if players:
            # Randomize which player goes first.
            players = list(players)
            random.shuffle(players)
            self.player1 = players[0]
            self.player2 = players[1]
        else:
            assert player1 and player2
            self.player1 = player1
            self.player2 = player2
        self.player1.color = X
        self.player2.color = O
        self.board = None # board positions
        self.empty = None # indexes of empty board positions
        super(Game, self).__init__(*args, **kwargs)
        self.reset()

    @property
    def players(self):
        """Return (color, player) pairs in move order."""
        return [(X, self.player1), (O, self.player2)]

    def reset(self):
        """Clear the board and reset both players for a new game."""
        self.board = [EMPTY]*9
        self.empty = list(range(9))
        self.player1.reset()
        self.player2.reset()

    def is_over(self):
        """
        Return the winner's color if a winning line exists, True for a
        draw (no empty cells left), or False if the game is in progress.
        """
        for combo in WINNING_COMBOS:
            colors = list(set(self.board[i] for i in combo))
            if len(colors) == 1 and colors[0] != EMPTY:
                return colors[0]
        if not self.empty:
            return True
        return False

    def get_color(self, player):
        """Return the color assigned to `player` (None if not in this game)."""
        for _color, _player in self.players:
            if _player == player:
                return _color

    def is_winner(self, player):
        """Return True if `player` has won the current game."""
        result = self.is_over()
        return result == self.get_color(player)

    def get_actions(self, player):
        """Return the list of legal moves (indexes of empty cells)."""
        return list(self.empty)

    def get_other_player(self, player):
        """Return the opponent of `player`."""
        if player == self.player1:
            return self.player2
        return self.player1

    @property
    def pretty_board(self):
        """Render the board as three lines of three characters."""
        return '\n'.join([
            ''.join(self.board[0:3]),
            ''.join(self.board[3:6]),
            ''.join(self.board[6:9]),
        ])

    #TODO:support Q-value updates based on afterstate?
    #http://webdocs.cs.ualberta.ca/~sutton/book/ebook/node68.html
    def run(self, verbose=0):
        """
        Play one complete game, giving both players reinforcement feedback.
        """
        while not self.is_over():
            for color, player in self.players:
                if verbose:
                    print(self.pretty_board)
                    print()

                # Get player action.
                action = player.get_action(
                    state=list(self.board),
                    actions=list(self.empty))
                if action not in self.empty:
                    raise Exception(('Player %s returned invalid action '
                        '"%s" in state "%s"') % (player, action, self.board))

                # Update state.
                self.board[action] = color
                self.empty.remove(action)

                # Check for game termination.
                result = self.is_over()
                end = bool(result)

                # Give player feedback. Right after this player's own move the
                # opponent cannot have just won, so `result` is either this
                # player's color (win), True (draw) or False (game ongoing).
                # (The previous version had an unreachable "loss" branch here
                # behind the tautological condition `result or not result`;
                # a loss is reported below via the negated feedback sent to
                # the other player.)
                if result == color:
                    # Player won.
                    feedback = +1
                else:
                    # Player drew or game ongoing.
                    feedback = 0
                player.reinforce(
                    feedback=feedback,
                    state=list(self.board),
                    end=end)

                # If it's the end of the game, give the other player feedback
                # immediately and then exit.
                if end:
                    if verbose:
                        print(self.pretty_board)
                        print()
                    self.get_other_player(player).reinforce(
                        feedback=-feedback,
                        state=list(self.board),
                        replace_last=True,
                        end=end)
                    break
class Player(rl.Agent):
    """
    Abstract base class for agents that play Tic-Tac-Toe.
    """
    def __init__(self, *args, **kwargs):
        super(Player, self).__init__(*args, **kwargs)
        # The game assigns X or O before play begins.
        self.color = None

    def relativize_state(self, state):
        """
        Translate literal piece colors into US/THEM markers relative to
        this player's own color.
        """
        assert self.color in (X, O)
        mapping = {EMPTY: EMPTY}
        for piece in (X, O):
            mapping[piece] = US if piece == self.color else THEM
        return [mapping[cell] for cell in state]
class RandomPlayer(Player):
    """
    A baseline player that chooses a uniformly random legal action.
    """
    def get_action(self, state, actions):
        """
        Retrieves the agent's action for the given state
        (ignores the state entirely).
        """
        return random.choice(actions)
class SARSAPlayer(Player, rl.SARSAAgent):
    """
    Tic-Tac-Toe player trained with basic (tabular) SARSA.
    """
    filename = 'models/sarsa-xo-player.yaml'

    def normalize_state(self, state):
        """
        Return the state as a hashable tuple of US/THEM/EMPTY markers,
        suitable for tabular lookup.
        """
        return tuple(self.relativize_state(state))
class SARSALFAPlayer(Player, rl.SARSALFAAgent):
    """
    Tic-Tac-Toe player trained with SARSA and linear function approximation.
    """
    filename = 'models/sarsalfa-xo-player.yaml'

    # Numeric encoding of relative cell markers for the linear features.
    LFA_KEY = {
        EMPTY: 0,
        US: 1,
        THEM: -1,
    }

    def normalize_state(self, state):
        """
        Return the state as a list of -1/0/+1 feature values usable in
        a linear function approximation.
        """
        relative = self.relativize_state(state)
        return [self.LFA_KEY[cell] for cell in relative]
class ANNPlayer(Player, rl.ANNAgent):
    """
    Learns to play using an artificial neural network.

    Works by using the ANN to estimate the expected reward after
    performing each legal action and recommends the action corresponding
    to the highest expected reward.
    """

    filename = 'models/ann-xo-player.yaml'

    # Numeric encoding of relative cell markers for the network input.
    symbol_to_int = {
        US: +1,
        EMPTY: 0,
        THEM: -1,
    }

    def __init__(self, *args, **kwargs):
        # Initialize both parent classes explicitly (multiple inheritance).
        Player.__init__(self, *args, **kwargs)
        rl.ANNAgent.__init__(self, *args, **kwargs)

    def normalize_state(self, state):
        """
        Converts state into a list of numbers.
        """
        assert self.color is not None

        # Make the board relative to us.
        if not (US in state or THEM in state):
            state = self.relativize_state(state)

        # Convert to integers suitable for input into the ANN.
        state = [self.symbol_to_int[_] for _ in state]

        # Convert one of 8 possible symmetric versions to the standard
        # (the lexicographically smallest variant is the canonical form).
        boards = sorted(xo_fast.transform_board(state))
        state = boards[0]

        return state

    def simulate_action(self, state, action):
        """
        Returns the expected next-state if the given action is performed
        in the given state (a relativized board with US placed at `action`).
        """
        state = self.relativize_state(state)
        invalid = set(state).difference([US, THEM, EMPTY])
        assert not invalid, invalid
        assert state[action] == EMPTY
        state = list(state)
        state[action] = US
        return state
# Reinforced-lib: Reinforcement learning library
[![PyPI version][pypi-badge]][pypi]
[![License: MPL 2.0][license-badge]][license]
[![build and test][tests-badge]][github-actions]
[![Documentation Status][rtd-badge]][documentation]
[pypi-badge]: https://img.shields.io/pypi/v/reinforced-lib
[pypi]: https://pypi.org/project/reinforced-lib/
[license-badge]: https://img.shields.io/badge/License-MPL%202.0-brightgreen.svg
[license]: https://opensource.org/licenses/MPL-2.0
[tests-badge]: https://github.com/m-wojnar/reinforced-lib/actions/workflows/python-package.yml/badge.svg
[github-actions]: https://github.com/m-wojnar/reinforced-lib/actions
[rtd-badge]: https://readthedocs.org/projects/reinforced-lib/badge/?version=latest
[documentation]: https://reinforced-lib.readthedocs.io/en/latest/
**Introducing Reinforced-lib:** a lightweight Python library for rapid development of RL solutions. It is open-source,
prioritizes ease of use, provides comprehensive documentation, and offers both deep reinforcement learning
(DRL) and classic non-neural agents. Built on [JAX](https://jax.readthedocs.io/en/latest/), it facilitates exporting
trained models to embedded devices, and makes it great for research and prototyping with RL algorithms. Access to JAX's
JIT functionality ensures high-performance results.
## Installation
You can install the latest released version of Reinforced-lib from PyPI via:
```bash
pip install reinforced-lib
```
To have an easy access to the [example files](https://github.com/m-wojnar/reinforced-lib/tree/main/examples)
you can clone the source code from our repository, and than install it locally with pip:
```bash
git clone git@github.com:m-wojnar/reinforced-lib.git
cd reinforced-lib
pip install .
```
In the spirit of making Reinforced-lib a lightweight solution, we included only the necessary dependencies in the base
requirements. To fully benefit from Reinforced-lib conveniences, like TF Lite export, install with the "full" suffix:
```bash
pip3 install ".[full]"
```
## Key components
Reinforced-lib facilitates seamless interaction between RL agents and the environment. Here are the key components
of the library, represented in the API as different modules.
- **RLib** - The core module which provides a simple and intuitive interface to manage agents, use extensions,
and configure the logging system. Even if you're not a reinforcement learning (RL) expert, *RLib* makes it easy to
implement the agent-environment interaction loop.
- **Agents** - Choose from a variety of RL agents available in the *Agents* module. These agents are designed to be
versatile and work with any environment. If needed, you can even create your own agents using our documented recipes.
- **Extensions** - Enhance agent observations with domain-specific knowledge by adding a suitable extension from the
*Extensions* module. This module enables seamless agent switching and parameter tuning without extensive reconfiguration.
- **Logging** - This module allows you to monitor agent-environment interactions. Customize and adapt logging to your
specific needs, capturing training metrics, internal agent state, or environment observations. The library includes
various loggers for creating plots and output files, simplifying visualization and data processing.
The figure below provides a visual representation of Reinforced-lib and the data-flow between its modules.
<img src="docs/resources/data-flow.png" width="600">
## JAX Backend
Our library is built on top of JAX, a high-performance numerical computing library. JAX makes it easy to implement
RL algorithms efficiently. It provides powerful transformations, including JIT compilation, automatic differentiation,
vectorization, and parallelization. Our library is fully compatible with DeepMind's JAX ecosystem, granting access to
state-of-the-art RL models and helper libraries. JIT compilation significantly accelerates execution and ensures
portability across different architectures (CPUs, GPUs, TPUs) without requiring code modifications.
## Edge Device Export
Reinforced-lib is designed to work seamlessly on wireless, low-powered devices, where resources are limited. It's the
perfect solution for energy-constrained environments that may struggle with other ML frameworks. You can export your
trained models to [TensorFlow Lite](https://www.tensorflow.org/lite) with ease, reducing runtime overhead and
optimizing performance. This means you can deploy RL agents on resource-limited devices efficiently.
## Example code
Experience the simplicity of our library and witness the fundamental agent-environment interaction loop with our
straightforward example. This code can be used to train a deep Q-learning agent on the `CartPole-v1` environment
effortlessly using Reinforced-lib.
```python
import gymnasium as gym
import haiku as hk
import optax
from chex import Array
from reinforced_lib import RLib
from reinforced_lib.agents.deep import QLearning
from reinforced_lib.exts import Gymnasium
@hk.transform_with_state
def q_network(x: Array) -> Array:
return hk.nets.MLP([256, 2])(x)
if __name__ == '__main__':
rl = RLib(
agent_type=QLearning,
agent_params={
'q_network': q_network,
'optimizer': optax.rmsprop(3e-4, decay=0.95, eps=1e-2),
},
ext_type=Gymnasium,
ext_params={'env_id': 'CartPole-v1'}
)
for epoch in range(300):
env = gym.make('CartPole-v1', render_mode='human')
_, _ = env.reset()
action = env.action_space.sample()
terminal = False
while not terminal:
env_state = env.step(action.item())
action = rl.sample(*env_state)
terminal = env_state[2] or env_state[3]
```
## Citing Reinforced-lib
To cite this repository:
```
@software{reinforcedlib2022,
author = {Maksymilian Wojnar and Wojciech Ciężobka},
title = {{R}einforced-lib: {R}einforcement learning library},
url = {http://github.com/m-wojnar/reinforced-lib},
year = {2022},
}
```
| /reinforced-lib-1.0.0.tar.gz/reinforced-lib-1.0.0/README.md | 0.663451 | 0.921393 | README.md | pypi |
from __future__ import annotations
import os
import pickle
from typing import Union
import cloudpickle
import gymnasium as gym
import jax.random
import lz4.frame
from chex import dataclass
from reinforced_lib.agents import BaseAgent
from reinforced_lib.exts import BaseExt
from reinforced_lib.logs import Source
from reinforced_lib.logs.logs_observer import LogsObserver
from reinforced_lib.utils.exceptions import *
from reinforced_lib.utils import is_scalar, timestamp
@dataclass
class AgentContainer:
    """
    Container bundling the per-instance, mutable data of a single agent:
    its internal state, PRNG key, last selected action, and step counter.

    Attributes
    ----------
    state : BaseAgent
        Current state of the agent.
        NOTE(review): the annotation says ``BaseAgent``, but the value stored
        here is what ``BaseAgent.init`` returns (the agent *state* object),
        not the agent itself — confirm the intended type.
    key : jax.random.PRNGKey
        A PRNG key used as the random key; split on every ``sample`` call.
    action : any
        The last action selected by the agent (``None`` before the first sample).
    step : int
        Number of ``sample`` calls performed on this agent instance so far.
    """
    state: BaseAgent
    key: jax.random.PRNGKey
    action: any
    step: int
class RLib:
    """
    Main class of the library. Exposes a simple and intuitive interface to use the library.

    Parameters
    ----------
    agent_type : type, optional
        Type of the selected agent. Must inherit from the ``BaseAgent`` class.
    agent_params : dict, optional
        Parameters of the selected agent.
    ext_type : type, optional
        Type of the selected extension. Must inherit from the ``BaseExt`` class.
    ext_params : dict, optional
        Parameters of the selected extension.
    logger_types : type or list[type], optional
        Types of the selected logging modules. Must inherit from the ``BaseLogger`` class.
    logger_sources : Source or list[Source], optional
        Sources to log.
    logger_params : dict, optional
        Parameters of the selected loggers.
    no_ext_mode : bool, default=False
        Pass observations directly to the agent (do not use the extensions).
    auto_checkpoint : int, optional
        Automatically save the experiment every ``auto_checkpoint`` steps.
        If ``None``, the automatic checkpointing is disabled.
    auto_checkpoint_path : str, optional, default=~
        Path to the directory where the automatic checkpoints will be saved.
    """

    def __init__(
            self, *,
            agent_type: type = None,
            agent_params: dict[str, any] = None,
            ext_type: type = None,
            ext_params: dict[str, any] = None,
            logger_types: Union[type, list[type]] = None,
            logger_sources: Union[Source, list[Source]] = None,
            logger_params: dict[str, any] = None,
            no_ext_mode: bool = False,
            auto_checkpoint: int = None,
            auto_checkpoint_path: str = None
    ) -> None:
        self._lz4_ext = ".pkl.lz4"
        self._default_path = os.path.expanduser("~")
        self._auto_checkpoint = auto_checkpoint
        self._auto_checkpoint_path = auto_checkpoint_path if auto_checkpoint_path else self._default_path

        # Agent, extension, and logger configuration is stored so that it can be
        # serialized by ``save`` and restored by ``load``.
        self._agent = None
        self._agent_type = agent_type
        self._agent_params = agent_params
        self._agent_containers = []

        self._ext = None
        self._no_ext_mode = no_ext_mode
        self._ext_type = ext_type
        self._ext_params = ext_params

        self._logger_types = logger_types
        self._logger_sources = logger_sources
        self._logger_params = logger_params
        self._logs_observer = LogsObserver()
        self._init_loggers = True
        self._cumulative_reward = 0.0

        if ext_type:
            self.set_ext(ext_type, ext_params)
        if agent_type:
            self.set_agent(agent_type, agent_params)
        if logger_types:
            self.set_loggers(logger_types, logger_sources, logger_params)

    def __del__(self) -> None:
        """
        Automatically finalizes the library work.
        """
        # Guard against partially-initialized instances: if ``__init__`` raised
        # before ``_logs_observer`` was assigned, calling ``finish`` here would
        # raise an ``AttributeError`` during interpreter shutdown.
        if hasattr(self, '_logs_observer'):
            self.finish()

    def finish(self) -> None:
        """
        Used to explicitly finalize the library's work. In particular, it finishes the logger's work.
        """
        self._logs_observer.finish_loggers()

    def set_agent(self, agent_type: type, agent_params: dict = None) -> None:
        """
        Initializes an agent of type ``agent_type`` with parameters ``agent_params``. The agent type must inherit from
        the ``BaseAgent`` class. The agent type cannot be changed after the first agent instance has been initialized.

        Parameters
        ----------
        agent_type : type
            Type of the selected agent. Must inherit from the ``BaseAgent`` class.
        agent_params : dict, optional
            Parameters of the selected agent.
        """
        if len(self._agent_containers) > 0:
            raise ForbiddenAgentChangeError()
        if not issubclass(agent_type, BaseAgent):
            raise IncorrectAgentTypeError(agent_type)

        self._agent_type = agent_type
        self._agent_params = agent_params

        if not self._no_ext_mode and self._ext:
            # The extension may fill in missing constructor arguments with its defaults.
            agent_params = self._ext.get_agent_params(agent_type, agent_type.parameter_space(), agent_params)
            self._agent = agent_type(**agent_params)
            self._ext.setup_transformations(self._agent.update_observation_space, self._agent.sample_observation_space)
        else:
            agent_params = agent_params if agent_params else {}
            self._agent = agent_type(**agent_params)

    def set_ext(self, ext_type: type, ext_params: dict = None) -> None:
        """
        Initializes an extension of type ``ext_type`` with parameters ``ext_params``. The extension type must inherit
        from the ``BaseExt`` class. The extension type cannot be changed after the first agent instance has been
        initialized.

        Parameters
        ----------
        ext_type : type
            Type of selected extension. Must inherit from the ``BaseExt`` class.
        ext_params : dict, optional
            Parameters of the selected extension.
        """
        if self._no_ext_mode:
            raise ForbiddenExtensionSetError()
        if len(self._agent_containers) > 0:
            raise ForbiddenExtensionChangeError()
        if not issubclass(ext_type, BaseExt):
            raise IncorrectExtensionTypeError(ext_type)

        self._ext_type = ext_type
        self._ext_params = ext_params

        ext_params = ext_params if ext_params else {}
        self._ext = ext_type(**ext_params)

        if self._agent:
            # Re-create the agent so the new extension can supply default parameters
            # and set up the observation transformations.
            agent_params = self._ext.get_agent_params(
                self._agent_type,
                self._agent_type.parameter_space(),
                self._agent_params
            )
            self._agent = self._agent_type(**agent_params)
            self._ext.setup_transformations(self._agent.update_observation_space, self._agent.sample_observation_space)

    def set_loggers(
            self,
            logger_types: Union[type, list[type]],
            logger_sources: Union[Source, list[Source]] = None,
            logger_params: dict[str, any] = None
    ) -> None:
        """
        Initializes loggers of types ``logger_types`` with parameters ``logger_params``. The logger types must inherit
        from the ``BaseLogger`` class. The logger types cannot be changed after the first agent instance has been
        initialized. ``logger_types`` and ``logger_sources`` can be objects or lists of objects, the function broadcasts
        them to the same length. The ``logger_sources`` parameter specifies the sources to log. A source can be None
        (then the logger is used to log a custom values passed by the ``log`` method), a name of the sources (e.g.,
        "action") or tuple containing the name and the ``SourceType`` (e.g., ``("action", SourceType.OBSERVATION)``).
        If the name itself is inconclusive (e.g., it occurs as a metric and as an observation), the behaviour depends
        on the implementation of the logger.

        Parameters
        ----------
        logger_types : type or list[type]
            Types of the selected logging modules.
        logger_sources : Source or list[Source], optional
            Sources to log.
        logger_params : dict, optional
            Parameters of the selected logging modules.
        """
        if not self._init_loggers:
            raise ForbiddenLoggerSetError()

        self._logger_types = logger_types
        self._logger_sources = logger_sources
        self._logger_params = logger_params

        logger_params = logger_params if logger_params else {}
        logger_types, logger_sources = self._object_to_list(logger_types), self._object_to_list(logger_sources)
        logger_types, logger_sources = self._broadcast(logger_types, logger_sources)

        for logger_type, source in zip(logger_types, logger_sources):
            self._logs_observer.add_logger(source, logger_type, logger_params)

    @staticmethod
    def _object_to_list(obj: Union[any, list[any]]) -> list[any]:
        # Wraps a scalar in a one-element list; lists pass through unchanged.
        return obj if isinstance(obj, list) else [obj]

    @staticmethod
    def _broadcast(list_a: list[any], list_b: list[any]) -> tuple[list[any], list[any]]:
        # NumPy-style broadcasting for two lists: equal lengths pass through,
        # a singleton is repeated to match the other list's length.
        if len(list_a) == len(list_b):
            return list_a, list_b
        if len(list_a) == 1:
            return list_a * len(list_b), list_b
        if len(list_b) == 1:
            return list_a, list_b * len(list_a)
        raise TypeError('Incompatible length of given lists.')

    @property
    def observation_space(self) -> gym.spaces.Space:
        """
        Returns the observation space of the selected extension (or agent, if ``no_ext_mode`` is set).

        Returns
        -------
        gym.spaces.Space
            Observation space of the selected extension or agent.
        """
        if self._no_ext_mode:
            if not self._agent:
                raise NoAgentError()
            return gym.spaces.Dict({
                'update_observation_space': self._agent.update_observation_space,
                'sample_observation_space': self._agent.sample_observation_space
            })
        else:
            if not self._ext:
                raise NoExtensionError()
            return self._ext.observation_space

    @property
    def action_space(self) -> gym.spaces.Space:
        """
        Returns the action space of the selected agent.

        Returns
        -------
        gym.spaces.Space
            Action space of the selected agent.
        """
        if not self._agent:
            raise NoAgentError()
        return self._agent.action_space

    def init(self, seed: int = 42) -> int:
        """
        Initializes a new instance of the agent.

        Parameters
        ----------
        seed : int, default=42
            Number used to initialize the JAX pseudo-random number generator.

        Returns
        -------
        int
            Identifier of the created instance.
        """
        agent_id = len(self._agent_containers)
        init_key, key = jax.random.split(jax.random.PRNGKey(seed))

        self._agent_containers.append(AgentContainer(
            state=self._agent.init(init_key),
            key=key,
            action=None,
            step=0
        ))

        return agent_id

    def sample(
            self,
            *args,
            agent_id: int = 0,
            is_training: bool = True,
            update_observations: Union[dict, tuple, any] = None,
            sample_observations: Union[dict, tuple, any] = None,
            **kwargs
    ) -> any:
        """
        Takes the extension state as an input, updates the agent state, and returns the next action selected by
        the agent. If ``no_ext_mode`` is disabled, observations are passed by args and kwargs (the observations must
        match the extension observation space). If ``no_ext_mode`` is enabled, observations must be passed
        by the ``update_observations`` and ``sample_observations`` parameters (the observations must match the agent's
        ``update_observation_space`` and ``sample_observation_space``). If there are no agent instances initialized,
        the method automatically initializes the first instance. If the ``is_training`` flag is set, the ``update`` and
        ``sample`` agent methods will be called. Otherwise, only the ``sample`` method will be called.

        Parameters
        ----------
        *args : tuple
            Environment observations.
        agent_id : int, default=0
            The identifier of the agent instance.
        is_training : bool
            Flag indicating whether the agent state should be updated in this step.
        update_observations : dict or tuple or any, optional
            Observations used when ``no_ext_mode`` is enabled (must match agent's ``update_observation_space``).
        sample_observations : dict or tuple or any, optional
            Observations used when ``no_ext_mode`` is enabled (must match agent's ``sample_observation_space``).
        **kwargs : dict
            Environment observations.

        Returns
        -------
        any
            Action selected by the agent.
        """
        if not self._agent:
            raise NoAgentError()
        if not self._no_ext_mode and not self._ext:
            raise NoExtensionError()

        update_observations = update_observations if update_observations else {}
        sample_observations = sample_observations if sample_observations else {}

        if self._init_loggers:
            self._logs_observer.init_loggers()
            self._init_loggers = False

        if len(self._agent_containers) == 0:
            self.init()

        key, update_key, sample_key = jax.random.split(self._agent_containers[agent_id].key, 3)
        state = self._agent_containers[agent_id].state
        action = self._agent_containers[agent_id].action
        step = self._agent_containers[agent_id].step

        if not self._no_ext_mode:
            update_observations, sample_observations = self._ext.transform(*args, action=action, **kwargs)
            # Copy kwargs so that merging observations does not mutate the caller-visible dict.
            all_observations = dict(kwargs)
            if isinstance(update_observations, dict) and isinstance(sample_observations, dict):
                all_observations |= update_observations
                all_observations |= sample_observations
                self._logs_observer.update_observations(all_observations)
            else:
                self._logs_observer.update_observations(update_observations)
                self._logs_observer.update_observations(sample_observations)
        else:
            # Bug fix: ``all_observations`` was previously left undefined when
            # ``no_ext_mode`` was enabled, so the reward-logging code below
            # raised a NameError on every call. Build it here from the raw
            # kwargs and any dict-typed observations.
            all_observations = dict(kwargs)
            if isinstance(update_observations, dict):
                all_observations |= update_observations
            if isinstance(sample_observations, dict):
                all_observations |= sample_observations
            self._logs_observer.update_observations(update_observations)
            self._logs_observer.update_observations(sample_observations)

        # Skip the update on the very first step - there is no previous action to learn from.
        if is_training and step > 0:
            if isinstance(update_observations, dict):
                state = self._agent.update(state, update_key, **update_observations)
            elif isinstance(update_observations, tuple):
                state = self._agent.update(state, update_key, *update_observations)
            else:
                state = self._agent.update(state, update_key, update_observations)

            if self._auto_checkpoint is not None and (step + 1) % self._auto_checkpoint == 0:
                checkpoint_path = os.path.join(self._auto_checkpoint_path, f'rlib-checkpoint-agent-{agent_id}-step-{step + 1}')
                self.save(checkpoint_path, agent_ids=agent_id)

        if isinstance(sample_observations, dict):
            action = self._agent.sample(state, sample_key, **sample_observations)
        elif isinstance(sample_observations, tuple):
            action = self._agent.sample(state, sample_key, *sample_observations)
        else:
            action = self._agent.sample(state, sample_key, sample_observations)

        self._logs_observer.update_agent_state(state)
        self._logs_observer.update_metrics(action, 'action')

        def log_reward(reward: float) -> None:
            # Tracks both the per-step reward and the running cumulative reward.
            self._cumulative_reward += reward
            self._logs_observer.update_metrics(reward, 'reward')
            self._logs_observer.update_metrics(self._cumulative_reward, 'cumulative')

        if 'reward' in all_observations:
            log_reward(all_observations['reward'])
        elif self._ext:
            # Best effort: the extension may provide the reward either as a plain
            # method or as a registered observation function. A TypeError (e.g.
            # missing arguments) deliberately disables reward logging for this step.
            try:
                if hasattr(self._ext, 'reward'):
                    log_reward(self._ext.reward(**all_observations))
                elif 'reward' in self._ext._observation_functions:
                    log_reward(self._ext._observation_functions['reward'](**all_observations))
            except TypeError:
                pass

        self._agent_containers[agent_id] = AgentContainer(
            state=state,
            key=key,
            action=action,
            step=step + 1
        )

        return action

    def save(self, path: str = None, *, agent_ids: Union[int, list[int]] = None) -> str:
        """
        Saves the state of the experiment to a file in lz4 format. For each agent, both the state and the initialization
        parameters are saved. The extension and loggers settings are saved as well to fully reconstruct the experiment.

        Parameters
        ----------
        path : str, optional
            Path to the checkpoint file. If none specified, saves to the default path.
            If the ``.pkl.lz4`` suffix is not detected, it will be appended automatically.
        agent_ids : int or array_like, optional
            The identifier of the agent instance(s) to save. If none specified, saves the state of all agents.

        Returns
        -------
        str
            Path to the saved checkpoint file.
        """
        if agent_ids is None:
            agent_ids = list(range(len(self._agent_containers)))
        elif is_scalar(agent_ids):
            agent_ids = [agent_ids]

        agent_containers = [self._agent_containers[agent_id] for agent_id in agent_ids]

        if path is None:
            path = os.path.join(self._default_path, f"rlib-checkpoint-{timestamp()}.pkl.lz4")
        elif not path.endswith(self._lz4_ext):
            path = path + self._lz4_ext

        experiment_state = {
            "agent_type": self._agent_type,
            "agent_params": self._agent_params,
            "agents": {
                agent_id: {
                    "state": agent.state,
                    "key": agent.key,
                    "action": agent.action,
                    "step": agent.step
                } for agent_id, agent in zip(agent_ids, agent_containers)
            },
            "ext_type": self._ext_type,
            "ext_params": self._ext_params,
            "logger_types": self._logger_types,
            "logger_sources": self._logger_sources,
            "logger_params": self._logger_params,
            "auto_checkpoint": self._auto_checkpoint
        }

        with lz4.frame.open(path, 'wb') as f:
            f.write(cloudpickle.dumps(experiment_state))

        return path

    @staticmethod
    def load(
            path: str,
            *,
            agent_params: dict[str, any] = None,
            ext_params: dict[str, any] = None,
            logger_types: Union[type, list[type]] = None,
            logger_sources: Union[Source, list[Source]] = None,
            logger_params: dict[str, any] = None
    ) -> RLib:
        """
        Loads the state of the experiment from a file in lz4 format.

        Parameters
        ----------
        path : str
            Path to the checkpoint file.
        agent_params : dict[str, any], optional
            Dictionary of altered agent parameters with their new values, by default None.
        ext_params : dict[str, any], optional
            Dictionary of altered extension parameters with their new values, by default None.
        logger_types : type or list[type], optional
            Types of the selected logging modules. Must inherit from the ``BaseLogger`` class.
        logger_sources : Source or list[Source], optional
            Sources to log.
        logger_params : dict, optional
            Parameters of the selected loggers.
        """
        # SECURITY: unpickling executes arbitrary code - only load checkpoint
        # files from trusted sources.
        with lz4.frame.open(path, 'rb') as f:
            experiment_state = pickle.loads(f.read())

        rlib = RLib(
            auto_checkpoint=experiment_state["auto_checkpoint"],
            no_ext_mode=experiment_state["ext_type"] is None
        )
        rlib._agent_containers = []

        if experiment_state["ext_type"]:
            if ext_params:
                rlib.set_ext(experiment_state["ext_type"], ext_params)
            else:
                rlib.set_ext(experiment_state["ext_type"], experiment_state["ext_params"])

        if experiment_state["agent_type"]:
            if agent_params:
                rlib.set_agent(experiment_state["agent_type"], agent_params)
            else:
                rlib.set_agent(experiment_state["agent_type"], experiment_state["agent_params"])

        if logger_types:
            rlib.set_loggers(logger_types, logger_sources, logger_params)
        elif experiment_state["logger_types"]:
            rlib.set_loggers(
                experiment_state["logger_types"],
                experiment_state["logger_sources"],
                logger_params if logger_params else experiment_state["logger_params"]
            )

        for agent_id, agent_container in experiment_state["agents"].items():
            # Create placeholder instances until the saved agent_id exists,
            # then overwrite the container with the saved state.
            while agent_id >= len(rlib._agent_containers):
                rlib.init()
            rlib._agent_containers[agent_id] = AgentContainer(
                state=agent_container["state"],
                key=agent_container["key"],
                action=agent_container["action"],
                step=agent_container["step"]
            )

        return rlib

    def log(self, name: str, value: any) -> None:
        """
        Logs a custom value.

        Parameters
        ----------
        name : str
            The name of the value to log.
        value : any
            The value to log.
        """
        self._logs_observer.update_custom(value, name)

    def to_tflite(self, path: str = None, *, agent_id: int = None, sample_only: bool = False) -> None:
        """
        Converts the agent to a TensorFlow Lite model and saves it to a file.

        Parameters
        ----------
        path : str, optional
            Path to the output file.
        agent_id : int, optional
            The identifier of the agent instance to convert. If specified,
            state of the selected agent will be saved.
        sample_only : bool
            Flag indicating if the method should save only the sample function.
        """
        if not self._agent:
            raise NoAgentError()

        if len(self._agent_containers) == 0:
            self.init()

        if sample_only and agent_id is None:
            raise ValueError("Agent ID must be specified when saving sample function only.")

        if path is None:
            path = self._default_path

        if agent_id is None:
            # Export a freshly-initialized agent when no instance is selected.
            init_tfl, update_tfl, sample_tfl = self._agent.export(
                init_key=jax.random.PRNGKey(42)
            )
        else:
            init_tfl, update_tfl, sample_tfl = self._agent.export(
                init_key=self._agent_containers[agent_id].key,
                state=self._agent_containers[agent_id].state,
                sample_only=sample_only
            )

        base_name = self._agent.__class__.__name__
        base_name += f'-{agent_id}-' if agent_id is not None else '-'
        base_name += timestamp()

        with open(os.path.join(path, f'rlib-{base_name}-init.tflite'), 'wb') as f:
            f.write(init_tfl)
        with open(os.path.join(path, f'rlib-{base_name}-sample.tflite'), 'wb') as f:
            f.write(sample_tfl)
        if not sample_only:
            with open(os.path.join(path, f'rlib-{base_name}-update.tflite'), 'wb') as f:
                f.write(update_tfl)
from abc import ABC, abstractmethod
from functools import partial
import inspect
from typing import Union
import gymnasium as gym
from reinforced_lib.exts.utils import *
from reinforced_lib.utils.exceptions import IncorrectSpaceError, IncompatibleSpacesError, NoDefaultParameterError
class BaseExt(ABC):
    """
    Container for domain-specific knowledge and functions for a given environment. Provides the transformation
    from the raw observations to the agent update and sample spaces. Stores the default argument values for
    agent initialization.
    """

    def __init__(self) -> None:
        # Registries of methods annotated with the @observation and @parameter
        # decorators (see exts/utils.py); keyed by the declared name.
        self._observation_functions: dict[str, Callable] = {}
        self._parameter_functions: dict[str, Callable] = {}
        self._add_action_to_observations = False

        for name in dir(self):
            obj = getattr(self, name)
            if hasattr(obj, 'observation_info'):
                self._observation_functions[obj.observation_info.name] = obj
            if hasattr(obj, 'parameter_info'):
                self._parameter_functions[obj.parameter_info.name] = obj

    @property
    @abstractmethod
    def observation_space(self) -> gym.spaces.Space:
        """
        Basic observations of the environment in Gymnasium format.
        """
        pass

    def get_agent_params(
            self,
            agent_type: type = None,
            agent_parameter_space: gym.spaces.Dict = None,
            user_parameters: dict[str, any] = None
    ) -> dict[str, any]:
        """
        Composes agent initialization arguments from values passed by the user and default values stored in the
        parameter functions. Returns a dictionary with the parameters matching the agent parameters space.

        Parameters
        ----------
        agent_type : type, optional
            Type of the selected agent.
        agent_parameter_space : gym.spaces.Dict, optional
            Parameters required by the agents' constructor in Gymnasium format.
        user_parameters : dict, optional
            Parameters provided by the user.

        Returns
        -------
        dict
            Dictionary with the initialization parameters for the agent.
        """
        parameters = user_parameters if user_parameters else {}

        if agent_parameter_space is None:
            return parameters

        # Collect the names of constructor arguments that already have defaults -
        # those never need to be supplied by a parameter function.
        default_parameters = set()
        if agent_type is not None:
            for key, value in inspect.signature(agent_type.__init__).parameters.items():
                # Bug fix: use the public ``inspect.Parameter.empty`` sentinel with an
                # identity check instead of the private ``inspect._empty`` and ``!=``.
                if value.default is not inspect.Parameter.empty:
                    default_parameters.add(key)

        for name, space in agent_parameter_space.spaces.items():
            if name in parameters:
                continue
            if name not in self._parameter_functions:
                if name in default_parameters:
                    continue
                raise NoDefaultParameterError(type(self), name, space)

            func = self._parameter_functions[name]
            func_space = func.parameter_info.type

            # Exact type match is intentional here - a subclass space is not
            # considered equivalent to the declared parameter space.
            if space is None or type(space) == type(func_space):
                parameters[name] = func()
            else:
                raise IncompatibleSpacesError(func_space, space)

        return parameters

    def setup_transformations(
            self,
            agent_update_space: gym.spaces.Space = None,
            agent_sample_space: gym.spaces.Space = None
    ) -> None:
        """
        Creates functions that transform raw observations and values provided by the observation functions
        to the agent update and sample spaces.

        Parameters
        ----------
        agent_update_space : gym.spaces.Space, optional
            Observations required by the agent ``update`` function in Gymnasium format.
        agent_sample_space : gym.spaces.Space, optional
            Observations required by the agent ``sample`` function in Gymnasium format.
        """
        # If the agent expects the previous 'action' but the extension does not
        # provide it, extend the observation space and remember to inject the
        # action into the raw observations at transform time.
        if 'action' not in self._observation_functions and \
                isinstance(self.observation_space, gym.spaces.Dict) and \
                'action' not in self.observation_space:
            if 'action' in agent_update_space.spaces:
                self.observation_space['action'] = agent_update_space['action']
            if 'action' in agent_sample_space.spaces:
                self.observation_space['action'] = agent_sample_space['action']
            self._add_action_to_observations = True

        self._update_space_transform = self._transform_spaces(self.observation_space, agent_update_space)
        self._sample_space_transform = self._transform_spaces(self.observation_space, agent_sample_space)

    def _transform_spaces(
            self,
            in_space: gym.spaces.Space,
            out_space: gym.spaces.Space,
            accessor: Union[str, int] = None
    ) -> Callable:
        """
        Creates function that transforms environment observations and values provided by the observation
        functions to a given space. If the ``out_space`` is not defined, returns observations unchanged.

        Parameters
        ----------
        in_space : gym.spaces.Space
            Source space.
        out_space : gym.spaces.Space
            Target space.
        accessor : str or int, optional
            Path to nested observations.

        Returns
        -------
        Callable
            Function that transforms values from ``in_space`` to ``out_space``.
        """
        if out_space is None:
            return lambda *args, **kwargs: None

        # Map of "simple" (non-composite) space types to their equality tests.
        simple_types = {
            gym.spaces.Box: test_box,
            gym.spaces.Discrete: test_discrete,
            gym.spaces.MultiBinary: test_multi_binary,
            gym.spaces.MultiDiscrete: test_multi_discrete,
            gym.spaces.Sequence: test_sequence,
            gym.spaces.Space: test_space
        }

        if type(out_space) in simple_types:
            if type(in_space) not in simple_types:
                raise IncompatibleSpacesError(in_space, out_space)

            test_function = simple_types[type(out_space)]

            if test_function(in_space, out_space):
                return partial(self._simple_transform, accessor)

            # Fall back to the first observation function whose declared space
            # matches (or is unspecified). If none matches, control falls
            # through to the final IncorrectSpaceError below.
            for observation_function in self._observation_functions.values():
                func_space = observation_function.observation_info.type
                if func_space is None or test_function(func_space, out_space):
                    return observation_function

        if isinstance(out_space, gym.spaces.Dict):
            if not isinstance(in_space, gym.spaces.Dict):
                raise IncompatibleSpacesError(in_space, out_space)

            observations: dict[str, Callable] = {}

            for name, space in out_space.spaces.items():
                if name in in_space.spaces:
                    if type(space) not in simple_types:
                        # Composite entry - recurse with the key as accessor.
                        observations[name] = self._transform_spaces(in_space[name], space, name)
                    elif simple_types[type(space)](in_space[name], space):
                        # Identical simple entry - pick it straight out of kwargs.
                        observations[name] = partial(lambda inner_name, *args, **kwargs: kwargs[inner_name], name)
                    else:
                        raise IncompatibleSpacesError(in_space, space)
                elif name in self._observation_functions:
                    func_space = self._observation_functions[name].observation_info.type
                    if func_space is None or simple_types[type(space)](func_space, space):
                        observations[name] = partial(
                            lambda func, inner_name, inner_accessor, *args, **kwargs:
                            self._function_transform(func, inner_name, inner_accessor, *args, **kwargs),
                            self._observation_functions[name], name, accessor
                        )
                    else:
                        raise IncompatibleSpacesError(func_space, space)
                else:
                    raise IncompatibleSpacesError(in_space, space)

            return partial(self._dict_transform, observations, accessor)

        if isinstance(out_space, gym.spaces.Tuple):
            if not isinstance(in_space, gym.spaces.Tuple) or len(in_space.spaces) != len(out_space.spaces):
                raise IncompatibleSpacesError(in_space, out_space)

            observations: list[Callable] = []

            # NOTE(review): the loop variable names look inverted - ``agent_space``
            # iterates over ``in_space`` (the extension side) and ``ext_space`` over
            # ``out_space`` (the agent side). The logic is preserved as-is; confirm
            # the intended argument order against the upstream project.
            for i, (agent_space, ext_space) in enumerate(zip(in_space.spaces, out_space.spaces)):
                if type(agent_space) not in simple_types:
                    observations.append(self._transform_spaces(ext_space, agent_space, i))
                elif simple_types[type(agent_space)](ext_space, agent_space):
                    observations.append(partial(lambda inner_i, *args, **kwargs: args[inner_i], i))
                else:
                    raise IncompatibleSpacesError(agent_space, in_space)

            return partial(self._tuple_transform, observations, accessor)

        raise IncorrectSpaceError()

    @staticmethod
    def _get_nested_args(accessor: Union[str, int], *args, **kwargs) -> tuple[tuple, dict]:
        """
        Selects the appropriate nested args or kwargs.

        Parameters
        ----------
        accessor : str or int
            Path to nested observations.
        *args : tuple
            Environment observations.
        **kwargs : dict
            Environment observations.

        Returns
        -------
        tuple[tuple, dict]
            Args and kwargs.
        """
        if accessor is not None:
            # Integer accessors index into positional args, strings into kwargs.
            if isinstance(accessor, int):
                arguments = args[accessor]
            else:
                arguments = kwargs[accessor]

            if isinstance(arguments, dict):
                return tuple(), arguments
            else:
                return arguments, {}

        return args, kwargs

    def _simple_transform(self, accessor: Union[str, int], *args, **kwargs) -> any:
        """
        Returns the appropriate observation from environment observations.

        Parameters
        ----------
        accessor : str or int
            Path to nested observations.
        *args : tuple
            Environment observations.
        **kwargs : dict
            Environment observations.

        Returns
        -------
        any
            Selected observation from the extension observation space.
        """
        args, kwargs = self._get_nested_args(accessor, *args, **kwargs)

        # Prefer the first positional observation; otherwise take the first
        # keyword value (insertion order).
        if len(args) > 0:
            return args[0]
        else:
            first, *_ = kwargs.values()
            return first

    def _function_transform(self, func: Callable, name: str, accessor: Union[str, int], *args, **kwargs) -> any:
        """
        Returns the appropriate observation from the observation function or from environment observations
        if present.

        Parameters
        ----------
        func : Callable
            Function that returns selected observation.
        name : str
            Name of the selected observation.
        accessor : str or int
            Path to nested observations.
        *args : tuple
            Environment observations.
        **kwargs : dict
            Environment observations.

        Returns
        -------
        any
            Selected observation.
        """
        args, kwargs = self._get_nested_args(accessor, *args, **kwargs)

        # A raw observation with the same name takes precedence over the
        # registered observation function.
        if name in kwargs:
            return kwargs[name]
        else:
            return func(*args, **kwargs)

    def _dict_transform(self, observations: dict[str, Callable], accessor: Union[str, int], *args, **kwargs) -> dict:
        """
        Returns a dictionary filled with appropriate environment observations and values provided by
        the observation functions.

        Parameters
        ----------
        observations : dict[str, Callable]
            Dictionary with observation names and functions that provide according observations.
        accessor : str or int
            Path to nested observations.
        *args : tuple
            Environment observations.
        **kwargs : dict
            Environment observations.

        Returns
        -------
        dict
            Dictionary with observations for the agent.
        """
        args, kwargs = self._get_nested_args(accessor, *args, **kwargs)
        return {name: func(*args, **kwargs) for name, func in observations.items()}

    def _tuple_transform(self, observations: list[Callable], accessor: Union[str, int], *args, **kwargs) -> tuple:
        """
        Returns a tuple filled with appropriate environment observations and values provided by
        the observation functions.

        Parameters
        ----------
        observations : list[Callable]
            List with functions that provide selected observations.
        accessor : str or int
            Path to nested observations.
        *args : tuple
            Environment observations.
        **kwargs : dict
            Environment observations.

        Returns
        -------
        tuple
            Tuple with observations for the agent.
        """
        args, kwargs = self._get_nested_args(accessor, *args, **kwargs)
        return tuple(func(*args, **kwargs) for func in observations)

    def transform(self, *args, action: any = None, **kwargs) -> tuple[any, any]:
        """
        Transforms raw observations and values provided by the observation functions to the agent observation
        and sample spaces. Provides the last action selected by the agent if it is required by the agent.

        Parameters
        ----------
        *args : tuple
            Environment observations.
        action : any
            The last action selected by the agent.
        **kwargs : dict
            Environment observations.

        Returns
        -------
        tuple[any, any]
            Agent update and sample observations.
        """
        if self._add_action_to_observations:
            kwargs['action'] = action
        return self._update_space_transform(*args, **kwargs), self._sample_space_transform(*args, **kwargs)
from typing import Callable, NamedTuple
import gymnasium as gym
import numpy as np
class ObservationInfo(NamedTuple):
    """
    Metadata attached (via the ``@observation`` decorator) to an extension method
    that provides one of the values from the agent observation space.

    Attributes
    ----------
    name : str
        Name of the provided observation.
    type : gym.spaces.Space
        Type of the provided value in Gymnasium format (may be ``None`` when
        the decorator is used without an explicit type).
    """
    name: str
    type: gym.spaces.Space
class ParameterInfo(NamedTuple):
    """
    Metadata attached (via the ``@parameter`` decorator) to an extension method
    that provides a default value for one of the agent constructor parameters.

    Attributes
    ----------
    name : str
        Name of the provided parameter.
    type : gym.spaces.Space
        Type of the provided parameter in Gymnasium format (may be ``None``
        when the decorator is used without an explicit type).
    """
    name: str
    type: gym.spaces.Space
def observation(observation_name: str = None, observation_type: gym.spaces.Space = None) -> Callable:
    """
    Decorator that marks an extension method as an observation function by
    attaching an ``ObservationInfo`` record to it.

    Parameters
    ----------
    observation_name : str, optional
        Name of the provided observation; defaults to the function's own name.
    observation_type : gym.spaces.Space, optional
        Type of the provided value in Gymnasium format.

    Returns
    -------
    Callable
        Decorator that annotates and returns the wrapped function unchanged.
    """
    def decorator(function):
        if observation_name is None:
            resolved_name = function.__name__
        else:
            resolved_name = observation_name
        function.observation_info = ObservationInfo(resolved_name, observation_type)
        return function

    return decorator
def parameter(parameter_name: str = None, parameter_type: gym.spaces.Space = None) -> Callable:
    """
    Decorator used to annotate the parameter functions.

    Parameters
    ----------
    parameter_name : str, optional
        Name of the provided parameter. Defaults to the decorated function's name.
    parameter_type : gym.spaces.Space, optional
        Type of the provided parameter in Gymnasium format.

    Returns
    -------
    Callable
        Decorator that attaches a ``ParameterInfo`` record to the function.
    """

    def decorator(function):
        if parameter_name is None:
            label = function.__name__
        else:
            label = parameter_name

        function.parameter_info = ParameterInfo(label, parameter_type)
        return function

    return decorator
def test_box(a: gym.spaces.Space, b: gym.spaces.Box) -> bool:
    """
    Tests if the space ``a`` is identical to the gym.spaces.Box space ``b``
    (same bounds, shape, and dtype).

    Parameters
    ----------
    a : gym.spaces.Space
        Space ``a``.
    b : gym.spaces.Box
        Box space ``b``.

    Returns
    -------
    bool
        Result of the comparison.
    """

    if not isinstance(a, gym.spaces.Box):
        return False

    bounds_match = np.array_equal(a.low, b.low) and np.array_equal(a.high, b.high)
    return bounds_match and a.shape == b.shape and a.dtype == b.dtype
def test_discrete(a: gym.spaces.Space, b: gym.spaces.Discrete) -> bool:
    """
    Tests if the space ``a`` is identical to the gym.spaces.Discrete space ``b``
    (same number of elements and the same start value).

    Parameters
    ----------
    a : gym.spaces.Space
        Space ``a``.
    b : gym.spaces.Discrete
        Discrete space ``b``.

    Returns
    -------
    bool
        Result of the comparison.
    """

    if not isinstance(a, gym.spaces.Discrete):
        return False

    return a.n == b.n and a.start == b.start
def test_multi_binary(a: gym.spaces.Space, b: gym.spaces.MultiBinary) -> bool:
    """
    Tests if the space ``a`` is identical to the gym.spaces.MultiBinary space ``b``
    (same shape parameter ``n``).

    Parameters
    ----------
    a : gym.spaces.Space
        Space ``a``.
    b : gym.spaces.MultiBinary
        MultiBinary space ``b``.

    Returns
    -------
    bool
        Result of the comparison.
    """

    if not isinstance(a, gym.spaces.MultiBinary):
        return False

    return np.array_equal(a.n, b.n)
def test_multi_discrete(a: gym.spaces.Space, b: gym.spaces.MultiDiscrete) -> bool:
    """
    Tests if the space ``a`` is identical to the gym.spaces.MultiDiscrete space ``b``
    (same per-dimension sizes and the same dtype).

    Parameters
    ----------
    a : gym.spaces.Space
        Space ``a``.
    b : gym.spaces.MultiDiscrete
        MultiDiscrete space ``b``.

    Returns
    -------
    bool
        Result of the comparison.
    """

    if not isinstance(a, gym.spaces.MultiDiscrete):
        return False

    return np.array_equal(a.nvec, b.nvec) and a.dtype == b.dtype
def test_sequence(a: gym.spaces.Space, b: gym.spaces.Sequence) -> bool:
    """
    Tests if the space ``a`` is identical to the gym.spaces.Sequence space ``b``.
    Two sequence spaces are considered identical if their feature spaces
    (the spaces describing a single element of the sequence) are identical.

    Parameters
    ----------
    a : gym.spaces.Space
        Space ``a``.
    b : gym.spaces.Sequence
        Sequence space ``b``.

    Returns
    -------
    bool
        Result of the comparison.
    """

    if not isinstance(a, gym.spaces.Sequence):
        return False

    # Compare the per-element (feature) spaces, not the Sequence wrappers:
    # a Sequence space has shape == dtype == None, so dispatching on the
    # wrappers themselves would fall through to test_space and report any
    # two Sequence spaces as identical.
    a, b = a.feature_space, b.feature_space

    if isinstance(b, gym.spaces.Box):
        return test_box(a, b)
    elif isinstance(b, gym.spaces.Discrete):
        return test_discrete(a, b)
    elif isinstance(b, gym.spaces.MultiBinary):
        return test_multi_binary(a, b)
    elif isinstance(b, gym.spaces.MultiDiscrete):
        return test_multi_discrete(a, b)
    else:
        return test_space(a, b)
def test_space(a: gym.spaces.Space, b: gym.spaces.Space) -> bool:
    """
    Tests if the space ``a`` is identical to the space ``b``. Generic fallback
    comparison based on shape and dtype only.

    Parameters
    ----------
    a : gym.spaces.Space
        Space ``a``.
    b : gym.spaces.Space
        Space ``b``.

    Returns
    -------
    bool
        Result of the comparison.
    """

    # Short-circuit kept: dtype is only inspected when the shapes already match.
    if a.shape != b.shape:
        return False

    return a.dtype == b.dtype
import gymnasium as gym
import numpy as np
from reinforced_lib.exts import BaseExt, observation, parameter
class Gymnasium(BaseExt):
    """
    Gymnasium [1]_ extension. Simplifies interaction of RL agents with the Gymnasium environments by providing
    the environment state, reward, terminal flag, and shapes of the observation and action spaces.

    Note: the observation and parameter functions below are registered through the
    ``@observation`` / ``@parameter`` decorators; their names and parameter names
    form the interface matched against the agent spaces.

    Parameters
    ----------
    env_id : str
        Name of the Gymnasium environment.

    References
    ----------
    .. [1] Gymnasium https://gymnasium.farama.org
    """

    def __init__(self, env_id: str) -> None:
        # The wrapped environment must exist before BaseExt.__init__ runs,
        # because the parameter functions below read ``self.env``.
        self.env = gym.make(env_id)
        super().__init__()

    # Empty Dict space - the raw environment state is forwarded as-is
    # (see ``env_state`` below) rather than being described field by field.
    observation_space = gym.spaces.Dict({})

    @observation()
    def env_state(self, env_state, reward, terminal, truncated, info, *args, **kwargs) -> any:
        # Pass the raw environment state through unchanged.
        return env_state

    @observation(observation_type=gym.spaces.Box(-np.inf, np.inf, (1,)))
    def reward(self, env_state, reward, terminal, truncated, info, *args, **kwargs) -> float:
        return reward

    @observation(observation_type=gym.spaces.MultiBinary(1))
    def terminal(self, env_state, reward, terminal, truncated, info, *args, **kwargs) -> bool:
        # Truncation (e.g. a time limit) is reported as a terminal state too.
        return terminal or truncated

    @parameter(parameter_type=gym.spaces.Sequence(gym.spaces.Box(1, np.inf, (1,), np.int32)))
    def obs_space_shape(self) -> tuple:
        return self.env.observation_space.shape

    @parameter(parameter_type=gym.spaces.Sequence(gym.spaces.Box(1, np.inf, (1,), np.int32)))
    def act_space_shape(self) -> tuple:
        return self.env.action_space.shape

    @parameter(parameter_type=gym.spaces.Box(1, np.inf, (1,), np.int32))
    def act_space_size(self) -> int:
        # Only discrete action spaces have a well-defined size; raising
        # AttributeError marks this parameter as unavailable for other spaces.
        if isinstance(self.env.action_space, gym.spaces.Discrete):
            return self.env.action_space.n
        raise AttributeError()

    @parameter(parameter_type=gym.spaces.Sequence(gym.spaces.Box(-np.inf, np.inf)))
    def min_action(self) -> tuple:
        # NOTE(review): assumes a Box-like action space (only those define
        # ``low``/``high``) - other spaces raise AttributeError here. Confirm.
        return self.env.action_space.low

    @parameter(parameter_type=gym.spaces.Sequence(gym.spaces.Box(-np.inf, np.inf)))
    def max_action(self) -> tuple:
        return self.env.action_space.high
from typing import Callable
import jax
import jax.numpy as jnp
from chex import dataclass, Array, Numeric, Scalar, PRNGKey, Shape
@dataclass
class ExperienceReplay:
    """
    Container for experience replay buffer functions. Instances are created
    by :func:`experience_replay`, which fills the fields with jitted closures.

    Attributes
    ----------
    init : Callable
        Function that initializes the replay buffer.
    append : Callable
        Function that appends a new values to the replay buffer.
    sample : Callable
        Function that samples a batch from the replay buffer.
    is_ready : Callable
        Function that checks if the replay buffer is ready to be sampled.
    """

    init: Callable
    append: Callable
    sample: Callable
    is_ready: Callable
@dataclass
class ReplayBuffer:
    """
    Dataclass containing the replay buffer values. The replay buffer is implemented
    as a circular buffer: ``ptr`` marks the next slot to overwrite and wraps around
    modulo the buffer capacity, while ``size`` saturates at the capacity.

    Attributes
    ----------
    states : array_like
        Array containing the states.
    actions : array_like
        Array containing the actions.
    rewards : array_like
        Array containing the rewards.
    terminals : array_like
        Array containing the terminal flags.
    next_states : array_like
        Array containing the next states.
    size : int
        Current size of the replay buffer.
    ptr : int
        Current pointer of the replay buffer.
    """

    states: Array
    actions: Array
    rewards: Array
    terminals: Array
    next_states: Array
    # Number of valid transitions currently stored (saturates at capacity).
    size: jnp.int32
    # Index of the next slot to write into (wraps around - circular buffer).
    ptr: jnp.int32
def experience_replay(
        buffer_size: jnp.int32,
        batch_size: jnp.int32,
        obs_space_shape: Shape,
        act_space_shape: Shape
) -> ExperienceReplay:
    """
    Experience replay buffer used for off-policy learning. Improves the stability
    of the learning process by reducing the correlation between the samples and
    enables an agent to learn from past experiences.

    Parameters
    ----------
    buffer_size : int
        Maximum size of the replay buffer.
    batch_size : int
        Size of the batch to be sampled from the replay buffer.
    obs_space_shape : Shape
        Shape of the observation space.
    act_space_shape : Shape
        Shape of the action space.

    Returns
    -------
    out : ExperienceReplay
        Container for experience replay buffer functions.
    """

    def init() -> ReplayBuffer:
        """Creates an empty replay buffer with preallocated storage."""

        empty_obs = jnp.empty((buffer_size, *obs_space_shape))
        return ReplayBuffer(
            states=empty_obs,
            actions=jnp.empty((buffer_size, *act_space_shape)),
            rewards=jnp.empty((buffer_size, 1)),
            terminals=jnp.empty((buffer_size, 1), dtype=jnp.bool_),
            next_states=jnp.empty((buffer_size, *obs_space_shape)),
            size=0,
            ptr=0
        )

    def append(
            buffer: ReplayBuffer,
            state: Numeric,
            action: Numeric,
            reward: Scalar,
            terminal: jnp.bool_,
            next_state: Numeric
    ) -> ReplayBuffer:
        """
        Writes one transition at the current pointer and advances the pointer
        (wrapping around when the capacity is reached).
        """

        slot = buffer.ptr
        return ReplayBuffer(
            states=buffer.states.at[slot].set(state),
            actions=buffer.actions.at[slot].set(action),
            rewards=buffer.rewards.at[slot].set(reward),
            terminals=buffer.terminals.at[slot].set(terminal),
            next_states=buffer.next_states.at[slot].set(next_state),
            size=jnp.minimum(buffer.size + 1, buffer_size),
            ptr=(slot + 1) % buffer_size
        )

    def sample(buffer: ReplayBuffer, key: PRNGKey) -> tuple:
        """
        Samples a batch from the replay buffer. Indices are drawn independently,
        so the batch may contain duplicates.
        """

        # Draw uniform floats in [0, size) and truncate to integer indices.
        positions = jax.random.uniform(
            key, shape=(batch_size,), minval=0, maxval=buffer.size
        ).astype(jnp.int32)

        return (
            buffer.states[positions],
            buffer.actions[positions],
            buffer.rewards[positions],
            buffer.terminals[positions],
            buffer.next_states[positions]
        )

    def is_ready(buffer: ReplayBuffer) -> jnp.bool_:
        """Returns True once at least ``batch_size`` transitions are stored."""

        return batch_size <= buffer.size

    return ExperienceReplay(
        init=jax.jit(init),
        append=jax.jit(append),
        sample=jax.jit(sample),
        is_ready=jax.jit(is_ready)
    )
import gymnasium as gym
class NoAgentError(Exception):
    """
    Raised when the library is used without specifying an agent.
    """

    def __str__(self) -> str:
        message = 'No agent is specified.'
        return message
class NoExtensionError(Exception):
    """
    Raised when the library is used without specifying an extension.
    """

    def __str__(self) -> str:
        message = 'No extension is specified.'
        return message
class IncorrectTypeError(Exception):
    """
    Raised when a provided class type is incorrect. Base class for the more
    specific agent/extension/logger/source type errors.

    Parameters
    ----------
    provided_type : type, optional
        Type provided by the user.
    expected_module : str, optional
        Name of the module that ``provided_type`` should match.
    """

    def __init__(self, provided_type: type = None, expected_module: str = None) -> None:
        if provided_type:
            self._provided_type = provided_type.__name__
        else:
            self._provided_type = 'Provided type'

        self._expected_module = expected_module or ''

    def __str__(self) -> str:
        return f'{self._provided_type} is not a valid {self._expected_module} type.'
class IncorrectAgentTypeError(IncorrectTypeError):
    """
    Raised when provided agent does not inherit from the BaseAgent class.
    Delegates message formatting to ``IncorrectTypeError``.

    Parameters
    ----------
    provided_type : type
        Type provided by the user.
    """

    def __init__(self, provided_type: type) -> None:
        # Fix the expected module name to 'agent'.
        super().__init__(provided_type, 'agent')
class IncorrectExtensionTypeError(IncorrectTypeError):
    """
    Raised when provided extension does not inherit from the BaseExt class.
    Delegates message formatting to ``IncorrectTypeError``.

    Parameters
    ----------
    provided_type : type
        Type provided by the user.
    """

    def __init__(self, provided_type: type) -> None:
        # Fix the expected module name to 'extension'.
        super().__init__(provided_type, 'extension')
class IncorrectLoggerTypeError(IncorrectTypeError):
    """
    Raised when provided logger does not inherit from the BaseLogger class.
    Delegates message formatting to ``IncorrectTypeError``.

    Parameters
    ----------
    provided_type : type
        Type provided by the user.
    """

    def __init__(self, provided_type: type) -> None:
        # Fix the expected module name to 'logger'.
        super().__init__(provided_type, 'logger')
class ForbiddenOperationError(Exception):
    """
    Raised when the user performs a forbidden operation. Base class for the
    more specific forbidden-change/set errors.
    """

    def __str__(self) -> str:
        message = 'Forbidden operation.'
        return message
class ForbiddenAgentChangeError(ForbiddenOperationError):
    """
    Raised when the user tries to change the agent type after the first
    agent instance has been initialized.
    """

    def __str__(self) -> str:
        return ('Cannot change agent type after the first '
                'agent instance is initialized.')
class ForbiddenExtensionChangeError(ForbiddenOperationError):
    """
    Raised when the user tries to change the extension type after the first
    agent instance has been initialized.
    """

    def __str__(self) -> str:
        return ('Cannot change extension type after the first '
                'agent instance is initialized.')
class ForbiddenExtensionSetError(ForbiddenOperationError):
    """
    Raised when the user tries to set the extension type while ``no_ext_mode``
    is enabled.
    """

    def __str__(self) -> str:
        return "Cannot set extension type when 'no_ext_mode' is enabled."
class ForbiddenLoggerSetError(ForbiddenOperationError):
    """
    Raised when the user tries to add a new logger after the first step
    has been made.
    """

    def __str__(self) -> str:
        return ('Cannot add new loggers type after the first '
                'step has been made.')
class IncorrectSpaceError(Exception):
    """
    Raised when an unknown space is provided, for example a custom
    Gymnasium space.
    """

    def __str__(self) -> str:
        message = 'Cannot find corresponding Gymnasium space.'
        return message
class UnimplementedSpaceError(Exception):
    """
    Raised when an observation space is required but has not been implemented.
    """

    def __str__(self) -> str:
        message = 'Appropriate observation space is not implemented.'
        return message
class IncompatibleSpacesError(Exception):
    """
    Raised when the observation spaces of two different modules do not match.

    Parameters
    ----------
    ext_space : gym.spaces.Space
        Observation space of the extension.
    agent_space : gym.spaces.Space
        Observation space of the agent.
    """

    def __init__(self, ext_space: gym.spaces.Space, agent_space: gym.spaces.Space) -> None:
        # Both spaces are kept so that the message can show each side of the mismatch.
        self._ext_space = ext_space
        self._agent_space = agent_space

    def __str__(self) -> str:
        return (f'Agents space of type {self._agent_space} is not compatible '
                f'with extension space of type {self._ext_space}.')
class NoDefaultParameterError(Exception):
    """
    Raised when the extension does not define a default value for a parameter
    required by the agent.

    Parameters
    ----------
    extension_type : type
        Type of the used extension.
    parameter_name : str
        Name of the missing parameter.
    parameter_type : gym.spaces.Space
        Type of the missing parameter.
    """

    def __init__(self, extension_type: type, parameter_name: str, parameter_type: gym.spaces.Space) -> None:
        self._extension_name = extension_type.__name__
        self._parameter_name = parameter_name
        self._parameter_type = parameter_type

    def __str__(self) -> str:
        return (f'Extension {self._extension_name} does not provide parameter '
                f'{self._parameter_name} of type {self._parameter_type}.')
class UnsupportedLogTypeError(Exception):
    """
    Raised when a value of a type that the logger does not support is logged.

    Parameters
    ----------
    logger_type : type
        Type of the used logger.
    log_type : type
        Type of the logged value.
    """

    def __init__(self, logger_type: type, log_type: type) -> None:
        # Only the class names are needed for the message.
        self._logger_name = logger_type.__name__
        self._log_name = log_type.__name__

    def __str__(self) -> str:
        message = f'Logger {self._logger_name} does not support logging {self._log_name}.'
        return message
class IncorrectSourceTypeError(IncorrectTypeError):
    """
    Raised when the provided source is not a correct source type (i.e., ``Union[Tuple[str, SourceType], str]``).
    Delegates message formatting to ``IncorrectTypeError``.

    Parameters
    ----------
    provided_type : type
        Type provided by the user.
    """

    def __init__(self, provided_type: type) -> None:
        # Fix the expected module name to 'source'.
        super().__init__(provided_type, 'source')
class UnsupportedCustomLogsError(Exception):
    """
    Raised when the user tries to log custom values with a logger that does
    not support custom logging.

    Parameters
    ----------
    logger_type : type
        Type of the used logger.
    """

    def __init__(self, logger_type: type) -> None:
        # Only the class name is needed for the message.
        self._logger_name = logger_type.__name__

    def __str__(self) -> str:
        message = f'Logger {self._logger_name} does not support custom logging.'
        return message
from abc import ABC, abstractmethod
from functools import wraps, partial
from typing import Callable
import gymnasium as gym
import jax
from chex import dataclass, PRNGKey, ArrayTree
from reinforced_lib.utils.exceptions import UnimplementedSpaceError
from reinforced_lib.utils import is_array, is_dict
@dataclass
class AgentState:
    """
    Base class for agent state containers. Concrete agents subclass this
    dataclass and add the fields that make up their internal state.
    """
class BaseAgent(ABC):
    """
    Base interface of agents. Concrete agents implement the static ``init``,
    ``update``, and ``sample`` methods, and may describe their constructor
    parameters and observations with Gymnasium spaces to enable automatic
    inference of missing values and export to TensorFlow Lite.
    """

    @staticmethod
    @abstractmethod
    def init(key: PRNGKey, *args, **kwargs) -> AgentState:
        """
        Creates and initializes instance of the agent.

        Parameters
        ----------
        key : PRNGKey
            A PRNG key used as the random key.

        Returns
        -------
        AgentState
            Initial state of the agent.
        """

        pass

    @staticmethod
    @abstractmethod
    def update(state: AgentState, key: PRNGKey, *args, **kwargs) -> AgentState:
        """
        Updates the state of the agent after performing some action and receiving a reward.

        Parameters
        ----------
        state : AgentState
            Current state of the agent.
        key : PRNGKey
            A PRNG key used as the random key.

        Returns
        -------
        AgentState
            Updated state of the agent.
        """

        pass

    @staticmethod
    @abstractmethod
    def sample(state: AgentState, key: PRNGKey, *args, **kwargs) -> any:
        """
        Selects the next action based on the current environment and agent state.

        Parameters
        ----------
        state : AgentState
            Current state of the agent.
        key : PRNGKey
            A PRNG key used as the random key.

        Returns
        -------
        any
            Selected action.
        """

        pass

    @staticmethod
    def parameter_space() -> gym.spaces.Dict:
        """
        Parameters of the agent constructor in Gymnasium format. Type of returned value is required to
        be ``gym.spaces.Dict`` or ``None``. If ``None``, the user must provide all parameters manually.
        """

        return None

    @property
    def update_observation_space(self) -> gym.spaces.Space:
        """
        Observation space of the ``update`` method in Gymnasium format. Allows to infer missing
        observations using an extensions and easily export the agent to TensorFlow Lite format.
        If ``None``, the user must provide all parameters manually.
        """

        return None

    @property
    def sample_observation_space(self) -> gym.spaces.Space:
        """
        Observation space of the ``sample`` method in Gymnasium format. Allows to infer missing
        observations using an extensions and easily export the agent to TensorFlow Lite format.
        If ``None``, the user must provide all parameters manually.
        """

        return None

    @property
    def action_space(self) -> gym.spaces.Space:
        """
        Action space of the agent in Gymnasium format. Must be overridden by subclasses.
        """

        raise NotImplementedError()

    def export(self, init_key: PRNGKey, state: AgentState = None, sample_only: bool = False) -> tuple[any, any, any]:
        """
        Exports the agent to TensorFlow Lite format.

        Parameters
        ----------
        init_key : PRNGKey
            Key used to initialize the agent.
        state : AgentState, optional
            State of the agent to be exported. If not specified, the agent is initialized with ``init_key``.
        sample_only : bool, optional
            If ``True``, the exported agent will only be able to sample actions, but not update its state.

        Returns
        -------
        tuple[any, any, any]
            Serialized TensorFlow Lite models ``(init, update, sample)``;
            ``update`` is ``None`` in the sample-only export path.
        """

        # TensorFlow is an optional dependency - it is only needed for export.
        try:
            import tensorflow as tf
        except ModuleNotFoundError:
            raise ModuleNotFoundError('TensorFlow installation is required to export agents to TensorFlow Lite.')

        @dataclass
        class TfLiteState:
            # Bundles the agent state with the PRNG key so that the exported
            # model carries its own source of randomness between calls.
            state: ArrayTree
            key: PRNGKey

        def append_value(value: any, value_name: str, args: any) -> any:
            # Prepends `value` (the state or the key) to a sample drawn from the
            # observation space, matching the dict/list/scalar form of `args`.
            if args is None:
                raise UnimplementedSpaceError()
            elif is_dict(args):
                return {value_name: value} | args
            elif is_array(args):
                return [value] + list(args)
            else:
                return [value, args]

        def flatten_args(tree_args_fun: Callable, treedef: ArrayTree) -> Callable:
            # Wraps a pytree-taking function into one that takes flat leaves,
            # as required by the TFLite converter's flat-tensor signature.
            @wraps(tree_args_fun)
            def flat_args_fun(*leaves):
                tree_args = jax.tree_util.tree_unflatten(treedef, leaves)

                if is_dict(tree_args):
                    tree_ret = tree_args_fun(**tree_args)
                else:
                    tree_ret = tree_args_fun(*tree_args)

                return jax.tree_util.tree_leaves(tree_ret)

            return flat_args_fun

        def make_converter(fun: Callable, arguments: any) -> tf.lite.TFLiteConverter:
            # Builds a TFLite converter for `fun` traced with example `arguments`.
            leaves, treedef = jax.tree_util.tree_flatten(arguments)
            flat_fun = flatten_args(fun, treedef)

            inputs = [[(f'arg{i}', l) for i, l in enumerate(leaves)]]
            converter = tf.lite.TFLiteConverter.experimental_from_jax([flat_fun], inputs)
            converter.target_spec.supported_ops = [
                tf.lite.OpsSet.TFLITE_BUILTINS,  # enable TensorFlow Lite ops.
                tf.lite.OpsSet.SELECT_TF_OPS  # enable TensorFlow ops.
            ]

            return converter

        def init() -> TfLiteState:
            return TfLiteState(
                state=self.init(init_key),
                key=init_key
            )

        def sample(state: TfLiteState, *args, **kwargs) -> tuple[any, TfLiteState]:
            # Split the stored key so that consecutive calls use fresh randomness.
            sample_key, key = jax.random.split(state.key)
            action = self.sample(state.state, sample_key, *args, **kwargs)
            return action, TfLiteState(state=state.state, key=key)

        def update(state: TfLiteState, *args, **kwargs) -> TfLiteState:
            update_key, key = jax.random.split(state.key)
            new_state = self.update(state.state, update_key, *args, **kwargs)
            return TfLiteState(state=new_state, key=key)

        def get_key() -> PRNGKey:
            return init_key

        def sample_without_state(state: AgentState, key: PRNGKey, *args, **kwargs) -> tuple[any, PRNGKey]:
            # Stateless variant used by the sample-only export: the agent state
            # is baked in via `partial` below; only the key is threaded through.
            sample_key, key = jax.random.split(key)
            action = self.sample(state, sample_key, *args, **kwargs)
            return action, key

        if not sample_only:
            if state is None:
                state = init()
            else:
                state = TfLiteState(state=state, key=init_key)

            # Example arguments for tracing: the state prepended to a sample of
            # each observation space (raises UnimplementedSpaceError if a space
            # is not defined by the agent).
            update_args = append_value(state, 'state', self.update_observation_space.sample())
            sample_args = append_value(state, 'state', self.sample_observation_space.sample())

            init_tfl = make_converter(init, []).convert()
            update_tfl = make_converter(update, update_args).convert()
            sample_tfl = make_converter(sample, sample_args).convert()

            return init_tfl, update_tfl, sample_tfl
        elif state is not None:
            sample_args = append_value(init_key, 'key', self.sample_observation_space.sample())

            init_tfl = make_converter(get_key, []).convert()
            sample_tfl = make_converter(partial(sample_without_state, state), sample_args).convert()

            return init_tfl, None, sample_tfl
        else:
            raise ValueError('Either `state` must be provided or `sample_only` must be False.')
from copy import deepcopy
from functools import partial
from typing import Callable
import gymnasium as gym
import haiku as hk
import jax
import jax.numpy as jnp
import optax
from chex import dataclass, Array, PRNGKey, Scalar, Shape, Numeric
from reinforced_lib.agents import BaseAgent, AgentState
from reinforced_lib.utils.experience_replay import experience_replay, ExperienceReplay, ReplayBuffer
from reinforced_lib.utils.jax_utils import gradient_step
@dataclass
class DDPGState(AgentState):
    r"""
    Container for the state of the deep deterministic policy gradient agent.
    The ``*_target`` fields hold separate copies of the online networks
    (initialized as deep copies in ``DDPG.init``) used for target computation.

    Attributes
    ----------
    q_params : haiku.Params
        Parameters of the Q-network (critic).
    q_state : haiku.State
        State of the Q-network (critic).
    q_params_target : haiku.Params
        Parameters of the target Q-network.
    q_state_target : haiku.State
        State of the target Q-network.
    q_opt_state : optax.OptState
        Optimizer state of the Q-network.
    a_params : haiku.Params
        Parameters of the policy network (actor).
    a_state : haiku.State
        State of the policy network (actor).
    a_params_target : haiku.Params
        Parameters of the target policy network.
    a_state_target : haiku.State
        State of the target policy network.
    a_opt_state : optax.OptState
        Optimizer state of the policy network.
    replay_buffer : ReplayBuffer
        Experience replay buffer.
    prev_env_state : array_like
        Previous environment state.
    noise : Scalar
        Current noise level.
    """

    q_params: hk.Params
    q_state: hk.State
    q_params_target: hk.Params
    q_state_target: hk.State
    q_opt_state: optax.OptState

    a_params: hk.Params
    a_state: hk.State
    a_params_target: hk.Params
    a_state_target: hk.State
    a_opt_state: optax.OptState

    replay_buffer: ReplayBuffer
    prev_env_state: Array
    # Standard deviation of the exploration noise; decays over time (see DDPG.update).
    noise: Scalar
class DDPG(BaseAgent):
r"""
Deep deterministic policy gradient [3]_ [4]_ agent with white Gaussian noise exploration and experience replay
buffer. The agent simultaneously learns a Q-function and a policy. The Q-function is updated using the Bellman
equation. The policy is learned using the gradient of the Q-function with respect to the policy parameters,
it is trained to maximize the Q-value. The agent uses two Q-networks (critics) and two policy networks (actors)
to stabilize the learning process and avoid overestimation. The target networks are updated with a soft update.
This agent follows the off-policy learning paradigm and is suitable for environments with continuous action spaces.
Parameters
----------
q_network : hk.TransformedWithState
Architecture of the Q-networks (critics).
The input to the network should be two tensors of observations and actions respectively.
a_network : hk.TransformedWithState
Architecture of the policy networks (actors).
obs_space_shape : Shape
Shape of the observation space.
act_space_shape : Shape
Shape of the action space.
min_action : Scalar or Array
Minimum action value.
max_action : Scalar or Array
Maximum action value.
q_optimizer : optax.GradientTransformation, optional
Optimizer of the Q-networks. If None, the Adam optimizer with learning rate 1e-3 is used.
a_optimizer : optax.GradientTransformation, optional
Optimizer of the policy networks. If None, the Adam optimizer with learning rate 1e-3 is used.
experience_replay_buffer_size : jnp.int32, default=10000
Size of the experience replay buffer.
experience_replay_batch_size : jnp.int32, default=64
Batch size of the samples from the experience replay buffer.
experience_replay_steps : jnp.int32, default=5
Number of experience replay steps per update.
discount : Scalar, default=0.99
Discount factor. :math:`\gamma = 0.0` means no discount, :math:`\gamma = 1.0` means infinite discount. :math:`0 \leq \gamma \leq 1`
noise : Scalar, default=(max_action - min_action) / 2
Initial Gaussian noise level. :math:`0 \leq \sigma`.
noise_decay : Scalar, default=0.99
Gaussian noise decay factor. :math:`\sigma_{t+1} = \sigma_{t} * \sigma_{decay}`. :math:`0 \leq \sigma_{decay} \leq 1`.
noise_min : Scalar, default=0.01
Minimum Gaussian noise level. :math:`0 \leq \sigma_{min} \leq \sigma`.
tau : Scalar, default=0.01
Soft update factor. :math:`\tau = 0.0` means no soft update, :math:`\tau = 1.0` means hard update. :math:`0 \leq \tau \leq 1`.
References
----------
.. [3] David Silver, Guy Lever, Nicolas Heess, Thomas Degris, Daan Wierstra, and Martin Riedmiller. 2014.
Deterministic policy gradient algorithms. In Proceedings of the 31st International Conference on International
Conference on Machine Learning - Volume 32 (ICML'14). JMLR.org, I–387–I–395.
.. [4] Timothy P. Lillicrap, Jonathan J. Hunt, Alexander Pritzel, Nicolas Heess, Tom Erez, Yuval Tassa, David Silver,
and Daan Wierstra. 2015. Continuous control with deep reinforcement learning. CoRR abs/1509.02971.
"""
def __init__(
        self,
        q_network: hk.TransformedWithState,
        a_network: hk.TransformedWithState,
        obs_space_shape: Shape,
        act_space_shape: Shape,
        min_action: Numeric,
        max_action: Numeric,
        q_optimizer: optax.GradientTransformation = None,
        a_optimizer: optax.GradientTransformation = None,
        experience_replay_buffer_size: jnp.int32 = 10000,
        experience_replay_batch_size: jnp.int32 = 64,
        experience_replay_steps: jnp.int32 = 5,
        discount: Scalar = 0.99,
        noise: Scalar = None,
        noise_decay: Scalar = 0.99,
        noise_min: Scalar = 0.01,
        tau: Scalar = 0.01
) -> None:
    """
    Validates the hyperparameters and builds the jitted ``init``, ``update``,
    and ``sample`` functions with all static configuration baked in via
    ``functools.partial``. See the class docstring for parameter descriptions.
    """

    # Validate hyperparameter ranges before any wiring is done.
    assert experience_replay_buffer_size > experience_replay_batch_size > 0
    assert 0.0 <= discount <= 1.0
    assert 0.0 <= noise_decay <= 1.0
    assert 0.0 <= tau <= 1.0

    # Default noise level spans half the action range (documented default).
    if noise is None:
        noise = (max_action - min_action) / 2

    assert 0.0 <= noise
    assert 0.0 <= noise_min <= noise

    if q_optimizer is None:
        q_optimizer = optax.adam(1e-3)

    if a_optimizer is None:
        a_optimizer = optax.adam(1e-3)

    # Normalize scalar shapes to 1-tuples so downstream code can unpack them.
    self.obs_space_shape = obs_space_shape if jnp.ndim(obs_space_shape) > 0 else (obs_space_shape,)
    self.act_space_shape = act_space_shape if jnp.ndim(act_space_shape) > 0 else (act_space_shape,)

    er = experience_replay(
        experience_replay_buffer_size,
        experience_replay_batch_size,
        self.obs_space_shape,
        self.act_space_shape
    )

    # Bind all static configuration into the jitted entry points; only the
    # dynamic arguments (state, key, observations) remain at call time.
    self.init = jax.jit(partial(
        self.init,
        obs_space_shape=self.obs_space_shape, act_space_shape=self.act_space_shape,
        q_network=q_network, a_network=a_network,
        q_optimizer=q_optimizer, a_optimizer=a_optimizer,
        experience_replay=er,
        noise=noise
    ))
    self.update = jax.jit(partial(
        self.update,
        # Each step function couples an optimizer with its loss function.
        q_step_fn=partial(
            gradient_step,
            optimizer=q_optimizer,
            loss_fn=partial(self.q_loss_fn, q_network=q_network, a_network=a_network, discount=discount)
        ),
        a_step_fn=partial(
            gradient_step,
            optimizer=a_optimizer,
            loss_fn=partial(self.a_loss_fn, q_network=q_network, a_network=a_network)
        ),
        experience_replay=er, experience_replay_steps=experience_replay_steps,
        noise_decay=noise_decay, noise_min=noise_min,
        tau=tau
    ))
    self.sample = jax.jit(partial(
        self.sample,
        a_network=a_network,
        min_action=min_action, max_action=max_action
    ))
@staticmethod
def parameter_space() -> gym.spaces.Dict:
    """
    Gymnasium description of the DDPG constructor parameters, used to infer
    missing arguments from an extension.
    """

    # Factory helpers: each entry gets its own Space instance (spaces carry
    # independent RNG state, so instances must not be shared between keys).
    def shape_space() -> gym.spaces.Sequence:
        return gym.spaces.Sequence(gym.spaces.Box(1, jnp.inf, (1,), jnp.int32))

    def action_bound_space() -> gym.spaces.Sequence:
        return gym.spaces.Sequence(gym.spaces.Box(-jnp.inf, jnp.inf))

    def unit_interval() -> gym.spaces.Box:
        return gym.spaces.Box(0.0, 1.0, (1,))

    def positive_int() -> gym.spaces.Box:
        return gym.spaces.Box(1, jnp.inf, (1,), jnp.int32)

    def non_negative() -> gym.spaces.Box:
        return gym.spaces.Box(0.0, jnp.inf, (1,))

    return gym.spaces.Dict({
        'obs_space_shape': shape_space(),
        'act_space_shape': shape_space(),
        'min_action': action_bound_space(),
        'max_action': action_bound_space(),
        'experience_replay_buffer_size': positive_int(),
        'experience_replay_batch_size': positive_int(),
        'discount': unit_interval(),
        'noise': non_negative(),
        'noise_decay': unit_interval(),
        'noise_min': non_negative(),
        'tau': unit_interval()
    })
@property
def update_observation_space(self) -> gym.spaces.Dict:
    """
    Gymnasium description of the arguments of ``update``: one transition
    (environment state, action, reward, terminal flag).
    """

    unbounded = (-jnp.inf, jnp.inf)
    return gym.spaces.Dict({
        'env_state': gym.spaces.Box(*unbounded, self.obs_space_shape),
        'action': gym.spaces.Box(*unbounded, self.act_space_shape),
        'reward': gym.spaces.Box(*unbounded, (1,)),
        'terminal': gym.spaces.MultiBinary(1)
    })
@property
def sample_observation_space(self) -> gym.spaces.Dict:
    """
    Gymnasium description of the arguments of ``sample``: the environment state only.
    """

    state_space = gym.spaces.Box(-jnp.inf, jnp.inf, self.obs_space_shape)
    return gym.spaces.Dict({'env_state': state_space})
@property
def action_space(self) -> gym.spaces.Box:
    """
    Continuous, unbounded action space with the shape of the agent's actions.
    """

    return gym.spaces.Box(low=-jnp.inf, high=jnp.inf, shape=self.act_space_shape)
@staticmethod
def init(
        key: PRNGKey,
        obs_space_shape: Shape,
        act_space_shape: Shape,
        q_network: hk.TransformedWithState,
        a_network: hk.TransformedWithState,
        q_optimizer: optax.GradientTransformation,
        a_optimizer: optax.GradientTransformation,
        experience_replay: ExperienceReplay,
        noise: Scalar
) -> DDPGState:
    r"""
    Initializes the Q-network and the policy network together with their
    target copies, the optimizers, and the experience replay buffer.
    The first environment state is assumed to be a tensor of zeros.

    Parameters
    ----------
    key : PRNGKey
        A PRNG key used as the random key.
    obs_space_shape : Shape
        The shape of the observation space.
    act_space_shape : Shape
        The shape of the action space.
    q_network : hk.TransformedWithState
        The Q-network.
    a_network : hk.TransformedWithState
        The policy network.
    q_optimizer : optax.GradientTransformation
        The Q-network optimizer.
    a_optimizer : optax.GradientTransformation
        The policy network optimizer.
    experience_replay : ExperienceReplay
        The experience replay buffer.
    noise : Scalar
        The initial noise value.

    Returns
    -------
    DDPGState
        Initial state of the deep deterministic policy gradient agent.
    """

    # Dummy tensors drive the shape inference of the haiku networks.
    obs_template = jnp.empty(obs_space_shape)
    act_template = jnp.empty(act_space_shape)

    key, q_key, a_key = jax.random.split(key, 3)
    q_params, q_state = q_network.init(q_key, obs_template, act_template)
    a_params, a_state = a_network.init(a_key, obs_template)

    # Target networks start as exact (deep) copies of the online networks.
    return DDPGState(
        q_params=q_params,
        q_state=q_state,
        q_params_target=deepcopy(q_params),
        q_state_target=deepcopy(q_state),
        q_opt_state=q_optimizer.init(q_params),
        a_params=a_params,
        a_state=a_state,
        a_params_target=deepcopy(a_params),
        a_state_target=deepcopy(a_state),
        a_opt_state=a_optimizer.init(a_params),
        replay_buffer=experience_replay.init(),
        prev_env_state=jnp.zeros(obs_space_shape),
        noise=noise
    )
@staticmethod
def q_loss_fn(
q_params: hk.Params,
key: PRNGKey,
ddpg_state: DDPGState,
batch: tuple,
non_zero_loss: jnp.bool_,
q_network: hk.TransformedWithState,
a_network: hk.TransformedWithState,
discount: Scalar
) -> tuple[Scalar, hk.State]:
r"""
Loss is the mean squared Bellman error :math:`\mathcal{L}(\theta) = \mathbb{E}_{s, a, r, s'} \left[ \left( r
+ \gamma \max Q'(s', \pi'(s')) - Q(s, a) \right)^2 \right]` where :math:`s` is the current state, :math:`a`
is the current action, :math:`r` is the reward, :math:`s'` is the next state, :math:`\gamma` is the discount
factor, :math:`Q(s, a)` is the Q-value of the main Q-network, :math:`Q'(s, a)` is the Q-value of the target
Q-network, and :math:`\pi'(s)` is the action of the target policy network. The policy network parameters
are considered as fixed. Loss can be calculated on a batch of transitions.
Parameters
----------
q_params : hk.Params
The parameters of the Q-network.
key : PRNGKey
A PRNG key used as the random key.
ddpg_state : DDPGState
The state of the deep deterministic policy gradient agent.
batch : tuple
A batch of transitions from the experience replay buffer.
non_zero_loss : bool
Flag used to avoid updating the Q-network when the experience replay buffer is not full.
q_network : hk.TransformedWithState
The Q-network.
a_network : hk.TransformedWithState
The policy network.
discount : Scalar
The discount factor.
Returns
-------
tuple[Scalar, hk.State]
The loss and the new state of the Q-network.
"""
states, actions, rewards, terminals, next_states = batch
q_key, q_target_key, a_target_key = jax.random.split(key, 3)
q_values, q_state = q_network.apply(q_params, ddpg_state.q_state, q_key, states, actions)
actions_target, _ = a_network.apply(ddpg_state.a_params_target, ddpg_state.a_state_target, a_target_key, next_states)
q_values_target, _ = q_network.apply(ddpg_state.q_params_target, ddpg_state.q_state_target, q_target_key, next_states, actions_target)
target = rewards + (1 - terminals) * discount * q_values_target
target = jax.lax.stop_gradient(target)
loss = optax.l2_loss(q_values, target).mean()
return loss * non_zero_loss, q_state
@staticmethod
def a_loss_fn(
a_params: hk.Params,
key: PRNGKey,
ddpg_state: DDPGState,
batch: tuple,
non_zero_loss: jnp.bool_,
q_network: hk.TransformedWithState,
a_network: hk.TransformedWithState
) -> tuple[Scalar, hk.State]:
r"""
The policy network is updated using the gradient of the Q-network to maximize the Q-value of the current state
and action :math:`\max_{\theta} \mathbb{E}_{s, a} \left[ Q(s, \pi_{\theta}(s)) \right]`. Q-network parameters are
considered as fixed. The policy network can be updated on a batch of transitions.
Parameters
----------
a_params : hk.Params
The parameters of the policy network.
key : PRNGKey
A PRNG key used as the random key.
ddpg_state : DDPGState
The state of the deep deterministic policy gradient agent.
batch : tuple
A batch of transitions from the experience replay buffer.
non_zero_loss : bool
Flag used to avoid updating the policy network when the experience replay buffer is not full.
q_network : hk.TransformedWithState
The Q-network.
a_network : hk.TransformedWithState
The policy network.
Returns
-------
tuple[Scalar, hk.State]
The loss and the new state of the policy network.
"""
states, _, _, _, _ = batch
a_key, q_key = jax.random.split(key)
actions, a_state = a_network.apply(a_params, ddpg_state.a_state, a_key, states)
q_values, _ = q_network.apply(ddpg_state.q_params, ddpg_state.q_state, q_key, states, actions)
loss = -jnp.mean(q_values)
return loss * non_zero_loss, a_state
    @staticmethod
    def update(
            state: DDPGState,
            key: PRNGKey,
            env_state: Array,
            action: Array,
            reward: Scalar,
            terminal: jnp.bool_,
            q_step_fn: Callable,
            a_step_fn: Callable,
            experience_replay: ExperienceReplay,
            experience_replay_steps: jnp.int32,
            noise_decay: Scalar,
            noise_min: Scalar,
            tau: Scalar
    ) -> DDPGState:
        r"""
        Appends the transition to the experience replay buffer and performs ``experience_replay_steps`` steps.
        Each step consists of sampling a batch of transitions from the experience replay buffer, calculating the
        Q-network loss and the policy network loss using ``q_loss_fn`` and ``a_loss_fn`` respectively, performing
        a gradient step on both networks, and soft updating the target networks. Soft update of the parameters
        is defined as :math:`\theta_{target} = \tau \theta + (1 - \tau) \theta_{target}`. The noise parameter is
        decayed by ``noise_decay``.
        Parameters
        ----------
        state : DDPGState
            The current state of the deep deterministic policy gradient agent.
        key : PRNGKey
            A PRNG key used as the random key.
        env_state : Array
            The current state of the environment.
        action : Array
            The action taken by the agent.
        reward : Scalar
            The reward received by the agent.
        terminal : bool
            Whether the episode has terminated.
        q_step_fn : Callable
            The function that performs a single gradient step on the Q-network.
        a_step_fn : Callable
            The function that performs a single gradient step on the policy network.
        experience_replay : ExperienceReplay
            The experience replay buffer.
        experience_replay_steps : int
            The number of experience replay steps.
        noise_decay : Scalar
            The decay rate of the noise parameter.
        noise_min : Scalar
            The minimum value of the noise parameter.
        tau : Scalar
            The soft update parameter.
        Returns
        -------
        DDPGState
            The updated state of the deep deterministic policy gradient agent.
        """

        # Store (s, a, r, terminal, s') using the state observed at the previous call.
        replay_buffer = experience_replay.append(
            state.replay_buffer, state.prev_env_state,
            action, reward, terminal, env_state
        )
        q_params, q_net_state, q_opt_state = state.q_params, state.q_state, state.q_opt_state
        q_params_target, q_state_target = state.q_params_target, state.q_state_target
        a_params, a_net_state, a_opt_state = state.a_params, state.a_state, state.a_opt_state
        a_params_target, a_state_target = state.a_params_target, state.a_state_target
        # While the buffer is not full, is_ready() is False and both losses are zeroed.
        non_zero_loss = experience_replay.is_ready(replay_buffer)
        # NOTE(review): the loss closures receive the pre-update `state`, so bootstrap targets
        # come from the agent state at the start of this call for every replay step — confirm intended.
        for _ in range(experience_replay_steps):
            batch_key, q_network_key, a_network_key, key = jax.random.split(key, 4)
            batch = experience_replay.sample(replay_buffer, batch_key)
            q_params, q_net_state, q_opt_state, _ = q_step_fn(
                q_params, (q_network_key, state, batch, non_zero_loss), q_opt_state)
            a_params, a_net_state, a_opt_state, _ = a_step_fn(
                a_params, (a_network_key, state, batch, non_zero_loss), a_opt_state)
            # Polyak (soft) update of both target networks: tau * online + (1 - tau) * target.
            q_params_target, q_state_target = optax.incremental_update(
                (q_params, q_net_state), (q_params_target, q_state_target), tau)
            a_params_target, a_state_target = optax.incremental_update(
                (a_params, a_net_state), (a_params_target, a_state_target), tau)
        return DDPGState(
            q_params=q_params,
            q_state=q_net_state,
            q_opt_state=q_opt_state,
            q_params_target=q_params_target,
            q_state_target=q_state_target,
            a_params=a_params,
            a_state=a_net_state,
            a_opt_state=a_opt_state,
            a_params_target=a_params_target,
            a_state_target=a_state_target,
            replay_buffer=replay_buffer,
            prev_env_state=env_state,
            noise=jnp.maximum(state.noise * noise_decay, noise_min)
        )
@staticmethod
def sample(
state: DDPGState,
key: PRNGKey,
env_state: Array,
a_network: hk.TransformedWithState,
min_action: Scalar,
max_action: Scalar
) -> Numeric:
r"""
Calculates deterministic action using the policy network. Then adds white Gaussian noise with standard
deviation ``state.noise`` to the action and clips it to the range :math:`[min\_action, max\_action]`.
Parameters
----------
state : DDPGState
The state of the double Q-learning agent.
key : PRNGKey
A PRNG key used as the random key.
env_state : Array
The current state of the environment.
a_network : hk.TransformedWithState
The policy network.
min_action : Scalar or Array
The minimum value of the action.
max_action : Scalar or Array
The maximum value of the action.
Returns
-------
Scalar or Array
Selected action.
"""
network_key, noise_key = jax.random.split(key)
action, _ = a_network.apply(state.a_params, state.a_state, network_key, env_state)
action += jax.random.normal(noise_key, action.shape) * state.noise
return jnp.clip(action, min_action, max_action) | /reinforced-lib-1.0.0.tar.gz/reinforced-lib-1.0.0/reinforced_lib/agents/deep/ddpg.py | 0.944753 | 0.624866 | ddpg.py | pypi |
from copy import deepcopy
from functools import partial
from typing import Callable
import gymnasium as gym
import haiku as hk
import jax
import jax.numpy as jnp
import optax
from chex import dataclass, Array, PRNGKey, Scalar, Shape
from reinforced_lib.agents import BaseAgent, AgentState
from reinforced_lib.utils.experience_replay import experience_replay, ExperienceReplay, ReplayBuffer
from reinforced_lib.utils.jax_utils import gradient_step
@dataclass
class QLearningState(AgentState):
    r"""
    Container for the state of the deep Q-learning agent.
    Attributes
    ----------
    params : hk.Params
        Parameters of the Q-network.
    state : hk.State
        State of the Q-network.
    opt_state : optax.OptState
        Optimizer state.
    replay_buffer : ReplayBuffer
        Experience replay buffer.
    prev_env_state : Array
        Previous environment state.
    epsilon : Scalar
        :math:`\epsilon`-greedy parameter.
    """
    params: hk.Params                # online Q-network parameters
    state: hk.State                  # online Q-network (non-trainable) state
    opt_state: optax.OptState        # optimizer state for the Q-network
    replay_buffer: ReplayBuffer      # experience replay storage
    prev_env_state: Array            # observation from the previous update call
    epsilon: Scalar                  # current epsilon-greedy exploration rate
class QLearning(BaseAgent):
    r"""
    Deep Q-learning agent [1]_ with :math:`\epsilon`-greedy exploration and experience replay buffer. The agent uses
    a deep neural network to approximate the Q-value function. The Q-network is trained to minimize the Bellman
    error. This agent follows the off-policy learning paradigm and is suitable for environments with discrete action
    spaces.
    Parameters
    ----------
    q_network : hk.TransformedWithState
        Architecture of the Q-network.
    obs_space_shape : Shape
        Shape of the observation space.
    act_space_size : jnp.int32
        Size of the action space.
    optimizer : optax.GradientTransformation, optional
        Optimizer of the Q-network. If None, the Adam optimizer with learning rate 1e-3 is used.
    experience_replay_buffer_size : jnp.int32, default=10000
        Size of the experience replay buffer.
    experience_replay_batch_size : jnp.int32, default=64
        Batch size of the samples from the experience replay buffer.
    experience_replay_steps : jnp.int32, default=5
        Number of experience replay steps per update.
    discount : Scalar, default=0.99
        Discount factor. :math:`\gamma = 0.0` means no discount, :math:`\gamma = 1.0` means infinite discount. :math:`0 \leq \gamma \leq 1`
    epsilon : Scalar, default=1.0
        Initial :math:`\epsilon`-greedy parameter. :math:`0 \leq \epsilon \leq 1`.
    epsilon_decay : Scalar, default=0.999
        Epsilon decay factor. :math:`\epsilon_{t+1} = \epsilon_{t} * \epsilon_{decay}`. :math:`0 \leq \epsilon_{decay} \leq 1`.
    epsilon_min : Scalar, default=0.001
        Minimum :math:`\epsilon`-greedy parameter. :math:`0 \leq \epsilon_{min} \leq \epsilon`.
    References
    ----------
    .. [1] Mnih, V., Kavukcuoglu, K., Silver, D., Graves, A., Antonoglou, I., Wierstra, D. & Riedmiller, M. (2013).
        Playing Atari with Deep Reinforcement Learning.
    """
    def __init__(
            self,
            q_network: hk.TransformedWithState,
            obs_space_shape: Shape,
            act_space_size: jnp.int32,
            optimizer: optax.GradientTransformation = None,
            experience_replay_buffer_size: jnp.int32 = 10000,
            experience_replay_batch_size: jnp.int32 = 64,
            experience_replay_steps: jnp.int32 = 5,
            discount: Scalar = 0.99,
            epsilon: Scalar = 1.0,
            epsilon_decay: Scalar = 0.999,
            epsilon_min: Scalar = 0.001
    ) -> None:
        assert experience_replay_buffer_size > experience_replay_batch_size > 0
        assert 0.0 <= discount <= 1.0
        assert 0.0 <= epsilon <= 1.0
        assert 0.0 <= epsilon_decay <= 1.0
        if optimizer is None:
            optimizer = optax.adam(1e-3)
        # Promote a scalar shape to a 1-tuple so downstream code can rely on a tuple shape.
        self.obs_space_shape = obs_space_shape if jnp.ndim(obs_space_shape) > 0 else (obs_space_shape,)
        self.act_space_size = act_space_size
        er = experience_replay(
            experience_replay_buffer_size,
            experience_replay_batch_size,
            self.obs_space_shape,
            (1,)
        )
        # Bake static configuration into the pure static methods and JIT-compile them.
        self.init = jax.jit(partial(
            self.init,
            obs_space_shape=self.obs_space_shape,
            q_network=q_network,
            optimizer=optimizer,
            experience_replay=er,
            epsilon=epsilon
        ))
        self.update = jax.jit(partial(
            self.update,
            step_fn=partial(
                gradient_step,
                optimizer=optimizer,
                loss_fn=partial(self.loss_fn, q_network=q_network, discount=discount)
            ),
            experience_replay=er,
            experience_replay_steps=experience_replay_steps,
            epsilon_decay=epsilon_decay,
            epsilon_min=epsilon_min
        ))
        self.sample = jax.jit(partial(
            self.sample,
            q_network=q_network,
            act_space_size=act_space_size
        ))
    @staticmethod
    def parameter_space() -> gym.spaces.Dict:
        # Declarative description of the constructor's tunable parameters.
        return gym.spaces.Dict({
            'obs_space_shape': gym.spaces.Sequence(gym.spaces.Box(1, jnp.inf, (1,), jnp.int32)),
            'act_space_size': gym.spaces.Box(1, jnp.inf, (1,), jnp.int32),
            'experience_replay_buffer_size': gym.spaces.Box(1, jnp.inf, (1,), jnp.int32),
            'experience_replay_batch_size': gym.spaces.Box(1, jnp.inf, (1,), jnp.int32),
            'discount': gym.spaces.Box(0.0, 1.0, (1,)),
            'epsilon': gym.spaces.Box(0.0, 1.0, (1,)),
            'epsilon_decay': gym.spaces.Box(0.0, 1.0, (1,)),
            'epsilon_min': gym.spaces.Box(0.0, 1.0, (1,))
        })
    @property
    def update_observation_space(self) -> gym.spaces.Dict:
        # Observations required by ``update``: full (s, a, r, terminal) transition data.
        return gym.spaces.Dict({
            'env_state': gym.spaces.Box(-jnp.inf, jnp.inf, self.obs_space_shape),
            'action': gym.spaces.Discrete(self.act_space_size),
            'reward': gym.spaces.Box(-jnp.inf, jnp.inf, (1,)),
            'terminal': gym.spaces.MultiBinary(1)
        })
    @property
    def sample_observation_space(self) -> gym.spaces.Dict:
        # Observations required by ``sample``: only the current environment state.
        return gym.spaces.Dict({
            'env_state': gym.spaces.Box(-jnp.inf, jnp.inf, self.obs_space_shape)
        })
    @property
    def action_space(self) -> gym.spaces.Discrete:
        return gym.spaces.Discrete(self.act_space_size)
    @staticmethod
    def init(
            key: PRNGKey,
            obs_space_shape: Shape,
            q_network: hk.TransformedWithState,
            optimizer: optax.GradientTransformation,
            experience_replay: ExperienceReplay,
            epsilon: Scalar
    ) -> QLearningState:
        r"""
        Initializes the Q-network, optimizer and experience replay buffer with given parameters.
        First state of the environment is assumed to be a tensor of zeros.
        Parameters
        ----------
        key : PRNGKey
            A PRNG key used as the random key.
        obs_space_shape : Shape
            The shape of the observation space.
        q_network : hk.TransformedWithState
            The Q-network.
        optimizer : optax.GradientTransformation
            The optimizer.
        experience_replay : ExperienceReplay
            The experience replay buffer.
        epsilon : Scalar
            The initial :math:`\epsilon`-greedy parameter.
        Returns
        -------
        QLearningState
            Initial state of the deep Q-learning agent.
        """
        # Dummy input only fixes the observation shape for Haiku's shape inference.
        x_dummy = jnp.empty(obs_space_shape)
        params, state = q_network.init(key, x_dummy)
        opt_state = optimizer.init(params)
        replay_buffer = experience_replay.init()
        return QLearningState(
            params=params,
            state=state,
            opt_state=opt_state,
            replay_buffer=replay_buffer,
            prev_env_state=jnp.zeros(obs_space_shape),
            epsilon=epsilon
        )
    @staticmethod
    def loss_fn(
            params: hk.Params,
            key: PRNGKey,
            net_state: hk.State,
            params_target: hk.Params,
            net_state_target: hk.State,
            batch: tuple,
            non_zero_loss: jnp.bool_,
            q_network: hk.TransformedWithState,
            discount: Scalar
    ) -> tuple[Scalar, hk.State]:
        r"""
        Loss is the mean squared Bellman error :math:`\mathcal{L}(\theta) = \mathbb{E}_{s, a, r, s'} \left[ \left( r +
        \gamma \max_{a'} Q(s', a') - Q(s, a) \right)^2 \right]` where :math:`s` is the current state, :math:`a` is the
        current action, :math:`r` is the reward, :math:`s'` is the next state, :math:`\gamma` is the discount factor,
        :math:`Q(s, a)` is the Q-value of the state-action pair. Loss can be calculated on a batch of transitions.
        Parameters
        ----------
        params : hk.Params
            The parameters of the Q-network.
        key : PRNGKey
            A PRNG key used as the random key.
        net_state : hk.State
            The state of the Q-network.
        params_target : hk.Params
            The parameters of the target Q-network.
        net_state_target : hk.State
            The state of the target Q-network.
        batch : tuple
            A batch of transitions from the experience replay buffer.
        non_zero_loss : bool
            Flag used to avoid updating the Q-network when the experience replay buffer is not full.
        q_network : hk.TransformedWithState
            The Q-network.
        discount : Scalar
            The discount factor.
        Returns
        -------
        Tuple[Scalar, hk.State]
            The loss and the new state of the Q-network.
        """
        states, actions, rewards, terminals, next_states = batch
        q_key, q_target_key = jax.random.split(key)
        q_values, state = q_network.apply(params, net_state, q_key, states)
        # Gather the Q-value of the action actually taken in each transition.
        q_values = jnp.take_along_axis(q_values, actions.astype(jnp.int32), axis=-1)
        q_values_target, _ = q_network.apply(params_target, net_state_target, q_target_key, next_states)
        # Bootstrap from the greedy action of the target network; terminals contribute reward only.
        target = rewards + (1 - terminals) * discount * jnp.max(q_values_target, axis=-1, keepdims=True)
        target = jax.lax.stop_gradient(target)
        loss = optax.l2_loss(q_values, target).mean()
        # The loss is zeroed out while the replay buffer is still warming up.
        return loss * non_zero_loss, state
    @staticmethod
    def update(
            state: QLearningState,
            key: PRNGKey,
            env_state: Array,
            action: Array,
            reward: Scalar,
            terminal: jnp.bool_,
            step_fn: Callable,
            experience_replay: ExperienceReplay,
            experience_replay_steps: jnp.int32,
            epsilon_decay: Scalar,
            epsilon_min: Scalar
    ) -> QLearningState:
        r"""
        Appends the transition to the experience replay buffer and performs ``experience_replay_steps`` steps.
        Each step consists of sampling a batch of transitions from the experience replay buffer, calculating the loss
        using the ``loss_fn`` function and performing a gradient step on the Q-network. The :math:`\epsilon`-greedy
        parameter is decayed by ``epsilon_decay``.
        Parameters
        ----------
        state : QLearningState
            The current state of the deep Q-learning agent.
        key : PRNGKey
            A PRNG key used as the random key.
        env_state : Array
            The current state of the environment.
        action : Array
            The action taken by the agent.
        reward : Scalar
            The reward received by the agent.
        terminal : bool
            Whether the episode has terminated.
        step_fn : Callable
            The function that performs a single gradient step on the Q-network.
        experience_replay : ExperienceReplay
            The experience replay buffer.
        experience_replay_steps : int
            The number of experience replay steps.
        epsilon_decay : Scalar
            The decay rate of the :math:`\epsilon`-greedy parameter.
        epsilon_min : Scalar
            The minimum value of the :math:`\epsilon`-greedy parameter.
        Returns
        -------
        QLearningState
            The updated state of the deep Q-learning agent.
        """
        # Store (s, a, r, terminal, s') using the state observed at the previous update call.
        replay_buffer = experience_replay.append(
            state.replay_buffer, state.prev_env_state,
            action, reward, terminal, env_state
        )
        params, net_state, opt_state = state.params, state.state, state.opt_state
        # NOTE(review): the target network is re-synchronized with the online network at the start
        # of every update call and then held fixed for the replay steps below — confirm intended.
        params_target, net_state_target = deepcopy(params), deepcopy(net_state)
        non_zero_loss = experience_replay.is_ready(replay_buffer)
        for _ in range(experience_replay_steps):
            batch_key, network_key, key = jax.random.split(key, 3)
            batch = experience_replay.sample(replay_buffer, batch_key)
            loss_params = (network_key, net_state, params_target, net_state_target, batch, non_zero_loss)
            params, net_state, opt_state, _ = step_fn(params, loss_params, opt_state)
        return QLearningState(
            params=params,
            state=net_state,
            opt_state=opt_state,
            replay_buffer=replay_buffer,
            prev_env_state=env_state,
            epsilon=jax.lax.max(state.epsilon * epsilon_decay, epsilon_min)
        )
    @staticmethod
    def sample(
            state: QLearningState,
            key: PRNGKey,
            env_state: Array,
            q_network: hk.TransformedWithState,
            act_space_size: jnp.int32
    ) -> jnp.int32:
        r"""
        Samples random action with probability :math:`\epsilon` and the greedy action with probability
        :math:`1 - \epsilon`. The greedy action is the action with the highest Q-value.
        Parameters
        ----------
        state : QLearningState
            The state of the deep Q-learning agent.
        key : PRNGKey
            A PRNG key used as the random key.
        env_state : Array
            The current state of the environment.
        q_network : hk.TransformedWithState
            The Q-network.
        act_space_size : jnp.int32
            The size of the action space.
        Returns
        -------
        int
            Selected action.
        """
        network_key, epsilon_key, action_key = jax.random.split(key, 3)
        # Explore (uniform random action) with probability epsilon, otherwise exploit (argmax Q).
        return jax.lax.cond(
            jax.random.uniform(epsilon_key) < state.epsilon,
            lambda: jax.random.choice(action_key, act_space_size),
            lambda: jnp.argmax(q_network.apply(state.params, state.state, network_key, env_state)[0])
        )
from copy import deepcopy
from functools import partial
from typing import Callable
import gymnasium as gym
import haiku as hk
import jax
import jax.numpy as jnp
import optax
from chex import dataclass, Array, PRNGKey, Scalar, Shape
from reinforced_lib.agents import BaseAgent, AgentState
from reinforced_lib.utils.experience_replay import experience_replay, ExperienceReplay, ReplayBuffer
from reinforced_lib.utils.jax_utils import gradient_step
@dataclass
class ExpectedSarsaState(AgentState):
    """
    Container for the state of the deep expected SARSA agent.
    Attributes
    ----------
    params : hk.Params
        Parameters of the Q-network.
    state : hk.State
        State of the Q-network.
    opt_state : optax.OptState
        Optimizer state.
    replay_buffer : ReplayBuffer
        Experience replay buffer.
    prev_env_state : Array
        Previous environment state.
    """
    params: hk.Params                # online Q-network parameters
    state: hk.State                  # online Q-network (non-trainable) state
    opt_state: optax.OptState        # optimizer state for the Q-network
    replay_buffer: ReplayBuffer      # experience replay storage
    prev_env_state: Array            # observation from the previous update call
class ExpectedSarsa(BaseAgent):
    r"""
    Deep expected SARSA agent with temperature parameter :math:`\tau` and experience replay buffer. The agent uses
    a deep neural network to approximate the Q-value function. The Q-network is trained to minimize the Bellman
    error. This agent follows the on-policy learning paradigm and is suitable for environments with discrete action
    spaces.
    Parameters
    ----------
    q_network : hk.TransformedWithState
        Architecture of the Q-network.
    obs_space_shape : Shape
        Shape of the observation space.
    act_space_size : jnp.int32
        Size of the action space.
    optimizer : optax.GradientTransformation, optional
        Optimizer of the Q-network. If None, the Adam optimizer with learning rate 1e-3 is used.
    experience_replay_buffer_size : jnp.int32, default=10000
        Size of the experience replay buffer.
    experience_replay_batch_size : jnp.int32, default=64
        Batch size of the samples from the experience replay buffer.
    experience_replay_steps : jnp.int32, default=5
        Number of experience replay steps per update.
    discount : Scalar, default=0.99
        Discount factor. :math:`\gamma = 0.0` means no discount, :math:`\gamma = 1.0` means infinite discount. :math:`0 \leq \gamma \leq 1`
    tau : Scalar, default=1.0
        Temperature parameter. :math:`\tau = 0.0` means no exploration, :math:`\tau = \infty` means infinite exploration. :math:`\tau > 0`
    """
    def __init__(
            self,
            q_network: hk.TransformedWithState,
            obs_space_shape: Shape,
            act_space_size: jnp.int32,
            optimizer: optax.GradientTransformation = None,
            experience_replay_buffer_size: jnp.int32 = 10000,
            experience_replay_batch_size: jnp.int32 = 64,
            experience_replay_steps: jnp.int32 = 5,
            discount: Scalar = 0.99,
            tau: Scalar = 1.0
    ) -> None:
        assert experience_replay_buffer_size > experience_replay_batch_size > 0
        assert 0.0 <= discount <= 1.0
        assert tau > 0.0
        if optimizer is None:
            optimizer = optax.adam(1e-3)
        # Promote a scalar shape to a 1-tuple so downstream code can rely on a tuple shape.
        self.obs_space_shape = obs_space_shape if jnp.ndim(obs_space_shape) > 0 else (obs_space_shape,)
        self.act_space_size = act_space_size
        er = experience_replay(
            experience_replay_buffer_size,
            experience_replay_batch_size,
            self.obs_space_shape,
            (1,)
        )
        # Bake static configuration into the pure static methods and JIT-compile them.
        self.init = jax.jit(partial(
            self.init,
            obs_space_shape=self.obs_space_shape,
            q_network=q_network,
            optimizer=optimizer,
            experience_replay=er
        ))
        self.update = jax.jit(partial(
            self.update,
            q_network=q_network,
            step_fn=partial(
                gradient_step,
                optimizer=optimizer,
                loss_fn=partial(self.loss_fn, q_network=q_network, discount=discount, tau=tau)
            ),
            experience_replay=er,
            experience_replay_steps=experience_replay_steps
        ))
        self.sample = jax.jit(partial(
            self.sample,
            q_network=q_network,
            act_space_size=act_space_size,
            tau=tau
        ))
    @staticmethod
    def parameter_space() -> gym.spaces.Dict:
        # Declarative description of the constructor's tunable parameters.
        return gym.spaces.Dict({
            'obs_space_shape': gym.spaces.Sequence(gym.spaces.Box(1, jnp.inf, (1,), jnp.int32)),
            'act_space_size': gym.spaces.Box(1, jnp.inf, (1,), jnp.int32),
            'experience_replay_buffer_size': gym.spaces.Box(1, jnp.inf, (1,), jnp.int32),
            'experience_replay_batch_size': gym.spaces.Box(1, jnp.inf, (1,), jnp.int32),
            'discount': gym.spaces.Box(0.0, 1.0, (1,)),
            'tau': gym.spaces.Box(0.0, jnp.inf, (1,))
        })
    @property
    def update_observation_space(self) -> gym.spaces.Dict:
        # Observations required by ``update``: full (s, a, r, terminal) transition data.
        return gym.spaces.Dict({
            'env_state': gym.spaces.Box(-jnp.inf, jnp.inf, self.obs_space_shape),
            'action': gym.spaces.Discrete(self.act_space_size),
            'reward': gym.spaces.Box(-jnp.inf, jnp.inf, (1,)),
            'terminal': gym.spaces.MultiBinary(1)
        })
    @property
    def sample_observation_space(self) -> gym.spaces.Dict:
        # Observations required by ``sample``: only the current environment state.
        return gym.spaces.Dict({
            'env_state': gym.spaces.Box(-jnp.inf, jnp.inf, self.obs_space_shape)
        })
    @property
    def action_space(self) -> gym.spaces.Discrete:
        return gym.spaces.Discrete(self.act_space_size)
    @staticmethod
    def init(
            key: PRNGKey,
            obs_space_shape: Shape,
            q_network: hk.TransformedWithState,
            optimizer: optax.GradientTransformation,
            experience_replay: ExperienceReplay
    ) -> ExpectedSarsaState:
        r"""
        Initializes the Q-network, optimizer and experience replay buffer with given parameters.
        First state of the environment is assumed to be a tensor of zeros.
        Parameters
        ----------
        key : PRNGKey
            A PRNG key used as the random key.
        obs_space_shape : Shape
            The shape of the observation space.
        q_network : hk.TransformedWithState
            The Q-network.
        optimizer : optax.GradientTransformation
            The optimizer.
        experience_replay : ExperienceReplay
            The experience replay buffer.
        Returns
        -------
        ExpectedSarsaState
            Initial state of the deep expected SARSA agent.
        """
        # Dummy input only fixes the observation shape for Haiku's shape inference.
        x_dummy = jnp.empty(obs_space_shape)
        params, state = q_network.init(key, x_dummy)
        opt_state = optimizer.init(params)
        replay_buffer = experience_replay.init()
        return ExpectedSarsaState(
            params=params,
            state=state,
            opt_state=opt_state,
            replay_buffer=replay_buffer,
            prev_env_state=jnp.zeros(obs_space_shape)
        )
    @staticmethod
    def loss_fn(
            params: hk.Params,
            key: PRNGKey,
            net_state: hk.State,
            params_target: hk.Params,
            net_state_target: hk.State,
            batch: tuple,
            non_zero_loss: jnp.bool_,
            q_network: hk.TransformedWithState,
            discount: Scalar,
            tau: Scalar
    ) -> tuple[Scalar, hk.State]:
        r"""
        Loss is the mean squared Bellman error :math:`\mathcal{L}(\theta) = \mathbb{E}_{s, a, r, s'} \left[ \left( r +
        \gamma \sum_{a'} \pi(a'|s') Q(s', a') - Q(s, a) \right)^2 \right]` where :math:`s` is the current state,
        :math:`a` is the current action, :math:`r` is the reward, :math:`s'` is the next state, :math:`\gamma` is
        the discount factor, :math:`Q(s, a)` is the Q-value of the state-action pair. Loss can be calculated on a batch
        of transitions.
        Parameters
        ----------
        params : hk.Params
            The parameters of the Q-network.
        key : PRNGKey
            A PRNG key used as the random key.
        net_state : hk.State
            The state of the Q-network.
        params_target : hk.Params
            The parameters of the target Q-network.
        net_state_target : hk.State
            The state of the target Q-network.
        batch : tuple
            A batch of transitions from the experience replay buffer.
        non_zero_loss : bool
            Flag used to avoid updating the Q-network when the experience replay buffer is not full.
        q_network : hk.TransformedWithState
            The Q-network.
        discount : Scalar
            The discount factor.
        tau : Scalar
            The temperature parameter.
        Returns
        -------
        Tuple[Scalar, hk.State]
            The loss and the new state of the Q-network.
        """
        states, actions, rewards, terminals, next_states = batch
        q_key, q_target_key = jax.random.split(key)
        q_values, state = q_network.apply(params, net_state, q_key, states)
        # Gather the Q-value of the action actually taken in each transition.
        q_values = jnp.take_along_axis(q_values, actions.astype(jnp.int32), axis=-1)
        q_values_target, _ = q_network.apply(params_target, net_state_target, q_target_key, next_states)
        # Expected SARSA target: expectation of Q over the softmax (Boltzmann) policy.
        probs_target = jax.nn.softmax(q_values_target / tau)
        target = rewards + (1 - terminals) * discount * jnp.sum(probs_target * q_values_target, axis=-1, keepdims=True)
        target = jax.lax.stop_gradient(target)
        loss = optax.l2_loss(q_values, target).mean()
        # The loss is zeroed out while the replay buffer is still warming up.
        return loss * non_zero_loss, state
    @staticmethod
    def update(
            state: ExpectedSarsaState,
            key: PRNGKey,
            env_state: Array,
            action: Array,
            reward: Scalar,
            terminal: jnp.bool_,
            q_network: hk.TransformedWithState,
            step_fn: Callable,
            experience_replay: ExperienceReplay,
            experience_replay_steps: jnp.int32
    ) -> ExpectedSarsaState:
        r"""
        Appends the transition to the experience replay buffer and performs ``experience_replay_steps`` steps.
        Each step consists of sampling a batch of transitions from the experience replay buffer, calculating the loss
        using the ``loss_fn`` function and performing a gradient step on the Q-network.
        Parameters
        ----------
        state : ExpectedSarsaState
            The current state of the deep expected SARSA agent.
        key : PRNGKey
            A PRNG key used as the random key.
        env_state : Array
            The current state of the environment.
        action : Array
            The action taken by the agent.
        reward : Scalar
            The reward received by the agent.
        terminal : bool
            Whether the episode has terminated.
        q_network : hk.TransformedWithState
            The Q-network. NOTE(review): this parameter is not referenced in the body below
            (the network is already baked into ``step_fn``) — confirm whether it can be dropped.
        step_fn : Callable
            The function that performs a single gradient step on the Q-network.
        experience_replay : ExperienceReplay
            The experience replay buffer.
        experience_replay_steps : int
            The number of experience replay steps.
        Returns
        -------
        ExpectedSarsaState
            The updated state of the deep expected SARSA agent.
        """
        # Store (s, a, r, terminal, s') using the state observed at the previous update call.
        replay_buffer = experience_replay.append(
            state.replay_buffer, state.prev_env_state,
            action, reward, terminal, env_state
        )
        params, net_state, opt_state = state.params, state.state, state.opt_state
        # NOTE(review): the target network is re-synchronized with the online network at the start
        # of every update call and then held fixed for the replay steps below — confirm intended.
        params_target, net_state_target = deepcopy(params), deepcopy(net_state)
        non_zero_loss = experience_replay.is_ready(replay_buffer)
        for _ in range(experience_replay_steps):
            batch_key, network_key, key = jax.random.split(key, 3)
            batch = experience_replay.sample(replay_buffer, batch_key)
            loss_params = (network_key, net_state, params_target, net_state_target, batch, non_zero_loss)
            params, net_state, opt_state, _ = step_fn(params, loss_params, opt_state)
        return ExpectedSarsaState(
            params=params,
            state=net_state,
            opt_state=opt_state,
            replay_buffer=replay_buffer,
            prev_env_state=env_state
        )
    @staticmethod
    def sample(
            state: ExpectedSarsaState,
            key: PRNGKey,
            env_state: Array,
            q_network: hk.TransformedWithState,
            act_space_size: jnp.int32,
            tau: Scalar
    ) -> jnp.int32:
        r"""
        Selects an action using the softmax policy with the temperature parameter :math:`\tau`:
        .. math::
           \pi(a|s) = \frac{e^{Q(s, a) / \tau}}{\sum_{a'} e^{Q(s, a') / \tau}}
        Parameters
        ----------
        state : ExpectedSarsaState
            The state of the deep expected SARSA agent.
        key : PRNGKey
            A PRNG key used as the random key.
        env_state : Array
            The current state of the environment.
        q_network : hk.TransformedWithState
            The Q-network.
        act_space_size : jnp.int32
            The size of the action space.
        tau : Scalar
            The temperature parameter.
        Returns
        -------
        int
            Selected action.
        """
        network_key, categorical_key = jax.random.split(key)
        # Q-values act as logits of the Boltzmann policy; tau scales the exploration level.
        logits = q_network.apply(state.params, state.state, network_key, env_state)[0]
        return jax.random.categorical(categorical_key, logits / tau)
from copy import deepcopy
from functools import partial
from typing import Callable
import gymnasium as gym
import haiku as hk
import jax
import jax.numpy as jnp
import optax
from chex import dataclass, Array, PRNGKey, Scalar, Shape
from reinforced_lib.agents import BaseAgent, AgentState
from reinforced_lib.utils.experience_replay import experience_replay, ExperienceReplay, ReplayBuffer
from reinforced_lib.utils.jax_utils import gradient_step
@dataclass
class DQNState(AgentState):
    r"""
    Container for the state of the double Q-learning agent.
    Attributes
    ----------
    params : hk.Params
        Parameters of the main Q-network.
    state : hk.State
        State of the main Q-network.
    params_target : hk.Params
        Parameters of the target Q-network.
    state_target : hk.State
        State of the target Q-network.
    opt_state : optax.OptState
        Optimizer state of the main Q-network.
    replay_buffer : ReplayBuffer
        Experience replay buffer.
    prev_env_state : Array
        Previous environment state.
    epsilon : Scalar
        :math:`\epsilon`-greedy parameter.
    """
    params: hk.Params                # online (main) Q-network parameters
    state: hk.State                  # online Q-network (non-trainable) state
    params_target: hk.Params         # target Q-network parameters (soft-updated)
    state_target: hk.State           # target Q-network state
    opt_state: optax.OptState        # optimizer state for the online Q-network
    replay_buffer: ReplayBuffer      # experience replay storage
    prev_env_state: Array            # observation from the previous update call
    epsilon: Scalar                  # current epsilon-greedy exploration rate
class DQN(BaseAgent):
    r"""
    Double Q-learning agent [2]_ with :math:`\epsilon`-greedy exploration and experience replay buffer. The agent
    uses two Q-networks to stabilize the learning process and avoid overestimation of the Q-values. The main Q-network
    is trained to minimize the Bellman error. The target Q-network is updated with a soft update. This agent follows
    the off-policy learning paradigm and is suitable for environments with discrete action spaces.

    Parameters
    ----------
    q_network : hk.TransformedWithState
        Architecture of the Q-networks.
    obs_space_shape : Shape
        Shape of the observation space.
    act_space_size : jnp.int32
        Size of the action space.
    optimizer : optax.GradientTransformation, optional
        Optimizer of the Q-networks. If None, the Adam optimizer with learning rate 1e-3 is used.
    experience_replay_buffer_size : jnp.int32, default=10000
        Size of the experience replay buffer.
    experience_replay_batch_size : jnp.int32, default=64
        Batch size of the samples from the experience replay buffer.
    experience_replay_steps : jnp.int32, default=5
        Number of experience replay steps per update.
    discount : Scalar, default=0.99
        Discount factor. :math:`\gamma = 0.0` means no discount, :math:`\gamma = 1.0` means infinite discount. :math:`0 \leq \gamma \leq 1`
    epsilon : Scalar, default=1.0
        Initial :math:`\epsilon`-greedy parameter. :math:`0 \leq \epsilon \leq 1`.
    epsilon_decay : Scalar, default=0.999
        Epsilon decay factor. :math:`\epsilon_{t+1} = \epsilon_{t} * \epsilon_{decay}`. :math:`0 \leq \epsilon_{decay} \leq 1`.
    epsilon_min : Scalar, default=0.001
        Minimum :math:`\epsilon`-greedy parameter. :math:`0 \leq \epsilon_{min} \leq \epsilon`.
    tau : Scalar, default=0.01
        Soft update factor. :math:`\tau = 0.0` means no soft update, :math:`\tau = 1.0` means hard update. :math:`0 \leq \tau \leq 1`.

    References
    ----------
    .. [2] van Hasselt, H., Guez, A., & Silver, D. (2016). Deep Reinforcement Learning with Double Q-Learning.
       Proceedings of the Thirtieth AAAI Conference on Artificial Intelligence, 2094–2100. Phoenix, Arizona: AAAI Press.
    """

    def __init__(
            self,
            q_network: hk.TransformedWithState,
            obs_space_shape: Shape,
            act_space_size: jnp.int32,
            optimizer: optax.GradientTransformation = None,
            experience_replay_buffer_size: jnp.int32 = 10000,
            experience_replay_batch_size: jnp.int32 = 64,
            experience_replay_steps: jnp.int32 = 5,
            discount: Scalar = 0.99,
            epsilon: Scalar = 1.0,
            epsilon_decay: Scalar = 0.999,
            epsilon_min: Scalar = 0.001,
            tau: Scalar = 0.01
    ) -> None:
        # validate the hyperparameters eagerly, before anything is jit-compiled
        assert experience_replay_buffer_size > experience_replay_batch_size > 0
        assert 0.0 <= discount <= 1.0
        assert 0.0 <= epsilon <= 1.0
        assert 0.0 <= epsilon_decay <= 1.0
        assert 0.0 <= epsilon_min <= epsilon
        assert 0.0 <= tau <= 1.0

        if optimizer is None:
            optimizer = optax.adam(1e-3)

        # normalize a scalar shape to a 1-tuple so downstream code can always treat it as a tuple
        self.obs_space_shape = obs_space_shape if jnp.ndim(obs_space_shape) > 0 else (obs_space_shape,)
        self.act_space_size = act_space_size

        # (1,) is presumably the stored action shape — verify against ``experience_replay``
        er = experience_replay(
            experience_replay_buffer_size,
            experience_replay_batch_size,
            self.obs_space_shape,
            (1,)
        )

        # bind the static configuration into jit-compiled versions of init/update/sample;
        # only the dynamic arguments (agent state, PRNG key, observations) remain free
        self.init = jax.jit(partial(
            self.init,
            obs_space_shape=self.obs_space_shape,
            q_network=q_network,
            optimizer=optimizer,
            experience_replay=er,
            epsilon=epsilon
        ))
        self.update = jax.jit(partial(
            self.update,
            step_fn=partial(
                gradient_step,
                optimizer=optimizer,
                loss_fn=partial(self.loss_fn, q_network=q_network, discount=discount)
            ),
            experience_replay=er,
            experience_replay_steps=experience_replay_steps,
            epsilon_decay=epsilon_decay,
            epsilon_min=epsilon_min,
            tau=tau
        ))
        self.sample = jax.jit(partial(
            self.sample,
            q_network=q_network,
            act_space_size=act_space_size
        ))

    @staticmethod
    def parameter_space() -> gym.spaces.Dict:
        # Gymnasium description of the constructor hyperparameters and their valid ranges.
        return gym.spaces.Dict({
            'obs_space_shape': gym.spaces.Sequence(gym.spaces.Box(1, jnp.inf, (1,), jnp.int32)),
            'act_space_size': gym.spaces.Box(1, jnp.inf, (1,), jnp.int32),
            'experience_replay_buffer_size': gym.spaces.Box(1, jnp.inf, (1,), jnp.int32),
            'experience_replay_batch_size': gym.spaces.Box(1, jnp.inf, (1,), jnp.int32),
            'discount': gym.spaces.Box(0.0, 1.0, (1,)),
            'epsilon': gym.spaces.Box(0.0, 1.0, (1,)),
            'epsilon_decay': gym.spaces.Box(0.0, 1.0, (1,)),
            'epsilon_min': gym.spaces.Box(0.0, 1.0, (1,)),
            'tau': gym.spaces.Box(0.0, 1.0, (1,))
        })

    @property
    def update_observation_space(self) -> gym.spaces.Dict:
        # observations required by ``update``: one full transition
        return gym.spaces.Dict({
            'env_state': gym.spaces.Box(-jnp.inf, jnp.inf, self.obs_space_shape),
            'action': gym.spaces.Discrete(self.act_space_size),
            'reward': gym.spaces.Box(-jnp.inf, jnp.inf, (1,)),
            'terminal': gym.spaces.MultiBinary(1)
        })

    @property
    def sample_observation_space(self) -> gym.spaces.Dict:
        # observations required by ``sample``: the current environment state only
        return gym.spaces.Dict({
            'env_state': gym.spaces.Box(-jnp.inf, jnp.inf, self.obs_space_shape)
        })

    @property
    def action_space(self) -> gym.spaces.Discrete:
        return gym.spaces.Discrete(self.act_space_size)

    @staticmethod
    def init(
            key: PRNGKey,
            obs_space_shape: Shape,
            q_network: hk.TransformedWithState,
            optimizer: optax.GradientTransformation,
            experience_replay: ExperienceReplay,
            epsilon: Scalar
    ) -> DQNState:
        r"""
        Initializes the Q-networks, optimizer and experience replay buffer with given parameters.
        First state of the environment is assumed to be a tensor of zeros.

        Parameters
        ----------
        key : PRNGKey
            A PRNG key used as the random key.
        obs_space_shape : Shape
            The shape of the observation space.
        q_network : hk.TransformedWithState
            The Q-network.
        optimizer : optax.GradientTransformation
            The optimizer.
        experience_replay : ExperienceReplay
            The experience replay buffer.
        epsilon : Scalar
            The initial :math:`\epsilon`-greedy parameter.

        Returns
        -------
        DQNState
            Initial state of the double Q-learning agent.
        """

        # a dummy input is enough for Haiku to infer all parameter shapes
        x_dummy = jnp.empty(obs_space_shape)
        params, state = q_network.init(key, x_dummy)

        opt_state = optimizer.init(params)
        replay_buffer = experience_replay.init()

        return DQNState(
            params=params,
            state=state,
            # the target network starts as an exact copy of the main network
            params_target=deepcopy(params),
            state_target=deepcopy(state),
            opt_state=opt_state,
            replay_buffer=replay_buffer,
            prev_env_state=jnp.zeros(obs_space_shape),
            epsilon=epsilon
        )

    @staticmethod
    def loss_fn(
            params: hk.Params,
            key: PRNGKey,
            dqn_state: DQNState,
            batch: tuple,
            non_zero_loss: jnp.bool_,
            q_network: hk.TransformedWithState,
            discount: Scalar
    ) -> tuple[Scalar, hk.State]:
        r"""
        Loss is the mean squared Bellman error :math:`\mathcal{L}(\theta) = \mathbb{E}_{s, a, r, s'} \left[ \left( r +
        \gamma \max_{a'} Q'(s', a') - Q(s, a) \right)^2 \right]` where :math:`s` is the current state, :math:`a` is the
        current action, :math:`r` is the reward, :math:`s'` is the next state, :math:`\gamma` is the discount factor,
        :math:`Q(s, a)` is the Q-value of the main Q-network, :math:`Q'(s', a')` is the Q-value of the target
        Q-network. Loss can be calculated on a batch of transitions.

        Parameters
        ----------
        params : hk.Params
            The parameters of the Q-network.
        key : PRNGKey
            A PRNG key used as the random key.
        dqn_state : DQNState
            The state of the double Q-learning agent.
        batch : tuple
            A batch of transitions from the experience replay buffer.
        non_zero_loss : bool
            Flag used to avoid updating the Q-network when the experience replay buffer is not full.
        q_network : hk.TransformedWithState
            The Q-network.
        discount : Scalar
            The discount factor.

        Returns
        -------
        tuple[Scalar, hk.State]
            The loss and the new state of the Q-network.
        """

        states, actions, rewards, terminals, next_states = batch
        q_key, q_target_key = jax.random.split(key)

        # Q-values of the actions actually taken, under the main network
        q_values, state = q_network.apply(params, dqn_state.state, q_key, states)
        q_values = jnp.take_along_axis(q_values, actions.astype(jnp.int32), axis=-1)

        # NOTE(review): the bootstrap target is the max over the *target* network's Q-values,
        # i.e. the classic DQN target with a target network; double Q-learning per [2]_ would
        # select the argmax action with the main network and evaluate it with the target
        # network — confirm this is intended
        q_values_target, _ = q_network.apply(dqn_state.params_target, dqn_state.state_target, q_target_key, next_states)
        # (1 - terminals) masks out the bootstrap term for terminal transitions
        target = rewards + (1 - terminals) * discount * jnp.max(q_values_target, axis=-1, keepdims=True)
        target = jax.lax.stop_gradient(target)

        loss = optax.l2_loss(q_values, target).mean()
        # multiplying by the flag zeroes loss (and gradients) while the buffer is not ready
        return loss * non_zero_loss, state

    @staticmethod
    def update(
            state: DQNState,
            key: PRNGKey,
            env_state: Array,
            action: Array,
            reward: Scalar,
            terminal: jnp.bool_,
            step_fn: Callable,
            experience_replay: ExperienceReplay,
            experience_replay_steps: jnp.int32,
            epsilon_decay: Scalar,
            epsilon_min: Scalar,
            tau: Scalar
    ) -> DQNState:
        r"""
        Appends the transition to the experience replay buffer and performs ``experience_replay_steps`` steps.
        Each step consists of sampling a batch of transitions from the experience replay buffer, calculating the loss
        using the ``loss_fn`` function, performing a gradient step on the main Q-network, and soft updating the target
        Q-network. Soft update of the parameters is defined as :math:`\theta_{target} = \tau \theta + (1 - \tau) \theta_{target}`.
        The :math:`\epsilon`-greedy parameter is decayed by ``epsilon_decay``.

        Parameters
        ----------
        state : DQNState
            The current state of the double Q-learning agent.
        key : PRNGKey
            A PRNG key used as the random key.
        env_state : Array
            The current state of the environment.
        action : Array
            The action taken by the agent.
        reward : Scalar
            The reward received by the agent.
        terminal : bool
            Whether the episode has terminated.
        step_fn : Callable
            The function that performs a single gradient step on the Q-network.
        experience_replay : ExperienceReplay
            The experience replay buffer.
        experience_replay_steps : int
            The number of experience replay steps.
        epsilon_decay : Scalar
            The decay rate of the :math:`\epsilon`-greedy parameter.
        epsilon_min : Scalar
            The minimum value of the :math:`\epsilon`-greedy parameter.
        tau : Scalar
            The soft update parameter.

        Returns
        -------
        DQNState
            The updated state of the double Q-learning agent.
        """

        # store the transition (s, a, r, terminal, s') built from the previous and current states
        replay_buffer = experience_replay.append(
            state.replay_buffer, state.prev_env_state,
            action, reward, terminal, env_state
        )

        params, net_state, opt_state = state.params, state.state, state.opt_state
        params_target, state_target = state.params_target, state.state_target
        non_zero_loss = experience_replay.is_ready(replay_buffer)

        for _ in range(experience_replay_steps):
            batch_key, network_key, key = jax.random.split(key, 3)
            batch = experience_replay.sample(replay_buffer, batch_key)
            # NOTE(review): ``state`` (the agent state from the start of this call) is passed to
            # the loss, so every replay step bootstraps against the same target parameters; the
            # incrementally updated ``params_target`` only takes effect on the next ``update``
            # call — confirm this is intended
            params, net_state, opt_state, _ = step_fn(params, (network_key, state, batch, non_zero_loss), opt_state)
            # soft (Polyak) update of the target network towards the main network
            params_target, state_target = optax.incremental_update(
                (params, net_state), (params_target, state_target), tau)

        return DQNState(
            params=params,
            state=net_state,
            params_target=params_target,
            state_target=state_target,
            opt_state=opt_state,
            replay_buffer=replay_buffer,
            prev_env_state=env_state,
            # exponential epsilon decay, clipped from below at epsilon_min
            epsilon=jax.lax.max(state.epsilon * epsilon_decay, epsilon_min)
        )

    @staticmethod
    def sample(
            state: DQNState,
            key: PRNGKey,
            env_state: Array,
            q_network: hk.TransformedWithState,
            act_space_size: jnp.int32
    ) -> jnp.int32:
        r"""
        Samples random action with probability :math:`\epsilon` and the greedy action with probability
        :math:`1 - \epsilon` using the main Q-network. The greedy action is the action with the highest Q-value.

        Parameters
        ----------
        state : DQNState
            The state of the double Q-learning agent.
        key : PRNGKey
            A PRNG key used as the random key.
        env_state : Array
            The current state of the environment.
        q_network : hk.TransformedWithState
            The Q-network.
        act_space_size : jnp.int32
            The size of the action space.

        Returns
        -------
        int
            Selected action.
        """

        network_key, epsilon_key, action_key = jax.random.split(key, 3)

        # explore (uniform random action) with probability epsilon, otherwise exploit (greedy);
        # jax.lax.cond keeps the branch selection traceable under jit
        return jax.lax.cond(
            jax.random.uniform(epsilon_key) < state.epsilon,
            lambda: jax.random.choice(action_key, act_space_size),
            lambda: jnp.argmax(q_network.apply(state.params, state.state, network_key, env_state)[0])
        )
from functools import partial
from typing import Callable
import jax
import jax.numpy as jnp
from chex import dataclass, Array, Numeric, PRNGKey, Scalar, Shape
from reinforced_lib.agents import AgentState
@dataclass
class ParticleFilterState(AgentState):
    """
    Container for the state of the particle filter agent.

    Attributes
    ----------
    positions : array_like
        Positions of the particles.
    logit_weights : array_like
        Unnormalized log weights of the particles.
    """

    # particle positions in the (possibly multi-dimensional) state space
    positions: Array

    # unnormalized log weights; a vector of zeros corresponds to a uniform distribution
    logit_weights: Array
def simple_resample(operands: tuple[ParticleFilterState, PRNGKey]) -> ParticleFilterState:
    """
    Draws a fresh set of particle positions from a categorical distribution parameterized
    by the current particle log weights, then resets all weights to a uniform
    (zero-logit) distribution.

    Parameters
    ----------
    operands : tuple[ParticleFilterState, PRNGKey]
        Tuple containing the filter state and a PRNG key.

    Returns
    -------
    ParticleFilterState
        Updated filter state.
    """

    state, key = operands

    # indices of the surviving particles, drawn proportionally to their weights
    survivor_idx = jax.random.categorical(key, state.logit_weights, shape=state.positions.shape)
    uniform_logits = jnp.zeros_like(state.logit_weights)

    return ParticleFilterState(positions=state.positions[survivor_idx], logit_weights=uniform_logits)
def effective_sample_size(state: ParticleFilterState, threshold: Scalar = 0.5) -> bool:
    r"""
    Calculates the effective sample size [9]_ (ESS). A resampling is deemed necessary
    when the ESS falls below the number of particles multiplied by ``threshold``.

    Parameters
    ----------
    state : ParticleFilterState
        Current state of the filter.
    threshold : float, default=0.5
        Threshold value used to decide if a resampling is necessary. :math:`thr \in (0, 1)`.

    Returns
    -------
    bool
        Information whether a resampling should be performed.

    References
    ----------
    .. [9] https://en.wikipedia.org/wiki/Effective_sample_size#Weighted_samples
    """

    probs = jax.nn.softmax(state.logit_weights)

    # ESS = 1 / sum(p^2); the comparison below is ESS < n_particles * threshold
    # rearranged to avoid the division
    inverse_ess = jnp.sum(probs ** 2)
    return 1 < inverse_ess * state.positions.size * threshold
def simple_transition(state: ParticleFilterState, key: PRNGKey, scale: Scalar, *args) -> ParticleFilterState:
    r"""
    Perturbs every particle position with Gaussian noise drawn from
    :math:`\mathcal{N}(0, scale^2)`; the particle weights are left untouched.

    Parameters
    ----------
    state : ParticleFilterState
        Current state of the filter.
    key : PRNGKey
        A PRNG key used as the random key.
    scale : float
        Scale of the random movement of particles. :math:`scale > 0`.

    Returns
    -------
    ParticleFilterState
        Updated filter state.
    """

    # standard normal noise, rescaled to the requested standard deviation
    noise = scale * jax.random.normal(key, state.positions.shape)
    moved = state.positions + noise

    return ParticleFilterState(positions=moved, logit_weights=state.logit_weights)
def linear_transition(state: ParticleFilterState, key: PRNGKey, scale: Scalar, delta_time: Scalar) -> ParticleFilterState:
    r"""
    Perturbs the particle positions with Gaussian noise whose standard deviation grows
    linearly with the elapsed time: :math:`\sigma = scale \cdot \Delta t`.

    Parameters
    ----------
    state : ParticleFilterState
        Current state of the filter.
    key : PRNGKey
        A PRNG key used as the random key.
    scale : float
        Scale of the random movement of particles. :math:`scale > 0`.
    delta_time : float
        Time elapsed since the last update.

    Returns
    -------
    ParticleFilterState
        Updated filter state.
    """

    # delegate to the basic transition with a time-scaled standard deviation
    effective_scale = scale * delta_time
    return simple_transition(state, key, effective_scale)
def affine_transition(state: ParticleFilterState, key: PRNGKey, scale: Array, delta_time: Scalar) -> ParticleFilterState:
    r"""
    Perturbs the particle positions with Gaussian noise whose standard deviation is an
    affine function of the elapsed time: :math:`\sigma = scale_0 \cdot \Delta t + scale_1`.

    Parameters
    ----------
    state : ParticleFilterState
        Current state of the filter.
    key : PRNGKey
        A PRNG key used as the random key.
    scale : array_like
        Scale of the random movement of particles. :math:`scale_0, scale_1 > 0`.
    delta_time : float
        Time elapsed since the last update.

    Returns
    -------
    ParticleFilterState
        Updated filter state.
    """

    # slope * elapsed time + intercept, delegated to the basic transition
    effective_scale = scale[0] * delta_time + scale[1]
    return simple_transition(state, key, effective_scale)
class ParticleFilter:
    """
    Particle filter (sequential Monte Carlo) algorithm estimating the
    internal environment state given noisy or partial observations.

    Parameters
    ----------
    initial_distribution_fn : callable
        Function that samples the initial particle positions; takes two positional arguments:

        - ``key``: a PRNG key used as a random key (`PRNGKey`).
        - ``shape``: shape of the sample (`Shape`).

        Returns the initial particle positions (`Array`).
    positions_shape : array_like
        Shape of the particle positions array.
    weights_shape : array_like
        Shape of the particle weights array.
    scale : array_like
        Scale of the random movement of the particles.
    observation_fn : callable
        Function that updates particles based on an observation from the environment; takes two positional arguments:

        - ``state``: the state of the filter (`ParticleFilterState`).
        - ``observation``: an observation from the environment (`any`).

        Returns the updated state of the filter (`ParticleFilterState`).
    resample_fn : callable, default=particle_filter.simple_resample
        Function that performs resampling of the particles; takes one positional argument:

        - ``operands``: a tuple containing the filter state and a PRNG key (`tuple[ParticleFilterState, PRNGKey]`).

        Returns the updated state of the filter (`ParticleFilterState`).
    resample_criterion_fn : callable, default=particle_filter.effective_sample_size
        Function that checks if a resampling is necessary; takes one positional argument:

        - ``state``: the state of the filter (`ParticleFilterState`).

        Returns information whether a resampling should be performed (`bool`).
    transition_fn : callable, default=particle_filter.simple_transition
        Function that updates the particle positions; takes four positional arguments:

        - ``state``: the state of the filter (`ParticleFilterState`).
        - ``key``: a PRNG key used as a random key (`PRNGKey`).
        - ``scale``: scale of the random movement of the particles (`array_like`).
        - ``time``: the current time (`float`).

        Returns the updated state of the filter (`ParticleFilterState`).
    """

    def __init__(
            self,
            initial_distribution_fn: Callable,
            positions_shape: Shape,
            weights_shape: Shape,
            scale: Numeric,
            observation_fn: Callable[[ParticleFilterState, any], ParticleFilterState],
            resample_fn: Callable[[tuple[ParticleFilterState, PRNGKey]], ParticleFilterState] = simple_resample,
            resample_criterion_fn: Callable[[ParticleFilterState], bool] = effective_sample_size,
            transition_fn: Callable[[ParticleFilterState, PRNGKey, Numeric, Scalar], ParticleFilterState] = simple_transition
    ) -> None:
        # bind the static configuration (shapes, callbacks, scale) into jit-compiled
        # versions of init/update/sample; only the dynamic arguments remain free
        self.init = jax.jit(
            partial(
                self.init,
                initial_distribution_fn=initial_distribution_fn,
                positions_shape=positions_shape,
                weights_shape=weights_shape
            )
        )
        self.update = jax.jit(
            partial(
                self.update,
                observation_fn=observation_fn,
                resample_fn=resample_fn,
                resample_criterion_fn=resample_criterion_fn,
                transition_fn=transition_fn,
                scale=scale
            )
        )
        self.sample = jax.jit(self.sample)

    @staticmethod
    def init(
            key: PRNGKey,
            initial_distribution_fn: Callable,
            positions_shape: Shape,
            weights_shape: Shape
    ) -> ParticleFilterState:
        """
        Creates and initializes an instance of the particle filter.

        Parameters
        ----------
        key : PRNGKey
            A PRNG key used as the random key.
        initial_distribution_fn : callable
            Function that samples the initial particle positions.

            - ``key``: PRNG key used as a random key (`PRNGKey`).
            - ``shape``: shape of the sample (`Shape`).

            Returns the initial particle positions (`Array`).
        positions_shape : array_like
            Shape of the particle positions array.
        weights_shape : array_like
            Shape of the particle weights array.

        Returns
        -------
        ParticleFilterState
            Initial state of the Particle Filter.
        """

        # zero logits correspond to uniform particle weights
        return ParticleFilterState(
            positions=initial_distribution_fn(key, positions_shape),
            logit_weights=jnp.zeros(weights_shape)
        )

    @staticmethod
    def update(
            state: ParticleFilterState,
            key: PRNGKey,
            observation_fn: Callable[[ParticleFilterState, any], ParticleFilterState],
            observation: any,
            resample_fn: Callable[[tuple[ParticleFilterState, PRNGKey]], ParticleFilterState],
            resample_criterion_fn: Callable[[ParticleFilterState], bool],
            transition_fn: Callable[[ParticleFilterState, PRNGKey, Numeric, Scalar], ParticleFilterState],
            delta_time: Scalar,
            scale: Numeric
    ) -> ParticleFilterState:
        """
        Updates the state of the filter based on an observation from the environment, then
        performs resampling (if necessary) and transition of the particles.

        Parameters
        ----------
        state : ParticleFilterState
            Current state of the filter.
        key : PRNGKey
            A PRNG key used as the random key.
        observation_fn : callable
            Function that updates particles based on an observation from the environment; takes two positional arguments:

            - ``state``: the state of the filter (`ParticleFilterState`).
            - ``observation``: an observation from the environment (`any`).

            Returns the updated state of the filter (`ParticleFilterState`).
        observation : any
            An observation from the environment.
        resample_fn : callable, default=particle_filter.simple_resample
            Function that performs resampling of the particles; takes one positional argument:

            - ``operands``: a tuple containing the filter state and a PRNG key (`tuple[ParticleFilterState, PRNGKey]`).

            Returns the updated state of the filter (`ParticleFilterState`).
        resample_criterion_fn : callable, default=particle_filter.effective_sample_size
            Function that checks if a resampling is necessary; takes one positional argument:

            - ``state``: the state of the filter (`ParticleFilterState`).

            Returns information whether a resampling should be performed (`bool`).
        transition_fn : callable, default=particle_filter.simple_transition
            Function that updates the particle positions; takes four positional arguments:

            - ``state``: the state of the filter (`ParticleFilterState`).
            - ``key``: a PRNG key used as a random key (`PRNGKey`).
            - ``scale``: scale of the random movement of the particles (`array_like`).
            - ``time``: the current time (`float`).
        delta_time : float
            Time difference between the current and the previous observation.
        scale : array_like
            Scale of the random movement of the particles.

        Returns
        -------
        ParticleFilterState
            Updated filter state.
        """

        resample_key, transition_key = jax.random.split(key)

        # 1. reweight particles according to the observation;
        # 2. conditionally resample (the false branch is the identity on the state);
        # 3. propagate particle positions through the transition model
        state = observation_fn(state, observation)
        state = jax.lax.cond(resample_criterion_fn(state), resample_fn, lambda op: op[0], (state, resample_key))
        state = transition_fn(state, transition_key, scale, delta_time)

        # repack into a fresh dataclass instance — presumably to guarantee a consistent
        # pytree type regardless of what the callbacks returned; verify if simplifiable
        return ParticleFilterState(
            positions=state.positions,
            logit_weights=state.logit_weights
        )

    @staticmethod
    def sample(
            state: ParticleFilterState,
            key: PRNGKey
    ) -> Numeric:
        """
        Samples the estimated environment state from a categorical distribution with particle weights.

        Parameters
        ----------
        state : ParticleFilterState
            Current state of the filter.
        key : PRNGKey
            A PRNG key used as the random key.

        Returns
        -------
        array_like
            Estimated environment state.
        """

        # draw one particle index proportionally to the weights and return its position
        return state.positions[jax.random.categorical(key, state.logit_weights)]
from functools import partial
import gymnasium as gym
import jax
import jax.numpy as jnp
from chex import dataclass, Array, Scalar, PRNGKey
from reinforced_lib.agents import BaseAgent, AgentState
@dataclass
class UCBState(AgentState):
    """
    Container for the state of the UCB agent.

    Attributes
    ----------
    R : array_like
        Sum of the rewards obtained for each arm.
    N : array_like
        Number of tries for each arm.
    """

    # (possibly discounted) cumulative reward per arm
    R: Array

    # (possibly discounted) pull count per arm
    N: Array
class UCB(BaseAgent):
    r"""
    UCB agent with optional discounting. The main idea behind this algorithm is to introduce a preference factor
    in its policy, so that the selection of the next action is based on both the current estimation of the
    action-value function and the uncertainty of this estimation.

    Parameters
    ----------
    n_arms : int
        Number of bandit arms. :math:`N \in \mathbb{N}_{+}`.
    c : float
        Degree of exploration. :math:`c \geq 0`.
    gamma : float, default=1.0
        If less than one, a discounted UCB algorithm [8]_ is used. :math:`\gamma \in (0, 1]`.

    References
    ----------
    .. [8] Aurélien Garivier, Eric Moulines. 2008. On Upper-Confidence Bound Policies for Non-Stationary
       Bandit Problems. 10.48550/ARXIV.0805.3415.
    """

    def __init__(
            self,
            n_arms: jnp.int32,
            c: Scalar,
            gamma: Scalar = 1.0
    ) -> None:
        # validate the hyperparameters eagerly, before anything is jit-compiled
        assert c >= 0
        assert 0 < gamma <= 1

        self.n_arms = n_arms

        # bind the static hyperparameters into jit-compiled init/update/sample functions
        self.init = jax.jit(partial(self.init, n_arms=n_arms))
        self.update = jax.jit(partial(self.update, gamma=gamma))
        self.sample = jax.jit(partial(self.sample, c=c))

    @staticmethod
    def parameter_space() -> gym.spaces.Dict:
        # Gymnasium description of the constructor hyperparameters and their valid ranges
        return gym.spaces.Dict({
            'n_arms': gym.spaces.Box(1, jnp.inf, (1,), jnp.int32),
            'c': gym.spaces.Box(0.0, jnp.inf, (1,), jnp.float32),
            'gamma': gym.spaces.Box(0.0, 1.0, (1,), jnp.float32)
        })

    @property
    def update_observation_space(self) -> gym.spaces.Dict:
        # observations required by ``update``: the last action and its reward
        return gym.spaces.Dict({
            'action': gym.spaces.Discrete(self.n_arms),
            'reward': gym.spaces.Box(-jnp.inf, jnp.inf, (1,), jnp.float32)
        })

    @property
    def sample_observation_space(self) -> gym.spaces.Dict:
        # ``sample`` needs no observations — the policy depends only on the agent state
        return gym.spaces.Dict({})

    @property
    def action_space(self) -> gym.spaces.Space:
        return gym.spaces.Discrete(self.n_arms)

    @staticmethod
    def init(
            key: PRNGKey,
            n_arms: jnp.int32
    ) -> UCBState:
        """
        Creates and initializes instance of the UCB agent for ``n_arms`` arms. The sum of the rewards is set to zero
        and the number of tries is set to one for each arm.

        Parameters
        ----------
        key : PRNGKey
            A PRNG key used as the random key.
        n_arms : int
            Number of bandit arms.

        Returns
        -------
        UCBState
            Initial state of the UCB agent.
        """

        # N starts at one (not zero) so that Q = R / N and sqrt(log(t) / N) in
        # ``sample`` are well-defined from the very first step
        return UCBState(
            R=jnp.zeros((n_arms, 1)),
            N=jnp.ones((n_arms, 1))
        )

    @staticmethod
    def update(
            state: UCBState,
            key: PRNGKey,
            action: jnp.int32,
            reward: Scalar,
            gamma: Scalar
    ) -> UCBState:
        r"""
        In the stationary case, the sum of the rewards for a given arm is increased by reward :math:`r` obtained after
        step :math:`t` and the number of tries for the corresponding arm is incremented. In the non-stationary case,
        the update follows the equations

        .. math::
           \begin{gather}
              R_{t + 1}(a) = \mathbb{1}_{A_t = a} r + \gamma R_t(a) , \\
              N_{t + 1}(a) = \mathbb{1}_{A_t = a} + \gamma N_t(a).
           \end{gather}

        Parameters
        ----------
        state : UCBState
            Current state of agent.
        key : PRNGKey
            A PRNG key used as the random key.
        action : int
            Previously selected action.
        reward : float
            Reward collected by the agent after taking the previous action.
        gamma : float
            Discount factor.

        Returns
        -------
        UCBState
            Updated agent state.
        """

        # discount all arms, then add the new reward/try to the selected arm only;
        # with gamma == 1 this reduces to the plain stationary update
        return UCBState(
            R=(gamma * state.R).at[action].add(reward),
            N=(gamma * state.N).at[action].add(1)
        )

    @staticmethod
    def sample(
            state: UCBState,
            key: PRNGKey,
            c: Scalar
    ) -> jnp.int32:
        r"""
        UCB agent follows the policy

        .. math::
           A = \operatorname*{argmax}_{a \in \mathscr{A}} \left[ Q(a) + c \sqrt{\frac{\ln \left( {\sum_{a' \in \mathscr{A}}} N(a') \right) }{N(a)}} \right] .

        where :math:`\mathscr{A}` is a set of all actions and :math:`Q` is calculated as :math:`Q(a) = \frac{R(a)}{N(a)}`.
        The second component of the sum represents a sort of upper bound on the value of :math:`Q`, where :math:`c`
        behaves like a confidence interval and the square root - like an approximation of the :math:`Q` function
        estimation uncertainty. Note that the UCB policy is deterministic.

        Parameters
        ----------
        state : UCBState
            Current state of the agent.
        key : PRNGKey
            A PRNG key used as the random key.
        c : float
            Degree of exploration.

        Returns
        -------
        int
            Selected action.
        """

        # empirical mean reward per arm; t is the (discounted) total number of pulls
        Q = state.R / state.N
        t = jnp.sum(state.N)

        # deterministic argmax over the optimistic value estimates (key is unused)
        return jnp.argmax(Q + c * jnp.sqrt(jnp.log(t) / state.N))
from functools import partial
import gymnasium as gym
import jax
import jax.numpy as jnp
from chex import dataclass, Array, Scalar, PRNGKey
from reinforced_lib.agents import BaseAgent, AgentState
@dataclass
class SoftmaxState(AgentState):
    r"""
    Container for the state of the Softmax agent.

    Attributes
    ----------
    H : array_like
        Preference for each arm.
    r : float
        Average of all obtained rewards :math:`\bar{R}`.
    n : int
        Step number.
    """

    # learned preference per arm; the policy is softmax(H / tau)
    H: Array

    # running reward baseline used in the preference update
    r: Scalar

    # step counter, starting at 1
    # NOTE(review): jnp.int64 requires JAX 64-bit mode to be enabled, otherwise
    # values are silently downcast — confirm this is intended
    n: jnp.int64
class Softmax(BaseAgent):
r"""
Softmax agent with baseline and optional exponential recency-weighted average update. It learns a preference
function :math:`H`, which indicates a preference of selecting one arm over others. Algorithm policy can be
controlled by the temperature parameter :math:`\tau`. The implementation is inspired by the work of Sutton and Barto [5]_.
Parameters
----------
n_arms : int
Number of bandit arms. :math:`N \in \mathbb{N}_{+}`.
lr : float
Step size. :math:`lr > 0`.
alpha : float, default=0.0
If non-zero, exponential recency-weighted average is used to update :math:`\bar{R}`. :math:`\alpha \in [0, 1]`.
tau : float, default=1.0
Temperature parameter. :math:`\tau > 0`.
multiplier : float, default=1.0
Multiplier for the reward. :math:`multiplier > 0`.
"""
def __init__(
self,
n_arms: jnp.int32,
lr: Scalar,
alpha: Scalar = 0.0,
tau: Scalar = 1.0,
multiplier: Scalar = 1.0
) -> None:
assert lr > 0
assert 0 <= alpha <= 1
assert tau > 0
assert multiplier > 0
self.n_arms = n_arms
self.init = jax.jit(partial(self.init, n_arms=n_arms))
self.update = jax.jit(partial(self.update, lr=lr, alpha=alpha, tau=tau, multiplier=multiplier))
self.sample = jax.jit(partial(self.sample, tau=tau))
@staticmethod
def parameter_space() -> gym.spaces.Dict:
return gym.spaces.Dict({
'n_arms': gym.spaces.Box(1, jnp.inf, (1,), jnp.int32),
'lr': gym.spaces.Box(0.0, jnp.inf, (1,), jnp.float32),
'alpha': gym.spaces.Box(0.0, 1.0, (1,), jnp.float32),
'tau': gym.spaces.Box(0.0, jnp.inf, (1,), jnp.float32)
})
@property
def update_observation_space(self) -> gym.spaces.Dict:
return gym.spaces.Dict({
'action': gym.spaces.Discrete(self.n_arms),
'reward': gym.spaces.Box(-jnp.inf, jnp.inf, (1,), jnp.float32)
})
@property
def sample_observation_space(self) -> gym.spaces.Dict:
return gym.spaces.Dict({})
@property
def action_space(self) -> gym.spaces.Discrete:
return gym.spaces.Discrete(self.n_arms)
@staticmethod
def init(
key: PRNGKey,
n_arms: jnp.int32
) -> SoftmaxState:
r"""
Creates and initializes instance of the Softmax agent for ``n_arms`` arms. Preferences :math:`H` for each arm
are set to zero, as well as the average of all rewards :math:`\bar{R}`. The step number :math:`n` is
initialized to one.
Parameters
----------
key : PRNGKey
A PRNG key used as the random key.
n_arms : int
Number of bandit arms.
Returns
-------
SoftmaxState
Initial state of the Softmax agent.
"""
return SoftmaxState(
H=jnp.zeros((n_arms, 1)),
r=0.0,
n=1
)
@staticmethod
def update(
state: SoftmaxState,
key: PRNGKey,
action: jnp.int32,
reward: Scalar,
lr: Scalar,
alpha: Scalar,
tau: Scalar,
multiplier: Scalar
) -> SoftmaxState:
r"""
Preferences :math:`H` can be learned by stochastic gradient ascent. The softmax algorithm searches
for such a set of preferences that maximizes the expected reward :math:`\mathbb{E}[R]`.
The updates of :math:`H` for each action :math:`a` are calculated as:
.. math::
H_{t + 1}(a) = H_t(a) + \alpha (R_t - \bar{R}_t)(\mathbb{1}_{A_t = a} - \pi_t(a)),
where :math:`\bar{R_t}` is the average of all rewards up to but not including step :math:`t`
(by definition :math:`\bar{R}_1 = R_1`). The derivation of given formula can be found in [5]_.
In the stationary case, :math:`\bar{R_t}` can be calculated as
:math:`\bar{R}_{t + 1} = \bar{R}_t + \frac{1}{t} \lbrack R_t - \bar{R}_t \rbrack`. To improve the
algorithm's performance in the non-stationary case, we apply
:math:`\bar{R}_{t + 1} = \bar{R}_t + \alpha \lbrack R_t - \bar{R}_t \rbrack` with a constant
step size :math:`\alpha`.
Reward :math:`R_t` is multiplied by ``multiplier`` before updating preferences to allow for
more flexible reward scaling while keeping the algorithm's properties.
Parameters
----------
state : SoftmaxState
Current state of the agent.
key : PRNGKey
A PRNG key used as the random key.
action : int
Previously selected action.
reward : float
Reward collected by the agent after taking the previous action.
lr : float
Step size.
alpha : float
Exponential recency-weighted average factor (used when :math:`\alpha > 0`).
tau : float
Temperature parameter.
multiplier : float
Multiplier for the reward.
Returns
-------
SoftmaxState
Updated agent state.
"""
reward *= multiplier
r = jnp.where(state.n == 1, reward, state.r)
pi = jax.nn.softmax(state.H / tau)
return SoftmaxState(
H=state.H + lr * (reward - r) * (jnp.zeros_like(state.H).at[action].set(1) - pi),
r=r + (reward - r) * jnp.where(alpha == 0, 1 / state.n, alpha),
n=state.n + 1
)
@staticmethod
def sample(
    state: SoftmaxState,
    key: PRNGKey,
    tau: Scalar
) -> jnp.int32:
    r"""
    Draws the next action stochastically from the softmax distribution over the preferences.
    The probability of selecting action :math:`i` is

    .. math::
        softmax(H)_i = \frac{\exp(H_i / \tau)}{\sum_{h \in H} \exp(h / \tau)} ,

    where :math:`\tau` is the temperature parameter.

    Parameters
    ----------
    state : SoftmaxState
        Current state of the agent.
    key : PRNGKey
        A PRNG key used as the random key.
    tau : float
        Temperature parameter.

    Returns
    -------
    int
        Selected action.
    """

    # The temperature-scaled preferences serve directly as categorical logits;
    # jax.random.categorical applies the softmax internally.
    logits = state.H.squeeze() / tau
    return jax.random.categorical(key, logits)
from functools import partial
import gymnasium as gym
import jax
import jax.numpy as jnp
from chex import dataclass, Array, Scalar, PRNGKey
from reinforced_lib.agents import BaseAgent, AgentState
@dataclass
class EGreedyState(AgentState):
    r"""
    Container for the state of the :math:`\epsilon`-greedy agent.

    Attributes
    ----------
    Q : array_like
        Action-value function estimates for each arm.
    N : array_like
        Number of tries for each arm.
    """

    Q: Array  # shape (n_arms, 1); initialized to the optimistic start value — see ``EGreedy.init``
    N: Array  # shape (n_arms, 1), int32; per-arm try counters, initialized to one
class EGreedy(BaseAgent):
    r"""
    Epsilon-greedy [5]_ agent with an optimistic start behavior and optional exponential recency-weighted average update.
    It selects a random action from a set of all actions :math:`\mathscr{A}` with probability
    :math:`\epsilon` (exploration), otherwise it chooses the currently best action (exploitation).

    Parameters
    ----------
    n_arms : int
        Number of bandit arms. :math:`N \in \mathbb{N}_{+}`.
    e : float
        Experiment rate (epsilon). :math:`\epsilon \in [0, 1]`.
    optimistic_start : float, default=0.0
        Interpreted as the optimistic start to encourage exploration in the early stages.
    alpha : float, default=0.0
        If non-zero, exponential recency-weighted average is used to update :math:`Q` values. :math:`\alpha \in [0, 1]`.

    References
    ----------
    .. [5] Richard Sutton and Andrew Barto. 2018. Reinforcement Learning: An Introduction. The MIT Press.
    """

    def __init__(
            self,
            n_arms: jnp.int32,
            e: Scalar,
            optimistic_start: Scalar = 0.0,
            alpha: Scalar = 0.0
    ) -> None:
        assert 0 <= e <= 1
        assert 0 <= alpha <= 1

        self.n_arms = n_arms

        # Static hyperparameters are bound via ``partial`` so the jitted callables
        # close over them and callers only pass the dynamic arguments.
        self.init = jax.jit(partial(self.init, n_arms=n_arms, optimistic_start=optimistic_start))
        self.update = jax.jit(partial(self.update, alpha=alpha))
        self.sample = jax.jit(partial(self.sample, e=e))

    @staticmethod
    def parameter_space() -> gym.spaces.Dict:
        # Valid ranges of the constructor parameters.
        return gym.spaces.Dict({
            'n_arms': gym.spaces.Box(1, jnp.inf, (1,), jnp.int32),
            'e': gym.spaces.Box(0.0, 1.0, (1,), jnp.float32),
            'optimistic_start': gym.spaces.Box(0.0, jnp.inf, (1,), jnp.float32),
            'alpha': gym.spaces.Box(0.0, 1.0, (1,), jnp.float32)
        })

    @property
    def update_observation_space(self) -> gym.spaces.Dict:
        # Observations required by ``update``.
        return gym.spaces.Dict({
            'action': gym.spaces.Discrete(self.n_arms),
            'reward': gym.spaces.Box(-jnp.inf, jnp.inf, (1,), jnp.float32)
        })

    @property
    def sample_observation_space(self) -> gym.spaces.Dict:
        # ``sample`` needs no additional observations.
        return gym.spaces.Dict({})

    @property
    def action_space(self) -> gym.spaces.Discrete:
        return gym.spaces.Discrete(self.n_arms)

    @staticmethod
    def init(
            key: PRNGKey,
            n_arms: jnp.int32,
            optimistic_start: Scalar
    ) -> EGreedyState:
        r"""
        Creates and initializes instance of the :math:`\epsilon`-greedy agent for ``n_arms`` arms. Action-value function estimates are
        set to ``optimistic_start`` value and the number of tries is one for each arm.

        Parameters
        ----------
        key : PRNGKey
            A PRNG key used as the random key.
        n_arms : int
            Number of bandit arms.
        optimistic_start : float
            Interpreted as the optimistic start to encourage exploration in the early stages.

        Returns
        -------
        EGreedyState
            Initial state of the :math:`\epsilon`-greedy agent.
        """

        return EGreedyState(
            Q=(optimistic_start * jnp.ones((n_arms, 1))),
            N=jnp.ones((n_arms, 1), dtype=jnp.int32)
        )

    @staticmethod
    def update(
            state: EGreedyState,
            key: PRNGKey,
            action: jnp.int32,
            reward: Scalar,
            alpha: Scalar
    ) -> EGreedyState:
        r"""
        In the stationary case, the action-value estimate for a given arm is updated as
        :math:`Q_{t + 1} = Q_t + \frac{1}{t} \lbrack R_t - Q_t \rbrack` after receiving reward :math:`R_t` at step
        :math:`t` and the number of tries for the corresponding arm is incremented. In the non-stationary case,
        the update follows the equation :math:`Q_{t + 1} = Q_t + \alpha \lbrack R_t - Q_t \rbrack`.

        Parameters
        ----------
        state : EGreedyState
            Current state of the agent.
        key : PRNGKey
            A PRNG key used as the random key.
        action : int
            Previously selected action.
        reward : float
            Reward collected by the agent after taking the previous action.
        alpha : float
            Exponential recency-weighted average factor (used when :math:`\alpha > 0`).

        Returns
        -------
        EGreedyState
            Updated agent state.
        """

        def classic_update(operands: tuple) -> EGreedyState:
            # Sample-average update: step size 1 / N(action).
            state, action, reward, alpha = operands
            return EGreedyState(
                Q=state.Q.at[action].add((reward - state.Q[action]) / state.N[action]),
                N=state.N.at[action].add(1)
            )

        def erwa_update(operands: tuple) -> EGreedyState:
            # Exponential recency-weighted average update with constant step size alpha.
            state, action, reward, alpha = operands
            return EGreedyState(
                Q=state.Q.at[action].add(alpha * (reward - state.Q[action])),
                N=state.N.at[action].add(1)
            )

        # Both branches are traced by JAX; ``alpha == 0`` selects the sample-average variant.
        return jax.lax.cond(alpha == 0, classic_update, erwa_update, (state, action, reward, alpha))

    @staticmethod
    def sample(
            state: EGreedyState,
            key: PRNGKey,
            e: Scalar
    ) -> jnp.int32:
        r"""
        Epsilon-greedy agent follows the policy:

        .. math::
            A =
            \begin{cases}
                \operatorname*{argmax}_{a \in \mathscr{A}} Q(a) & \text{with probability } 1 - \epsilon , \\
                \text{random action} & \text{with probability } \epsilon . \\
            \end{cases}

        Parameters
        ----------
        state : EGreedyState
            Current state of the agent.
        key : PRNGKey
            A PRNG key used as the random key.
        e : float
            Experiment rate (epsilon).

        Returns
        -------
        int
            Selected action.
        """

        # Split so the exploration test and the random choice use independent randomness.
        epsilon_key, choice_key = jax.random.split(key)

        # Explore (uniform random arm) with probability ``e``, otherwise exploit the best estimate.
        return jax.lax.cond(
            jax.random.uniform(epsilon_key) < e,
            lambda: jax.random.choice(choice_key, state.Q.size),
            lambda: jnp.argmax(state.Q)
        )
from functools import partial
import gymnasium as gym
import jax
import jax.numpy as jnp
from chex import dataclass, Array, PRNGKey, Scalar
from reinforced_lib.agents import BaseAgent, AgentState
@dataclass
class ThompsonSamplingState(AgentState):
    """
    Container for the state of the Thompson sampling agent.

    Attributes
    ----------
    alpha : array_like
        Number of successful tries for each arm.
    beta : array_like
        Number of failed tries for each arm.
    """

    alpha: Array  # shape (n_arms, 1); exponentially smoothed success counts — see ``ThompsonSampling.update``
    beta: Array   # shape (n_arms, 1); exponentially smoothed failure counts
class ThompsonSampling(BaseAgent):
    r"""
    Contextual Bernoulli Thompson sampling agent with the exponential smoothing. The implementation is inspired by the
    work of Krotov et al. [7]_. Thompson sampling is based on a beta distribution with parameters related to the number
    of successful and failed attempts. Higher values of the parameters decrease the entropy of the distribution while
    changing the ratio of the parameters shifts the expected value.

    Parameters
    ----------
    n_arms : int
        Number of bandit arms. :math:`N \in \mathbb{N}_{+}`.
    decay : float, default=1.0
        Decay rate. If equal to zero, smoothing is not applied. :math:`w \geq 0`.

    References
    ----------
    .. [7] Alexander Krotov, Anton Kiryanov and Evgeny Khorov. 2020. Rate Control With Spatial Reuse
       for Wi-Fi 6 Dense Deployments. IEEE Access. 8. 168898-168909.
    """

    def __init__(self, n_arms: jnp.int32, decay: Scalar = 1.0) -> None:
        assert decay >= 0

        self.n_arms = n_arms

        # Static hyperparameters are bound via ``partial`` before jitting.
        self.init = jax.jit(partial(self.init, n_arms=self.n_arms))
        self.update = jax.jit(partial(self.update, decay=decay))
        self.sample = jax.jit(self.sample)

    @staticmethod
    def parameter_space() -> gym.spaces.Dict:
        # Valid ranges of the constructor parameters.
        return gym.spaces.Dict({
            'n_arms': gym.spaces.Box(1, jnp.inf, (1,), jnp.int32),
            'decay': gym.spaces.Box(0.0, jnp.inf, (1,))
        })

    @property
    def update_observation_space(self) -> gym.spaces.Dict:
        # Observations required by ``update``.
        return gym.spaces.Dict({
            'action': gym.spaces.Discrete(self.n_arms),
            'n_successful': gym.spaces.Box(0, jnp.inf, (1,), jnp.int32),
            'n_failed': gym.spaces.Box(0, jnp.inf, (1,), jnp.int32),
            'delta_time': gym.spaces.Box(0.0, jnp.inf, (1,))
        })

    @property
    def sample_observation_space(self) -> gym.spaces.Dict:
        # ``sample`` additionally needs one contextual feature per arm.
        return gym.spaces.Dict({
            'context': gym.spaces.Box(-jnp.inf, jnp.inf, (self.n_arms,))
        })

    @property
    def action_space(self) -> gym.spaces.Discrete:
        return gym.spaces.Discrete(self.n_arms)

    @staticmethod
    def init(key: PRNGKey, n_arms: jnp.int32) -> ThompsonSamplingState:
        r"""
        Creates and initializes an instance of the Thompson sampling agent for ``n_arms`` arms. The :math:`\mathbf{\alpha}`
        and :math:`\mathbf{\beta}` vectors are set to zero to create a non-informative prior distribution.

        Parameters
        ----------
        key : PRNGKey
            A PRNG key used as the random key.
        n_arms : int
            Number of bandit arms.

        Returns
        -------
        ThompsonSamplingState
            Initial state of the Thompson sampling agent.
        """

        return ThompsonSamplingState(
            alpha=jnp.zeros((n_arms, 1)),
            beta=jnp.zeros((n_arms, 1))
        )

    @staticmethod
    def update(
            state: ThompsonSamplingState,
            key: PRNGKey,
            action: jnp.int32,
            n_successful: jnp.int32,
            n_failed: jnp.int32,
            delta_time: Scalar,
            decay: Scalar
    ) -> ThompsonSamplingState:
        r"""
        Thompson sampling can be adjusted to non-stationary environments by exponential smoothing of values of
        vectors :math:`\mathbf{\alpha}` and :math:`\mathbf{\beta}` which increases the entropy of a distribution
        over time. Given a result of trial :math:`s`, we apply the following equations for each action :math:`a`:

        .. math::
            \begin{gather}
                \mathbf{\alpha}_{t + 1}(a) = \mathbf{\alpha}_t(a) e^{-w \Delta t} + \mathbb{1}_{A = a} \cdot s , \\
                \mathbf{\beta}_{t + 1}(a) = \mathbf{\beta}_t(a) e^{-w \Delta t} + \mathbb{1}_{A = a} \cdot (1 - s) ,
            \end{gather}

        where :math:`\Delta t` is the time elapsed since the last action selection and :math:`w` is the decay rate.

        Parameters
        ----------
        state : ThompsonSamplingState
            Current state of the agent.
        key : PRNGKey
            A PRNG key used as the random key.
        action : int
            Previously selected action.
        n_successful : int
            Number of successful tries.
        n_failed : int
            Number of failed tries.
        delta_time : float
            Time elapsed since the last action selection.
        decay : float
            Decay rate.

        Returns
        -------
        ThompsonSamplingState
            Updated agent state.
        """

        # decay == 0 yields a smoothing factor of 1, i.e. no smoothing at all.
        smoothing_value = jnp.exp(-decay * delta_time)

        return ThompsonSamplingState(
            # Decay all arms, then credit the taken action with the fresh counts.
            alpha=(state.alpha * smoothing_value).at[action].add(n_successful),
            beta=(state.beta * smoothing_value).at[action].add(n_failed)
        )

    @staticmethod
    def sample(
            state: ThompsonSamplingState,
            key: PRNGKey,
            context: Array
    ) -> jnp.int32:
        r"""
        The Thompson sampling policy is stochastic. The algorithm draws :math:`q_a` from the distribution
        :math:`\operatorname{Beta}(1 + \mathbf{\alpha}(a), 1 + \mathbf{\beta}(a))` for each arm :math:`a`.
        The next action is selected as

        .. math::
            A = \operatorname*{argmax}_{a \in \mathscr{A}} q_a r_a ,

        where :math:`r_a` is contextual information for the arm :math:`a`, and :math:`\mathscr{A}` is a set
        of all actions.

        Parameters
        ----------
        state : ThompsonSamplingState
            Current state of the agent.
        key : PRNGKey
            A PRNG key used as the random key.
        context : array_like
            One-dimensional array of features for each arm.

        Returns
        -------
        int
            Selected action.
        """

        # The +1 offsets implement the uniform Beta(1, 1) prior over success probabilities.
        success_prob = jax.random.beta(key, 1 + state.alpha, 1 + state.beta).squeeze()
        action = jnp.argmax(success_prob * context)
        return action
from functools import partial
import gymnasium as gym
import jax
import jax.numpy as jnp
from chex import dataclass, Array, Scalar, PRNGKey
from reinforced_lib.agents import BaseAgent, AgentState
@dataclass
class Exp3State(AgentState):
    """
    Container for the state of the Exp3 agent.

    Attributes
    ----------
    omega : array_like
        Preference for each arm.
    """

    omega: Array  # shape (n_arms, 1); multiplicative weights, initialized uniformly — see ``Exp3.init``
class Exp3(BaseAgent):
    r"""
    Basic Exp3 agent for stationary multi-armed bandit problems with exploration factor :math:`\gamma`. The higher
    the value, the more the agent explores. The implementation is inspired by the work of Auer et al. [6]_. There
    are many variants of the Exp3 algorithm, you can find more information in the original paper.

    Parameters
    ----------
    n_arms : int
        Number of bandit arms. :math:`N \in \mathbb{N}_{+}`.
    gamma : float
        Exploration factor. :math:`\gamma \in (0, 1]`.
    min_reward : float
        Minimum possible reward.
    max_reward : float
        Maximum possible reward.

    References
    ----------
    .. [6] Peter Auer, Nicolò Cesa-Bianchi, Yoav Freund, and Robert E. Schapire. 2002. The Nonstochastic Multiarmed
       Bandit Problem. SIAM Journal on Computing, 32(1), 48–77.
    """

    def __init__(
            self,
            n_arms: jnp.int32,
            gamma: Scalar,
            min_reward: Scalar,
            max_reward: Scalar
    ) -> None:
        assert 0 < gamma <= 1

        self.n_arms = n_arms

        # Static hyperparameters are bound via ``partial`` before jitting.
        self.init = jax.jit(partial(self.init, n_arms=n_arms))
        self.update = jax.jit(partial(self.update, gamma=gamma, min_reward=min_reward, max_reward=max_reward))
        self.sample = jax.jit(partial(self.sample, gamma=gamma))

    @staticmethod
    def parameter_space() -> gym.spaces.Dict:
        # Valid ranges of the constructor parameters.
        return gym.spaces.Dict({
            'n_arms': gym.spaces.Box(1, jnp.inf, (1,), jnp.int32),
            'gamma': gym.spaces.Box(0, 1, (1,), jnp.float32),
            'min_reward': gym.spaces.Box(-jnp.inf, jnp.inf, (1,), jnp.float32),
            'max_reward': gym.spaces.Box(-jnp.inf, jnp.inf, (1,), jnp.float32)
        })

    @property
    def update_observation_space(self) -> gym.spaces.Dict:
        # Observations required by ``update``.
        return gym.spaces.Dict({
            'action': gym.spaces.Discrete(self.n_arms),
            'reward': gym.spaces.Box(-jnp.inf, jnp.inf, (1,), jnp.float32)
        })

    @property
    def sample_observation_space(self) -> gym.spaces.Dict:
        # ``sample`` needs no additional observations.
        return gym.spaces.Dict({})

    @property
    def action_space(self) -> gym.spaces.Discrete:
        return gym.spaces.Discrete(self.n_arms)

    @staticmethod
    def init(
            key: PRNGKey,
            n_arms: jnp.int32
    ) -> Exp3State:
        """
        Initializes the Exp3 agent state with uniform preference for each arm.

        Parameters
        ----------
        key : PRNGKey
            A PRNG key used as the random key.
        n_arms : int
            Number of bandit arms.

        Returns
        -------
        Exp3State
            Initial state of the Exp3 agent.
        """

        return Exp3State(
            omega=jnp.ones((n_arms, 1)) / n_arms
        )

    @staticmethod
    def update(
            state: Exp3State,
            key: PRNGKey,
            action: jnp.int32,
            reward: Scalar,
            gamma: Scalar,
            min_reward: Scalar,
            max_reward: Scalar
    ) -> Exp3State:
        r"""
        Agent updates its preference for the selected arm :math:`a` according to the following formula:

        .. math::
            \omega_{t + 1}(a) = \omega_{t}(a) \exp \left( \frac{\gamma r}{\pi(a) K} \right)

        where :math:`\omega_{t + 1}(a)` is the preference of arm :math:`a` at time :math:`t + 1`, :math:`\pi(a)` is
        the probability of selecting arm :math:`a`, and :math:`K` is the number of arms. The reward :math:`r` is
        normalized to the range :math:`[0, 1]`. The exponential growth significantly increases the weight of good arms,
        so in the long use of the agent it is important to **ensure that the values of** :math:`\omega` **do not exceed
        the maximum value of the floating point type!**

        Parameters
        ----------
        state : Exp3State
            Current state of the agent.
        key : PRNGKey
            A PRNG key used as the random key.
        action : int
            Previously selected action.
        reward : float
            Reward collected by the agent after taking the previous action.
        gamma : float
            Exploration factor.
        min_reward : float
            Minimum possible reward.
        max_reward : float
            Maximum possible reward.

        Returns
        -------
        Exp3State
            Updated agent state.
        """

        # Normalize the reward to [0, 1] as required by the Exp3 analysis.
        reward = (reward - min_reward) / (max_reward - min_reward)
        n_arms = state.omega.size

        # Mixture of the normalized weights and the uniform distribution (same as in ``sample``).
        pi = (1 - gamma) * state.omega / state.omega.sum() + gamma / n_arms

        return Exp3State(
            # Importance-weighted multiplicative update of the chosen arm only.
            omega=state.omega.at[action].mul(jnp.exp(gamma * reward / (pi[action] * n_arms)))
        )

    @staticmethod
    def sample(
            state: Exp3State,
            key: PRNGKey,
            gamma: Scalar
    ) -> jnp.int32:
        r"""
        The Exp3 policy is stochastic. Algorithm chooses a random arm with probability :math:`\gamma`, otherwise it
        draws arm :math:`a` with probability :math:`\omega(a) / \sum_{b=1}^N \omega(b)`.

        Parameters
        ----------
        state : Exp3State
            Current state of the agent.
        key : PRNGKey
            A PRNG key used as the random key.
        gamma : float
            Exploration factor.

        Returns
        -------
        int
            Selected action.
        """

        # pi sums to one; categorical expects logits, hence the log.
        pi = (1 - gamma) * state.omega / state.omega.sum() + gamma / state.omega.size
        return jax.random.categorical(key, jnp.log(pi.squeeze()))
import os.path
from collections import defaultdict
import jax.numpy as jnp
import matplotlib.pyplot as plt
from chex import Array, Scalar, Numeric
from reinforced_lib.logs import BaseLogger, Source
from reinforced_lib.utils import timestamp
class PlotsLogger(BaseLogger):
    r"""
    Logger that presents and saves values as matplotlib plots. Offers smoothing of the curve, scatter plots, and
    multiple curves in a single chart (while logging arrays). ``PlotsLogger`` is able to synchronize the logged
    values in time. This means that if the same source is logged less often than other sources, the step will be
    increased accordingly to maintain the appropriate spacing between the values on the x-axis.

    Parameters
    ----------
    plots_dir : str, default="~"
        Output directory for the plots.
    plots_ext : str, default="pdf"
        Extension of the saved plots.
    plots_smoothing : float, default=0.6
        Weight of the exponential moving average (EMA/EWMA) [1]_ used for smoothing. :math:`\alpha \in [0, 1)`.
    plots_scatter : bool, default=False
        Set to ``True`` if you want to generate a scatter plot instead of a line plot.
        ``plots_smoothing`` parameter does not apply to the scatter plots.
    plots_sync_steps : bool, default=False
        Set to ``True`` if you want to synchronize the logged values in time.

    References
    ----------
    .. [1] https://en.wikipedia.org/wiki/Moving_average#Exponential_moving_average
    """

    def __init__(
            self,
            plots_dir: str = None,
            plots_ext: str = 'pdf',
            plots_smoothing: Scalar = 0.6,
            plots_scatter: bool = False,
            plots_sync_steps: bool = False,
            **kwargs
    ) -> None:
        assert 1 > plots_smoothing >= 0

        super().__init__(**kwargs)

        self._dir = plots_dir if plots_dir else os.path.expanduser("~")
        self._ext = plots_ext
        self._smoothing = plots_smoothing
        self._scatter = plots_scatter
        self._sync_steps = plots_sync_steps

        # Sources seen since the last step increment (used only when syncing steps).
        self._current_values = set()
        self._step = 0

        # Per-source history of logged values and their corresponding steps.
        self._values = defaultdict(list)
        self._steps = defaultdict(list)

    def finish(self) -> None:
        """
        Shows the generated plots and saves them to the output directory with the specified extension
        (the names of the files follow the pattern ``"rlib-plot-[source]-[date]-[time].[ext]"``).
        """

        def exponential_moving_average(values: list, weight: Scalar) -> list:
            # EMA: each point mixes the new value with the previous smoothed value.
            smoothed = [values[0]]

            for value in values[1:]:
                smoothed.append((1 - weight) * value + weight * smoothed[-1])

            return smoothed

        def lineplot(values: list, steps: list, alpha: Scalar = 1.0, label: bool = False) -> None:
            values = jnp.array(values)
            values = jnp.squeeze(values)

            if values.ndim == 1:
                plt.plot(steps, values, alpha=alpha, c='C0')
            elif values.ndim == 2:
                # One curve per column, cycling through the default matplotlib palette.
                for i, val in enumerate(jnp.array(values).T):
                    plt.plot(steps, val, alpha=alpha, c=f'C{i % 10}', label=i if label else '')
                plt.legend()

        def scatterplot(values: list, steps: list, label: bool = False) -> None:
            values = jnp.array(values).squeeze()

            if values.ndim == 1:
                plt.scatter(steps, values, c='C0', marker='.', s=4)
            elif values.ndim == 2:
                for i, val in enumerate(jnp.array(values).T):
                    plt.scatter(steps, val, c=f'C{i % 10}', label=i if label else '', marker='.', s=4)
                plt.legend()

        for name, values in self._values.items():
            filename = f'rlib-plot-{name}-{timestamp()}.{self._ext}'

            if self._scatter:
                scatterplot(values, self._steps[name], True)
            else:
                # Raw curve drawn faintly underneath, smoothed curve on top.
                smoothed = exponential_moving_average(values, self._smoothing)
                lineplot(values, self._steps[name], alpha=0.3)
                lineplot(smoothed, self._steps[name], label=True)

            plt.title(name)
            plt.xlabel('step')
            plt.savefig(os.path.join(self._dir, filename), bbox_inches='tight', dpi=300)
            plt.show()

    def log_scalar(self, source: Source, value: Scalar, *_) -> None:
        """
        Adds a given scalar to the plot values.

        Parameters
        ----------
        source : Source
            Source of the logged value.
        value : float
            Scalar to log.
        """

        self._log(source, value)

    def log_array(self, source: Source, value: Array, *_) -> None:
        """
        Adds a given array to the plot values.

        Parameters
        ----------
        source : Source
            Source of the logged value.
        value : array_like
            Array to log.
        """

        self._log(source, value)

    def _log(self, source: Source, value: Numeric) -> None:
        """
        Adds a given value to the plot values and tracks the step it belongs to.

        Parameters
        ----------
        source : Source
            Source of the logged value.
        value : Numeric
            Value to log.
        """

        name = self.source_to_name(source)

        if self._sync_steps:
            # A repeated source marks the beginning of a new (global) step.
            if name in self._current_values:
                self._step += 1
                self._current_values.clear()

            self._current_values.add(name)
            step = self._step
        else:
            # Independent per-source counter starting from zero.
            step = self._steps[name][-1] + 1 if self._steps[name] else 0

        self._values[name].append(value)
        self._steps[name].append(step)
import json
import os.path
from collections import defaultdict
import jax.numpy as jnp
import numpy as np
from chex import Array, Scalar
from reinforced_lib.logs import BaseLogger, Source
from reinforced_lib.utils import timestamp
class CsvLogger(BaseLogger):
    """
    Logger that saves values in CSV format. It saves the logged values to the CSV file when the experiment is finished.
    ``CsvLogger`` synchronizes the logged values in time. It means that if the same source is logged twice in a row,
    the step number will be incremented for all columns and the logger will move to the next row.

    Parameters
    ----------
    csv_path : str, default="~/rlib-logs-[date]-[time].csv"
        Path to the output file.
    """

    def __init__(self, csv_path: str = None, **kwargs) -> None:
        super().__init__(**kwargs)

        if csv_path is None:
            # Default to a timestamped file in the user's home directory.
            csv_path = f'rlib-logs-{timestamp()}.csv'
            csv_path = os.path.join(os.path.expanduser("~"), csv_path)

        self._csv_path = csv_path

        # Sources seen in the current row; a repeated source starts a new row.
        self._current_values = set()
        self._step = 0

        # Per-source history of logged values and the row index of each value.
        self._values = defaultdict(list)
        self._steps = defaultdict(list)

    def finish(self) -> None:
        """
        Saves the logged values to the CSV file.
        """

        # Context manager guarantees the file is closed even if a write fails
        # (the original implementation leaked the handle on exceptions).
        with open(self._csv_path, 'w') as file:
            file.write(','.join(self._values.keys()) + '\n')

            # Sparse (row, column) table; cells without a logged value stay empty.
            rows, cols = self._step + 1, len(self._values)
            csv_array = np.full((rows, cols), fill_value='', dtype=object)

            for j, (name, values) in enumerate(self._values.items()):
                for i, v in enumerate(values):
                    csv_array[self._steps[name][i], j] = v

            for row in csv_array:
                file.write(','.join(map(str, row)) + '\n')

    def log_scalar(self, source: Source, value: Scalar, *_) -> None:
        """
        Logs a scalar as a standard value in a column.

        Parameters
        ----------
        source : Source
            Source of the logged value.
        value : float
            Scalar to log.
        """

        self._log(source, value)

    def log_array(self, source: Source, value: Array, *_) -> None:
        """
        Logs an array as a JSON string.

        Parameters
        ----------
        source : Source
            Source of the logged value.
        value : array_like
            Array to log.
        """

        if isinstance(value, (np.ndarray, jnp.ndarray)):
            value = value.tolist()

        # NOTE(review): double quotes inside the JSON payload are not doubled as RFC 4180
        # requires, so string-valued arrays may produce malformed CSV cells — verify with
        # the downstream consumer before changing the format.
        self._log(source, f"\"{json.dumps(value)}\"")

    def log_dict(self, source: Source, value: dict, *_) -> None:
        """
        Logs a dictionary as a JSON string.

        Parameters
        ----------
        source : Source
            Source of the logged value.
        value : dict
            Dictionary to log.
        """

        self._log(source, f"\"{json.dumps(value)}\"")

    def log_other(self, source: Source, value: any, *_) -> None:
        """
        Logs an object as a JSON string.

        Parameters
        ----------
        source : Source
            Source of the logged value.
        value : any
            Value of any type to log.
        """

        self._log(source, f"\"{json.dumps(value)}\"")

    def _log(self, source: Source, value: any) -> None:
        """
        Saves the logged value and controls the current step.

        Parameters
        ----------
        source : Source
            Source of the logged value.
        value : any
            Value to log.
        """

        name = self.source_to_name(source)

        # Seeing the same source again means the current row is complete.
        if name in self._current_values:
            self._step += 1
            self._current_values.clear()

        self._current_values.add(name)
        self._values[name].append(value)
        self._steps[name].append(self._step)
import json
from collections import defaultdict
from chex import Array, Scalar
from tensorboardX import SummaryWriter
from reinforced_lib.logs import BaseLogger, Source
class TensorboardLogger(BaseLogger):
    """
    Logger that saves values in TensorBoard [2]_ format and can optionally forward them to
    Comet [3]_. ``TensorboardLogger`` synchronizes the logged values in time: if one source
    is logged less often than the others, the step counter is advanced accordingly so the
    spacing of its points on the x-axis stays proportional.

    Parameters
    ----------
    tb_log_dir : str, optional
        Path to the output directory. If None, the default directory is used.
    tb_comet_config : dict, optional
        Configuration for the Comet logger. If None, the logger is disabled.
    tb_sync_steps : bool, default=False
        Set to ``True`` if you want to synchronize the logged values in time.

    References
    ----------
    .. [2] TensorBoard. https://www.tensorflow.org/tensorboard
    .. [3] Comet. https://www.comet.ml
    """

    def __init__(
            self,
            tb_log_dir: str = None,
            tb_comet_config: dict[str, any] = None,
            tb_sync_steps: bool = False,
            **kwargs,
    ) -> None:
        super().__init__(**kwargs)

        self._sync_steps = tb_sync_steps
        self._current_values = set()
        self._step = 0
        self._steps = defaultdict(int)

        # A missing Comet configuration disables the Comet integration entirely.
        comet_config = tb_comet_config if tb_comet_config is not None else {'disabled': True}
        self._writer = SummaryWriter(log_dir=tb_log_dir, comet_config=comet_config)

    def finish(self) -> None:
        """
        Closes the summary writer.
        """

        self._writer.close()

    def log_scalar(self, source: Source, value: Scalar, *_) -> None:
        """
        Adds a given scalar to the summary writer.

        Parameters
        ----------
        source : Source
            Source of the logged value.
        value : float
            Scalar to log.
        """

        tag = self.source_to_name(source)
        self._writer.add_scalar(tag, value, self._get_step(tag))

    def log_array(self, source: Source, value: Array, *_) -> None:
        """
        Adds a given array to the summary writer as a histogram.

        Parameters
        ----------
        source : Source
            Source of the logged value.
        value : array_like
            Array to log.
        """

        tag = self.source_to_name(source)
        self._writer.add_histogram(tag, value, self._get_step(tag))

    def log_dict(self, source: Source, value: dict, *_) -> None:
        """
        Logs a dictionary as a JSON string.

        Parameters
        ----------
        source : Source
            Source of the logged value.
        value : dict
            Dictionary to log.
        """

        # Dictionaries are serialized exactly like any other JSON-encodable object.
        self.log_other(source, value, None)

    def log_other(self, source: Source, value: any, *_) -> None:
        """
        Logs an object as a JSON string.

        Parameters
        ----------
        source : Source
            Source of the logged value.
        value : any
            Value of any type to log.
        """

        tag = self.source_to_name(source)
        self._writer.add_text(tag, json.dumps(value), self._get_step(tag))

    def _get_step(self, name: str) -> int:
        """
        Returns the current step for a given source.

        Parameters
        ----------
        name : str
            Name of the source.

        Returns
        -------
        int
            Current step for the given source.
        """

        if not self._sync_steps:
            # Independent counter per source, starting from one.
            self._steps[name] += 1
            return self._steps[name]

        if name in self._current_values:
            # The source repeats within the current row, so a new global step begins.
            self._step += 1
            self._current_values.clear()

        self._current_values.add(name)
        return self._step
from abc import ABC
from enum import Enum
from typing import Union
from chex import Array, Scalar
from reinforced_lib.utils.exceptions import UnsupportedLogTypeError
class SourceType(Enum):
    # Category of entity a logged value originates from.
    OBSERVATION = 0
    STATE = 1
    METRIC = 2


# A logging source: either a (name, type) pair, a bare custom name, or None.
Source = Union[tuple[str, SourceType], str, None]
class BaseLogger(ABC):
    """
    Base interface for loggers. Subclasses override the ``log_*`` methods for the value
    types they support; unsupported types raise ``UnsupportedLogTypeError`` by default.
    """

    def __init__(self, **kwargs):
        # Keyword arguments are accepted (and ignored) so subclasses can share one config dict.
        pass

    def init(self, sources: list[Source]) -> None:
        """
        Initializes the logger given the list of all sources defined by the user.

        Parameters
        ----------
        sources : list[Source]
            List containing the sources to log.
        """

        pass

    def finish(self) -> None:
        """
        Finalizes the loggers work (e.g., closes file or shows plots).
        """

        pass

    def log_scalar(self, source: Source, value: Scalar, custom: bool) -> None:
        """
        Method of the logger interface used for logging scalar values.

        Parameters
        ----------
        source : Source
            Source of the logged value.
        value : float
            Scalar to log.
        custom : bool
            Whether the source is a custom source.
        """

        raise UnsupportedLogTypeError(type(self), type(value))

    def log_array(self, source: Source, value: Array, custom: bool) -> None:
        """
        Method of the logger interface used for logging one-dimensional arrays.

        Parameters
        ----------
        source : Source
            Source of the logged value.
        value : array_like
            Array to log.
        custom : bool
            Whether the source is a custom source.
        """

        raise UnsupportedLogTypeError(type(self), type(value))

    def log_dict(self, source: Source, value: dict, custom: bool) -> None:
        """
        Method of the logger interface used for logging dictionaries.

        Parameters
        ----------
        source : Source
            Source of the logged value.
        value : dict
            Dictionary to log.
        custom : bool
            Whether the source is a custom source.
        """

        raise UnsupportedLogTypeError(type(self), type(value))

    def log_other(self, source: Source, value: any, custom: bool) -> None:
        """
        Method of the logger interface used for logging other values.

        Parameters
        ----------
        source : Source
            Source of the logged value.
        value : any
            Value of any type to log.
        custom : bool
            Whether the source is a custom source.
        """

        raise UnsupportedLogTypeError(type(self), type(value))

    @staticmethod
    def source_to_name(source: Source) -> str:
        """
        Returns a full name of the source. If source is a string itself, returns that string.
        Otherwise, it returns a string in the format "name-sourcetype" (e.g., "action-metric").

        Parameters
        ----------
        source : Source
            Source of the logged value.

        Returns
        -------
        str
            Name of the source.
        """

        if isinstance(source, tuple):
            return f'{source[0]}-{source[1].name.lower()}'
        else:
            # Note: a ``None`` source is passed through unchanged.
            return source
import json
import jax.numpy as jnp
import numpy as np
from chex import Array, Scalar
from reinforced_lib.logs import BaseLogger, Source
class StdoutLogger(BaseLogger):
    """
    Logger that writes values to the standard output. Standard sources are buffered into
    tab-separated rows (one entry per source); custom sources are printed immediately.
    """

    def __init__(self, **kwargs) -> None:
        super().__init__(**kwargs)

        # Buffer of the current row: source name -> most recent value.
        self._values = {}

    def finish(self) -> None:
        """
        Prints the last row if there are any unprinted values left.
        """

        if self._values:
            self._print_row()

    def log_scalar(self, source: Source, value: Scalar, custom: bool) -> None:
        """
        Logs a scalar as the standard value.

        Parameters
        ----------
        source : Source
            Source of the logged value.
        value : float
            Scalar to log.
        custom : bool
            Whether the source is a custom source.
        """

        self._log(source, value, custom)

    def log_array(self, source: Source, value: Array, custom: bool) -> None:
        """
        Logs an array as a JSON string.

        Parameters
        ----------
        source : Source
            Source of the logged value.
        value : array_like
            Array to log.
        custom : bool
            Whether the source is a custom source.
        """

        # JAX/NumPy arrays are not JSON-serializable directly, so convert them first.
        if isinstance(value, (np.ndarray, jnp.ndarray)):
            value = value.tolist()

        self._log(source, json.dumps(value), custom)

    def log_dict(self, source: Source, value: dict, custom: bool) -> None:
        """
        Logs a dictionary as a JSON string.

        Parameters
        ----------
        source : Source
            Source of the logged value.
        value : dict
            Dictionary to log.
        custom : bool
            Whether the source is a custom source.
        """

        self._log(source, json.dumps(value), custom)

    def log_other(self, source: Source, value: any, custom: bool) -> None:
        """
        Logs an object as a JSON string.

        Parameters
        ----------
        source : Source
            Source of the logged value.
        value : any
            Value of any type to log.
        custom : bool
            Whether the source is a custom source.
        """

        self._log(source, json.dumps(value), custom)

    def _print_row(self) -> None:
        """
        Prints the buffered row as tab-separated "name: value" entries.
        """

        print('\t'.join(f'{n}: {v}' for n, v in self._values.items()))

    def _log(self, source: Source, value: any, custom: bool) -> None:
        """
        Buffers standard values into rows and prints custom values immediately.

        Parameters
        ----------
        source : Source
            Source of the logged value.
        value : any
            Value of any type to log.
        custom : bool
            Whether the source is a custom source.
        """

        name = self.source_to_name(source)

        if custom:
            # Custom sources bypass the row buffer and go straight to stdout.
            print(f'{name}: {value}')
            return

        if name in self._values:
            # The source already appears in the buffered row, so the row is complete.
            self._print_row()
            self._values = {}

        self._values[name] = value
from collections import defaultdict
from typing import Callable
from reinforced_lib.agents import BaseAgent
from reinforced_lib.logs import BaseLogger, Source, SourceType
from reinforced_lib.utils.exceptions import IncorrectLoggerTypeError, IncorrectSourceTypeError
from reinforced_lib.utils import is_scalar, is_array, is_dict
class LogsObserver:
"""
Class responsible for managing singleton instances of the loggers, initialization and finalization
of the loggers, and passing the logged values to the appropriate loggers and their methods.
"""
def __init__(self) -> None:
self._logger_instances = {}
self._logger_sources = defaultdict(list)
self._observations_loggers = defaultdict(list)
self._agent_state_loggers = defaultdict(list)
self._metrics_loggers = defaultdict(list)
self._custom_loggers = defaultdict(list)
def add_logger(self, source: Source, logger_type: type, logger_params: dict[str, any]) -> None:
"""
Initializes a singleton instance of the logger and connects a given source with that logger.
Parameters
----------
source : Source
Source to connect.
logger_type : type
Type of the selected loger.
logger_params : dict
Parameters of the selected logger.
"""
if not issubclass(logger_type, BaseLogger):
raise IncorrectLoggerTypeError(logger_type)
if isinstance(source, tuple):
if len(source) != 2 or not isinstance(source[0], str) or not hasattr(source[1], 'name'):
raise IncorrectSourceTypeError(type(source))
elif source is not None and not isinstance(source, str):
raise IncorrectSourceTypeError(type(source))
logger = self._logger_instances.get(logger_type, logger_type(**logger_params))
if isinstance(source, tuple):
if source[1] == SourceType.OBSERVATION:
self._observations_loggers[logger].append((source, source[0]))
elif source[1] == SourceType.STATE:
self._agent_state_loggers[logger].append((source, source[0]))
elif source[1] == SourceType.METRIC:
self._metrics_loggers[logger].append((source, source[0]))
elif isinstance(source, str):
self._observations_loggers[logger].append((source, source))
self._agent_state_loggers[logger].append((source, source))
self._metrics_loggers[logger].append((source, source))
elif source is None:
self._custom_loggers[logger] = [(None, None)]
self._logger_sources[logger].append(source)
self._logger_instances[logger_type] = logger
def init_loggers(self):
"""
Initializes all loggers by calling their ``init`` method.
"""
for logger, sources in self._logger_sources.items():
logger.init(sources)
def finish_loggers(self):
"""
Finalizes the work of all loggers by calling their ``finish`` method.
"""
for logger in self._logger_sources.keys():
logger.finish()
def update_observations(self, observations: any) -> None:
"""
Passes new observations to the loggers.
Parameters
----------
observations : dicy or any
Observations received by the agent.
"""
if isinstance(observations, dict):
self._update(self._observations_loggers, lambda name: observations.get(name, None))
else:
self._update(self._observations_loggers, lambda name: observations)
def update_agent_state(self, agent_state: BaseAgent) -> None:
"""
Passes the agent state to the loggers.
Parameters
----------
agent_state : BaseAgent
Current agent state.
"""
self._update(self._agent_state_loggers, lambda name: getattr(agent_state, name, None))
def update_metrics(self, metric: any, metric_name: str) -> None:
"""
Passes metrics to loggers.
Parameters
----------
metric : any
Metric value.
metric_name : str
Name of the metric.
"""
self._update(self._metrics_loggers, lambda name: metric if name == metric_name else None)
def update_custom(self, value: any, name: str) -> None:
"""
Passes values provided by the user to the loggers.
Parameters
----------
value : any
Value to log.
name : str
Name of the value.
"""
self._update(self._custom_loggers, lambda _: (name, value))
@staticmethod
def _update(loggers: dict[BaseLogger, list[Source]], get_value: Callable) -> None:
"""
Passes values to the appropriate loggers and method based on the type and the source of the value.
Parameters
----------
loggers : dict
Dictionary with the loggers instances and the connected sources.
get_value : callable
Function that gets the selected value from the observations, state, or metrics.
"""
for logger, sources in loggers.items():
for source, name in sources:
if (value := get_value(name)) is not None:
if name is None:
source, value = value
custom = name is None
if is_scalar(value):
logger.log_scalar(source, value, custom)
elif is_dict(value):
logger.log_dict(source, value, custom)
elif is_array(value):
logger.log_array(source, value, custom)
else:
logger.log_other(source, value, custom) | /reinforced-lib-1.0.0.tar.gz/reinforced-lib-1.0.0/reinforced_lib/logs/logs_observer.py | 0.941244 | 0.279441 | logs_observer.py | pypi |
import copy
import warnings
from dataclasses import dataclass
from typing import Callable, Union, Dict, Any
from typing import List
import gym
import matplotlib.pyplot as plt
import numpy as np
import seaborn as sns
from joblib import Parallel, delayed
from joblib.externals.loky.process_executor import BrokenProcessPool
from reinforcement_learning_keras.agents.agent_base import AgentBase
from reinforcement_learning_keras.enviroments.config_base import ConfigBase
@dataclass
class AgentExperiment:
    """
    Trains ``n_reps`` independent copies of one agent configuration and
    aggregates the results: plots the training histories, replays the best
    agent, and saves it.
    """
    agent_class: Callable          # Agent constructor, e.g. a DeepQAgent class.
    agent_config: ConfigBase       # Environment/agent config used to build each agent.
    name: str = "unnamed_experiment"
    n_reps: int = 5                # Number of independently trained agents.
    n_jobs: int = 1                # Parallel joblib workers (falls back to 1 on failure).
    training_options: Union[None, Dict[str, Any]] = None

    def __post_init__(self):
        self._trained_agents: List[AgentBase] = []
        self._set_default_training_options()

    def _set_default_training_options(self):
        """Fill any unset training options with the experiment defaults."""
        if self.training_options is None:
            self.training_options = {}

        defaults = {"n_episodes": 500, "max_episode_steps": 500, "render": False, "verbose": False}
        for k, v in defaults.items():
            if k not in self.training_options:
                self.training_options[k] = defaults[k]

    @property
    def agent_scores(self) -> List[float]:
        # "Current performance" is each agent's rolling-average recent score.
        return [a.training_history.current_performance for a in self._trained_agents]

    @property
    def best_agent(self) -> AgentBase:
        return self._trained_agents[int(np.argmax(self.agent_scores))]

    @property
    def worst_agent(self) -> AgentBase:
        return self._trained_agents[int(np.argmin(self.agent_scores))]

    @staticmethod
    def _fit_agent(agent_class: Callable, agent_config: ConfigBase, training_options: Dict[str, Any]):
        """Build, train, save, and return a single agent (runs in a joblib worker)."""
        with warnings.catch_warnings():
            warnings.simplefilter('ignore', FutureWarning)

            config_dict = agent_config.build()
            # Give each agent a unique name for easier tracking with verbose and multiprocessing
            config_dict["name"] = f"{config_dict.get('name', 'Agent')}_{np.random.randint(0, 2 ** 16)}"
            agent = agent_class(**config_dict)
            agent.train(**training_options)
            # Might as well save agent. This will also unready and save buffers, models, etc.
            agent.save()
            agent.unready()

        return agent

    def _run(self) -> None:
        """Train all repetitions (possibly in parallel) and store the agents."""
        self._trained_agents = Parallel(
            backend='loky', verbose=10,
            n_jobs=self.n_jobs)(delayed(self._fit_agent)(self.agent_class, self.agent_config, self.training_options)
                                for _ in range(self.n_reps))

    def run(self) -> None:
        """Run the full experiment: train, plot, replay the best agent, save."""
        try:
            # All agents support pickling, so joblib can run some in parallel...
            self._run()
        except BrokenProcessPool:
            # ... Except for TF models running on GPU, they'll probably crap out. Run 1 by 1.
            # OR it'll crash Chrome, and Python, and hang for all eternity. It's best not to rely on this.
            self.n_jobs = 1
            self._run()

        self.plot()
        self.play_best()
        self.save_best_agent()

    def plot(self, err: str = 'range') -> None:
        """
        Plot reward vs episode for experiment.

        Plots:
         - Mean of all agents
         - Std or range of all agents (not constrained by max episodes or minimum score)
         - Min and max score across all agents for each episode (observed; constrained by max episodes or minimum score)
         - Score of best and worst agents (with 5% moving average). Best and worst defined using "current_performance"
           property of agents, which is mean score over most recent n episodes, where n is whatever the rolling average
           specified in the agents training history was.

        :param err: 'range' to shade min/max across agents, anything else shades +/- one std.
        """
        sns.set()

        # One column per agent, one row per episode.
        full_history = np.hstack([np.vstack(a.training_history.get_metric('total_reward'))
                                  for a in self._trained_agents])

        # Summary stats
        n_episodes = full_history.shape[0]
        y_mean = np.mean(full_history, axis=1)
        plt.plot(y_mean, color='#1f77b4', label='Mean score', lw=1.25)

        if err == 'range':
            plt.fill_between(range(n_episodes), np.min(full_history, axis=1), np.max(full_history, axis=1),
                             color='lightgrey', label='Score range', alpha=0.5)
        else:
            y_std = np.std(full_history, axis=1)
            plt.fill_between(range(n_episodes), y_mean - y_std, y_mean + y_std,
                             color='#1f77b4', label='Score std', alpha=0.3)

        # Best and worst agents
        mv_avg_pts = max(1, int(n_episodes * 0.05))  # 5% moving avg
        plt.plot(np.convolve(self.best_agent.training_history.get_metric('total_reward'),
                             np.ones(mv_avg_pts), 'valid') / mv_avg_pts,
                 label='Best (mv avg)', ls='--', color='#d62728', lw=0.7)
        plt.plot(np.convolve(self.worst_agent.training_history.get_metric('total_reward'),
                             np.ones(mv_avg_pts), 'valid') / mv_avg_pts,
                 label='Worst (mv avg)', ls='--', color='#9467bd', lw=0.7)

        plt.title(f'{self.name}', fontweight='bold')
        plt.xlabel('Episode', fontweight='bold')
        plt.ylabel('Score', fontweight='bold')
        plt.legend(title='Agents')
        plt.tight_layout()
        plt.savefig(f'{self.name}_{self.agent_config.env_spec}.png')
        plt.close()

    def play_best(self, episode_steps: Union[int, None] = None):
        """Replay one episode with the best agent, recording video via gym's Monitor if possible."""
        if episode_steps is None:
            episode_steps = self.training_options["max_episode_steps"]

        # Deep-copy so re-readying and wrapping don't mutate the stored agent.
        best_agent = copy.deepcopy(self.best_agent)
        best_agent.check_ready()
        best_agent.env_builder.set_env(gym.wrappers.Monitor(best_agent.env,
                                                            f'{self._trained_agents[0].name}_monitor_dir',
                                                            force=True))

        try:
            best_agent.play_episode(training=False, render=False, max_episode_steps=episode_steps)
        except (ImportError, gym.error.DependencyNotInstalled) as e:
            # Video recording needs extra dependencies (e.g. ffmpeg); skip gracefully.
            print(f"Monitor wrapper failed, not saving video: \n{e}")

    def save(self, fn: str):
        """Disabled for now... Needed?"""
        pass

    def save_best_agent(self):
        """Persist the best-scoring agent to disk."""
        self.best_agent.save() | /reinforced_learning_keras-0.6.1-py3-none-any.whl/reinforced_learning_keras/experiment/agent_experiment.py | 0.835584 | 0.297317 | agent_experiment.py | pypi |
import abc
from typing import List, Dict, Any
import gym
from reinforcement_learning_keras.agents.components.history.training_history import TrainingHistory
class ConfigBase(abc.ABC):
    """
    Base class for environment configs.

    Subclasses declare ``env_spec`` and ``supported_agents`` and implement the
    ``_build_for_*`` hooks they support; ``build`` dispatches on the requested
    agent type and attaches a ``TrainingHistory`` to the returned config dict.
    """
    # Gym environment id, e.g. 'CartPole-v0' (set by subclasses).
    env_spec: str
    # Names of agent types this config can build (set by subclasses).
    supported_agents: List[str]
    # GPU memory (MB) to reserve for this environment's models.
    gpu_memory: int = 256

    def __init__(self, agent_type: str, plot_during_training: bool = True, folder: str = ''):
        """
        :param agent_type: Name of the agent to build for; must be in ``supported_agents``.
        :param plot_during_training: Whether training history is plotted while training.
        :param folder: Output folder prefix for agent names/artifacts.
        """
        self.plot_during_training = plot_during_training
        self.folder = folder

        # Example env
        self.unwrapped_env = gym.make(self.env_spec)
        self.wrapped_env = self.unwrapped_env  # (Default no wrapper)

        self._check_supported(agent_type)
        self.agent_type = agent_type

    def _check_supported(self, agent_type: str):
        """Raise if this config cannot build the requested agent type."""
        if agent_type not in self.supported_agents:
            raise NotImplementedError(f"Agent {agent_type} not in supported agents: {self.supported_agents}")

    @property
    def _default_training_history_kwargs(self) -> Dict[str, Any]:
        # Defaults for the TrainingHistory attached in ``build``; subclasses may override.
        return {"plotting_on": self.plot_during_training, "plot_every": 50, "rolling_average": 25}

    def build(self) -> Dict[str, Any]:
        """
        Build the config dict for ``self.agent_type``.

        Dispatches to the matching ``_build_for_*`` hook, then attaches a
        ``TrainingHistory`` under the 'training_history' key.
        """
        if self.agent_type.lower() == 'linear_q':
            config_dict = self._build_for_linear_q()
        elif self.agent_type.lower() == 'dqn':
            config_dict = self._build_for_dqn()
        elif self.agent_type.lower() == 'dueling_dqn':
            config_dict = self._build_for_dueling_dqn()
        elif self.agent_type.lower() == 'double_dqn':
            config_dict = self._build_for_double_dqn()
        elif self.agent_type.lower() == 'double_dueling_dqn':
            config_dict = self._build_for_double_dueling_dqn()
        elif self.agent_type.lower() == 'reinforce':
            config_dict = self._build_for_reinforce()
        elif self.agent_type.lower() == 'random':
            config_dict = self._build_for_random()
        else:
            raise NotImplementedError

        config_dict.update({'training_history': TrainingHistory(agent_name=config_dict['name'],
                                                                **self._default_training_history_kwargs)})

        return config_dict

    # Per-agent hooks: subclasses override the ones listed in ``supported_agents``.
    def _build_for_linear_q(self) -> Dict[str, Any]:
        raise NotImplementedError

    def _build_for_dqn(self) -> Dict[str, Any]:
        raise NotImplementedError

    def _build_for_dueling_dqn(self) -> Dict[str, Any]:
        raise NotImplementedError

    def _build_for_double_dqn(self) -> Dict[str, Any]:
        raise NotImplementedError

    def _build_for_double_dueling_dqn(self) -> Dict[str, Any]:
        raise NotImplementedError

    def _build_for_random(self) -> Dict[str, Any]:
        raise NotImplementedError

    def _build_for_reinforce(self) -> Dict[str, Any]:
        raise NotImplementedError | /reinforced_learning_keras-0.6.1-py3-none-any.whl/reinforced_learning_keras/enviroments/config_base.py | 0.832305 | 0.260832 | config_base.py | pypi |
import abc
from dataclasses import dataclass
from typing import Tuple, Union, Callable, List
from tensorflow import keras
@dataclass
class ModelBase(abc.ABC):
    # NOTE(review): with an explicit __init__ defined, @dataclass generates
    # neither fields nor an __init__ here - the decorator looks effectively
    # inert; confirm intent before removing.
    def __init__(self, observation_shape: List[int], n_actions: int, output_activation: Union[None, str] = None,
                 unit_scale: int = 1, learning_rate: float = 0.0001, opt: str = 'Adam') -> None:
        """
        :param observation_shape: Tuple specifying input shape.
        :param n_actions: Int specifying number of outputs
        :param output_activation: Activation function for output. Eg. None for value estimation (off-policy methods) or
                                  'softmax' for action probabilities (on-policy methods).
        :param unit_scale: Multiplier for all units in FC layers in network. Default 1 = 16 units for first layer,
                           8 for second.
        :param opt: Keras optimiser to use. Should be string. This is to avoid storing TF/Keras objects here.
        :param learning_rate: Learning rate for optimiser.
        """
        self.observation_shape = observation_shape
        self.n_actions = n_actions
        self.unit_scale = unit_scale
        self.output_activation = output_activation
        self.learning_rate = learning_rate
        self.opt = opt

    def compile(self, model_name: str = 'model', loss: Union[str, Callable] = 'mse') -> keras.Model:
        """
        Compile a copy of the model using the provided loss.

        Note loss is added here to avoid storing in self. We don't want to do that, as if this model is pickled deepcopy
        will be disabled for TF objects if eager mode is disabled. It's better to use as needed rather than storing.

        :param model_name: Name of model
        :param loss: Model loss. Default 'mse'. Can be custom callable.
        """
        # Get optimiser
        if self.opt.lower() == 'adam':
            opt = keras.optimizers.Adam
        elif self.opt.lower() == 'rmsprop':
            opt = keras.optimizers.RMSprop
        else:
            raise ValueError(f"Invalid optimiser {self.opt}")

        # Subclasses supply the architecture; a fresh model is built per call.
        state_input, action_output = self._model_architecture()
        model = keras.Model(inputs=[state_input], outputs=[action_output], name=model_name)
        model.compile(optimizer=opt(learning_rate=self.learning_rate), loss=loss)

        return model

    @abc.abstractmethod
    def _model_architecture(self) -> Tuple[keras.layers.Layer, keras.layers.Layer]:
        """Define model construction function. Should return input layer and output layer."""
        pass

    def plot(self, model_name: str = 'model') -> None:
        """Render the compiled model's architecture diagram to '<model_name>.png'."""
        keras.utils.plot_model(self.compile(model_name), to_file=f"{model_name}.png", show_shapes=True) | /reinforced_learning_keras-0.6.1-py3-none-any.whl/reinforced_learning_keras/enviroments/model_base.py | 0.930946 | 0.443118 | model_base.py | pypi |
import os
from functools import partial
from typing import Any, Dict
from reinforcement_learning_keras.agents.components.replay_buffers.continuous_buffer import ContinuousBuffer
from reinforcement_learning_keras.agents.q_learning.exploration.epsilon_greedy import EpsilonGreedy
from reinforcement_learning_keras.enviroments.atari.environment_processing.fire_start_wrapper import FireStartWrapper
from reinforcement_learning_keras.enviroments.atari.environment_processing.frame_buffer_wrapper import \
FrameBufferWrapper
from reinforcement_learning_keras.enviroments.atari.environment_processing.image_process_wrapper import \
ImageProcessWrapper
from reinforcement_learning_keras.enviroments.atari.environment_processing.max_and_skip_wrapper import MaxAndSkipWrapper
from reinforcement_learning_keras.agents.models.conv_nn import ConvNN
from reinforcement_learning_keras.agents.models.dueling_conv_nn import DuelingConvNN
from reinforcement_learning_keras.enviroments.config_base import ConfigBase
class AtariDefaultConfig(ConfigBase):
    """Defines default configs for Atari environments."""
    env_spec: str
    supported_agents = ('dqn', 'double_dqn', 'dueling_dqn', 'double_dueling_dqn', 'random')
    # 'diff': observation is the difference of 2 consecutive frames (1 channel);
    # 'stack': observation is 3 stacked frames (3 channels).
    supported_modes = ('diff', 'stack')
    gpu_memory: int = 2048

    # Wrapper pipelines applied innermost-first: frame skipping/pooling,
    # image preprocessing, fire-to-start, then the frame buffer.
    _wrappers_stack = (MaxAndSkipWrapper, ImageProcessWrapper, FireStartWrapper, FrameBufferWrapper)
    _wrappers_diff = (MaxAndSkipWrapper, ImageProcessWrapper, FireStartWrapper,
                      partial(FrameBufferWrapper, buffer_length=2, buffer_function='diff'))

    def __init__(self, *args, mode: str = 'diff', **kwargs) -> None:
        """
        :param mode: Observation mode; one of ``supported_modes`` ('diff' or 'stack').
        """
        super().__init__(*args, **kwargs)

        if mode not in self.supported_modes:
            raise ValueError(f"Mode {mode} is not a supported mode ({self.supported_modes})")
        self.mode = mode

        if self.mode == "diff":
            self.env_wrappers = self._wrappers_diff
            # Single channel: frame difference.
            self.frame_depth = 1
            self.wrapped_env = FrameBufferWrapper(
                FireStartWrapper(ImageProcessWrapper(MaxAndSkipWrapper(self.unwrapped_env))),
                buffer_length=2, buffer_function='diff')

        if self.mode == "stack":
            self.env_wrappers = self._wrappers_stack
            # Three channels: stacked frames.
            self.frame_depth = 3
            self.wrapped_env = FrameBufferWrapper(FireStartWrapper(
                ImageProcessWrapper(MaxAndSkipWrapper(self.unwrapped_env))),
                buffer_length=3, buffer_function='stack')

    def _build_for_dqn(self) -> Dict[str, Any]:
        return {'name': os.path.join(self.folder, 'DeepQAgent'),
                'env_spec': self.env_spec,
                'env_wrappers': self.env_wrappers,
                'model_architecture': ConvNN(observation_shape=(84, 84, self.frame_depth), n_actions=6,
                                             output_activation=None, opt='adam', learning_rate=0.000105),
                'gamma': 0.99,
                'final_reward': None,
                # Use eps_initial > 1 here so only random actions used for first steps, which will make filling the
                # replay buffer more efficient. It'll also avoid decaying eps while not training.
                # Alternative: 'eps': EpsilonGreedy(eps_initial=1.2, decay=0.000025, eps_min=0.01,
                #                                   decay_schedule='compound'),
                'eps': EpsilonGreedy(eps_initial=1.1, decay=0.00001, eps_min=0.01, decay_schedule='linear'),
                'replay_buffer': ContinuousBuffer(buffer_size=10000),
                'replay_buffer_samples': 32}

    def _build_for_dueling_dqn(self) -> Dict[str, Any]:
        # Same as DQN, but with a dueling conv architecture.
        config_dict = self._build_for_dqn()
        config_dict.update({'name': os.path.join(self.folder, 'DuelingDQN'),
                            'model_architecture': DuelingConvNN(observation_shape=(84, 84, self.frame_depth),
                                                                n_actions=6, opt='adam', learning_rate=0.000102)})

        return config_dict

    def _build_for_double_dqn(self) -> Dict[str, Any]:
        # Same as DQN, plus a target network ('double': True).
        config_dict = self._build_for_dqn()
        config_dict.update({'name': os.path.join(self.folder, 'DoubleDQN'),
                            'double': True,
                            'model_architecture': ConvNN(observation_shape=(84, 84, self.frame_depth),
                                                         n_actions=6, opt='adam', learning_rate=0.000102)})

        return config_dict

    def _build_for_double_dueling_dqn(self) -> Dict[str, Any]:
        # Dueling architecture combined with a target network.
        config_dict = self._build_for_dqn()
        config_dict.update({'name': os.path.join(self.folder, 'DoubleDuelingDQN'),
                            'double': True,
                            'model_architecture': DuelingConvNN(observation_shape=(84, 84, self.frame_depth),
                                                                n_actions=6, opt='adam', learning_rate=0.000102)})

        return config_dict

    def _build_for_random(self):
        return {'name': os.path.join(self.folder, 'RandomAgent'),
                'env_spec': self.env_spec} | /reinforced_learning_keras-0.6.1-py3-none-any.whl/reinforced_learning_keras/enviroments/atari/atari_default_config.py | 0.868409 | 0.297566 | atari_default_config.py | pypi |
import collections
from typing import Any, Dict, Tuple, Callable
import gym
import numpy as np
class MaxAndSkipWrapper(gym.Wrapper):
    """Pools frames across steps, returning max, and repeat action for a number of frames."""

    def __init__(self, env: gym.Env,
                 frame_buffer_length: int = 2,
                 n_action_frames: int = 4,
                 frame_buffer_agg_f: Callable = np.max) -> None:
        """
        :param env: Gym environment to wrap, "inner environment".
        :param frame_buffer_length: Max number of frames to collect. FIFO buffer, contains most recent frames only.
        :param n_action_frames: Number of frames to repeat action for. Can be longer than frame buffer.
        :param frame_buffer_agg_f: Function used to aggregate frames in frame buffer to create observation.
                                   Default np.max.
        """
        super().__init__(env)
        self._n_action_frames = n_action_frames
        self._frame_buffer_length = frame_buffer_length
        self._frame_buffer_agg_f = frame_buffer_agg_f

        self._prepare_frame_buffer()

    def _prepare_frame_buffer(self) -> None:
        # deque with maxlen drops the oldest frame automatically on append.
        self._frame_buffer = collections.deque(maxlen=self._frame_buffer_length)

    def _aggregate_buffer_frames(self, ) -> np.ndarray:
        # Stack buffered frames on a new leading axis and aggregate along it
        # (default: pixel-wise max, which removes Atari sprite flicker).
        return self._frame_buffer_agg_f(np.stack(self._frame_buffer), axis=0)

    def step(self, action: int) -> Tuple[np.ndarray, float, bool, Dict[Any, Any]]:
        """
        Each step call iterates inner env 4 times, max of these is returned.

        Same action is applied for each step, and reward accumulated. If done, loop breaks and returns outputs
        based on shorter pool.

        :param action: Int id of action to perform.
        """
        total_reward = 0.0
        done = False
        info = {}
        for _ in range(self._n_action_frames):
            obs, reward, done, info = self.env.step(action)
            self._frame_buffer.append(obs)
            total_reward += reward
            if done:
                break

        agg_frame = self._aggregate_buffer_frames()

        return agg_frame, total_reward, done, info

    def reset(self) -> np.ndarray:
        """
        Additionally clears frame buffer

        :return: Observation from inner_env.reset() call.
        """
        # Drop stale frames from the previous episode before seeding the buffer.
        self._frame_buffer.clear()
        obs = self.env.reset()
        self._frame_buffer.append(obs)

        return obs | /reinforced_learning_keras-0.6.1-py3-none-any.whl/reinforced_learning_keras/enviroments/atari/environment_processing/max_and_skip_wrapper.py | 0.925734 | 0.368633 | max_and_skip_wrapper.py | pypi |
import collections
from typing import Any, Dict, Tuple
import gym
import numpy as np
class FrameBufferWrapper(gym.Wrapper):
    """
    Adds last step obs to buffer, returns whole buffer.

    Returned buffer contains previous steps, eg. for env.step at t=3, returns obs for t=3, t=2, t=1. In games like pong,
    this adds directional information to the returned observations.
    """

    def __init__(self, env: gym.Env,
                 obs_shape: Tuple[int, int] = (84, 84),
                 buffer_length: int = 3,
                 buffer_function: str = 'stack') -> None:
        """
        :param env: Gym env.
        :param obs_shape: Expected shape of single observation.
        :param buffer_length: Number of frames to include in buffer.
        :param buffer_function: Function to apply to use contents of buffer. Supports 'stack or 'diff':
                                  - 'stack' stack contents of buffer on a new (final) axis
                                  - 'diff' take diff between two frames without changing dimensions.
        """
        super().__init__(env)
        self._buffer_length = buffer_length
        self._buffer_function = buffer_function
        self._obs_shape = obs_shape

        self._prepare_obs_buffer()

    def _prepare_obs_buffer(self) -> None:
        """Create buffer and preallocate with empty arrays of expected shape."""
        # Zero-filled frames pad the buffer so the very first observations
        # already have the full buffered shape.
        self._obs_buffer = collections.deque(maxlen=self._buffer_length)

        for _ in range(self._buffer_length):
            self._obs_buffer.append(np.zeros(shape=self._obs_shape))

    def _buffer_obs(self) -> np.ndarray:
        """Combine buffered frames per ``buffer_function`` into one observation."""
        agg_buff = None
        if self._buffer_function == "stack":
            # Frames become channels on a new trailing axis.
            agg_buff = np.stack([obs.squeeze() for obs in self._obs_buffer],
                                axis=len(self._obs_shape))

        if self._buffer_function == 'diff':
            if self._buffer_length != 2:
                raise ValueError("When using diff, buffer length must be 2.")
            # Newest minus oldest frame, kept as a single trailing channel.
            agg_buff = self._obs_buffer[1].squeeze() - self._obs_buffer[0].squeeze()
            agg_buff = np.expand_dims(agg_buff,
                                      axis=len(self._obs_shape))

        if agg_buff is None:
            raise ValueError(f"Unknown buffer op {self._buffer_function}")

        return agg_buff

    def step(self, action: int) -> Tuple[np.ndarray, float, bool, Dict[Any, Any]]:
        """Step env, add new obs to buffer, return buffer."""
        obs, reward, done, info = self.env.step(action)
        self._obs_buffer.append(obs)

        return self._buffer_obs(), reward, done, info

    def reset(self) -> np.ndarray:
        """Add initial obs to end of pre-allocated buffer.

        :return: Buffered observation
        """
        # Re-zero the buffer so no frames leak across episode boundaries.
        self._prepare_obs_buffer()
        obs = self.env.reset()
        self._obs_buffer.append(obs)

        return self._buffer_obs() | /reinforced_learning_keras-0.6.1-py3-none-any.whl/reinforced_learning_keras/enviroments/atari/environment_processing/frame_buffer_wrapper.py | 0.933756 | 0.479199 | frame_buffer_wrapper.py | pypi |
import os
from functools import partial
from typing import Dict, Any
from reinforcement_learning_keras.agents.components.replay_buffers.continuous_buffer import ContinuousBuffer
from reinforcement_learning_keras.agents.models.conv_nn import ConvNN
from reinforcement_learning_keras.agents.q_learning.exploration.epsilon_greedy import EpsilonGreedy
from reinforcement_learning_keras.enviroments.atari.atari_default_config import AtariDefaultConfig
from reinforcement_learning_keras.enviroments.atari.environment_processing.fire_start_wrapper import FireStartWrapper
from reinforcement_learning_keras.enviroments.atari.environment_processing.frame_buffer_wrapper import \
FrameBufferWrapper
from reinforcement_learning_keras.enviroments.atari.environment_processing.image_process_wrapper import \
ImageProcessWrapper
from reinforcement_learning_keras.enviroments.atari.environment_processing.max_and_skip_wrapper import MaxAndSkipWrapper
class SpaceInvadersConfig(AtariDefaultConfig):
    """Defines configs tweaks for Space Invaders."""
    env_spec = 'SpaceInvadersNoFrameskip-v0'

    # Space Invaders pools over 4 skipped frames (vs the Atari default of 2).
    _wrappers_stack = (partial(MaxAndSkipWrapper, frame_buffer_length=4),
                       ImageProcessWrapper,
                       FireStartWrapper,
                       FrameBufferWrapper)
    _wrappers_diff = (partial(MaxAndSkipWrapper, frame_buffer_length=4),
                      ImageProcessWrapper,
                      FireStartWrapper,
                      partial(FrameBufferWrapper, buffer_length=2, buffer_function='diff'))

    def _build_for_dqn(self) -> Dict[str, Any]:
        return {'name': os.path.join(self.folder, 'DeepQAgent'),
                'env_spec': self.env_spec,
                'env_wrappers': self.env_wrappers,
                'model_architecture': ConvNN(observation_shape=(84, 84, self.frame_depth), n_actions=6,
                                             output_activation=None, opt='adam', learning_rate=0.00008),
                'gamma': 0.99,
                'final_reward': None,
                # eps_initial > 1: first steps are fully random (see note in
                # AtariDefaultConfig._build_for_dqn) - presumably to fill the
                # replay buffer efficiently before training begins.
                'eps': EpsilonGreedy(eps_initial=2, decay=0.000025, eps_min=0.01, decay_schedule='linear'),
                'replay_buffer': ContinuousBuffer(buffer_size=40000),
                'replay_buffer_samples': 32} | /reinforced_learning_keras-0.6.1-py3-none-any.whl/reinforced_learning_keras/enviroments/atari/space_invaders/space_invaders_config.py | 0.760384 | 0.282377 | space_invaders_config.py | pypi |
import os
from functools import partial
from typing import Any, Dict
from reinforcement_learning_keras.agents.components.replay_buffers.continuous_buffer import ContinuousBuffer
from reinforcement_learning_keras.agents.q_learning.exploration.epsilon_greedy import EpsilonGreedy
from reinforcement_learning_keras.enviroments.cart_pole.environment_processing.clipepr_wrapper import ClipperWrapper
from reinforcement_learning_keras.enviroments.cart_pole.environment_processing.rbf_wrapepr import RBFSWrapper
from reinforcement_learning_keras.enviroments.cart_pole.environment_processing.squeeze_wrapper import SqueezeWrapper
from reinforcement_learning_keras.enviroments.cart_pole.models.small_dueling_nn import SmallDuelingNN
from reinforcement_learning_keras.enviroments.cart_pole.models.small_nn import SmallNN
from reinforcement_learning_keras.enviroments.config_base import ConfigBase
class CartPoleConfig(ConfigBase):
    """Defines config for cart_pole."""
    env_spec = 'CartPole-v0'
    supported_agents = ('linear_q', 'dqn', 'double_dqn', 'dueling_dqn', 'double_dueling_dqn', 'reinforce', 'random')
    gpu_memory = 128

    @property
    def _default_training_history_kwargs(self) -> Dict[str, Any]:
        # CartPole episodes are short; plot and average more frequently than the base default.
        return {"plotting_on": self.plot_during_training,
                "plot_every": 25, "rolling_average": 12}

    def _build_for_linear_q(self) -> Dict[str, Any]:
        # Linear Q needs a featurised state: clip raw obs, then RBF features.
        return {'name': os.path.join(self.folder, 'LinearQAgent'),
                'env_spec': self.env_spec,
                'env_wrappers': [partial(ClipperWrapper, lim=(-1, 1)), RBFSWrapper, SqueezeWrapper],
                'gamma': 0.99,
                'log_exemplar_space': False,
                'final_reward': -200,
                'eps': EpsilonGreedy(eps_initial=0.4, eps_min=0.01)}

    def _build_for_dqn(self) -> Dict[str, Any]:
        return {'name': os.path.join(self.folder, 'DeepQAgent'),
                'env_spec': self.env_spec,
                'model_architecture': SmallNN(observation_shape=(4,), n_actions=2, output_activation=None,
                                              opt='adam', learning_rate=0.001),
                'gamma': 0.99,
                'final_reward': -200,
                'replay_buffer_samples': 75,
                'eps': EpsilonGreedy(eps_initial=0.2, decay=0.002, eps_min=0.002),
                'replay_buffer': ContinuousBuffer(buffer_size=200)}

    def _build_for_dueling_dqn(self) -> Dict[str, Any]:
        # DQN config with a dueling head.
        config_dict = self._build_for_dqn()
        config_dict.update({'name': os.path.join(self.folder, 'DuelingDQN'),
                            'model_architecture': SmallDuelingNN(observation_shape=(4,), n_actions=2, opt='adam',
                                                                 learning_rate=0.001)})

        return config_dict

    def _build_for_double_dueling_dqn(self) -> Dict[str, Any]:
        # Dueling head plus a target network.
        config_dict = self._build_for_dqn()
        config_dict.update({'name': os.path.join(self.folder, 'DoubleDuelingDQN'),
                            'double': True,
                            'model_architecture': SmallDuelingNN(observation_shape=(4,), n_actions=2, opt='adam',
                                                                 learning_rate=0.001)})

        return config_dict

    def _build_for_double_dqn(self) -> Dict[str, Any]:
        # DQN config with a target network.
        config_dict = self._build_for_dqn()
        config_dict.update({'name': os.path.join(self.folder, 'DoubleDQN'),
                            'double': True,
                            'model_architecture': SmallNN(observation_shape=(4,), n_actions=2, opt='adam',
                                                          learning_rate=0.001)})

        return config_dict

    def _build_for_reinforce(self) -> Dict[str, Any]:
        # On-policy agent: softmax output gives action probabilities.
        return {'name': os.path.join(self.folder, 'REINFORCEAgent'),
                'env_spec': self.env_spec,
                'model_architecture': SmallNN(observation_shape=(4,), n_actions=2, output_activation='softmax',
                                              opt='adam', learning_rate=0.001),
                'final_reward': -2,
                'gamma': 0.99,
                'alpha': 0.00001}

    def _build_for_random(self):
        return {'name': os.path.join(self.folder, 'RandomAgent'),
                'env_spec': self.env_spec} | /reinforced_learning_keras-0.6.1-py3-none-any.whl/reinforced_learning_keras/enviroments/cart_pole/cart_pole_config.py | 0.794385 | 0.26886 | cart_pole_config.py | pypi |
import os
from typing import Any, Dict
from reinforcement_learning_keras.agents.components.replay_buffers.continuous_buffer import ContinuousBuffer
from reinforcement_learning_keras.agents.q_learning.exploration.epsilon_greedy import EpsilonGreedy
from reinforcement_learning_keras.enviroments.cart_pole.models.small_dueling_nn import SmallDuelingNN
from reinforcement_learning_keras.enviroments.config_base import ConfigBase
from reinforcement_learning_keras.enviroments.mountain_car.models.small_nn import SmallNN
class MountainCarConfig(ConfigBase):
    """Defines config for mountain_car"""
    env_spec = 'MountainCar-v0'
    supported_agents = ('linear_q', 'dueling_dqn', 'dqn', 'random')
    gpu_memory = 128

    @property
    def _default_training_history_kwargs(self) -> Dict[str, Any]:
        # Mountain car trains slowly; plot less often than the base default.
        return {"plotting_on": self.plot_during_training, "plot_every": 200, "rolling_average": 12}

    def _build_for_linear_q(self) -> Dict[str, Any]:
        return {'name': os.path.join(self.folder, 'LinearQAgent'),
                'env_spec': self.env_spec,
                'final_reward': 500,
                'gamma': 0.99,
                'log_exemplar_space': False,
                'eps': EpsilonGreedy(eps_initial=0.3, eps_min=0.005)}

    def _build_for_dqn(self) -> Dict[str, Any]:
        """This isn't tuned."""
        return {'name': os.path.join(self.folder, 'DeepQAgent'),
                'env_spec': self.env_spec,
                'model_architecture': SmallNN(observation_shape=(2,), n_actions=3, output_activation=None,
                                              opt='adam', learning_rate=0.001),
                'gamma': 0.99,
                'final_reward': 650,
                'replay_buffer_samples': 32,
                'eps': EpsilonGreedy(eps_initial=0.1, decay=0.002, eps_min=0.002),
                'replay_buffer': ContinuousBuffer(buffer_size=200)}

    def _build_for_dueling_dqn(self) -> Dict[str, Any]:
        # DQN config with a dueling head.
        config_dict = self._build_for_dqn()
        config_dict.update({'name': os.path.join(self.folder, 'DuelingDQN'),
                            'model_architecture': SmallDuelingNN(observation_shape=(2,), n_actions=3,
                                                                 opt='adam', learning_rate=0.001)})

        return config_dict

    def _build_for_random(self):
        return {'name': os.path.join(self.folder, 'RandomAgent'),
                'env_spec': self.env_spec} | /reinforced_learning_keras-0.6.1-py3-none-any.whl/reinforced_learning_keras/enviroments/mountain_car/mountain_car_config.py | 0.768386 | 0.254017 | mountain_car_config.py | pypi |
import os
from abc import ABC
from functools import partial
from typing import Any, Dict, Tuple
# Although unused, this import will register Doom envs with Gym
# noinspection PyUnresolvedReferences
import vizdoomgym
from reinforcement_learning_keras.agents.components.replay_buffers.continuous_buffer import ContinuousBuffer
from reinforcement_learning_keras.agents.models.conv_nn import ConvNN
from reinforcement_learning_keras.agents.models.dueling_conv_nn import DuelingConvNN
from reinforcement_learning_keras.agents.q_learning.exploration.epsilon_greedy import EpsilonGreedy
from reinforcement_learning_keras.enviroments.config_base import ConfigBase
from reinforcement_learning_keras.enviroments.doom.environment_processing.frame_buffer_wrapper import FrameBufferWrapper
from reinforcement_learning_keras.enviroments.doom.environment_processing.image_process_wrapper import \
ImageProcessWrapper
class DoomDefaultConfig(ConfigBase, ABC):
    """
    Defines default configs for Doom.

    Subclasses select a scenario by overriding env_spec / n_actions; this base supplies the
    frame-processing wrapper pipelines and the per-agent kwarg builders.
    """
    env_spec = 'VizdoomBasic-v0'
    n_actions = 3
    supported_agents = ('dqn', 'dueling_dqn', 'double_dqn', 'double_dueling_dqn', 'random')
    supported_modes = ('diff', 'stack')
    gpu_memory: int = 2048

    # 3 different possible resolutions:
    # (96, 128) @ 40%
    # (96, 128) @ 20%
    # (90, 160) @ 20%
    res_scale: float = 0.4
    target_obs_shape: Tuple[int, int] = (96, 128)

    def __init__(self, *args, mode: str = 'diff', **kwargs) -> None:
        """
        :param mode: 'diff' feeds the model the difference of the last 2 processed frames
                     (frame depth 1); 'stack' feeds the last 3 frames stacked (frame depth 3).
        :raises ValueError: If mode is not one of supported_modes.
        """
        super().__init__(*args, **kwargs)

        if mode not in self.supported_modes:
            raise ValueError(f"Mode {mode} is not a supported mode ({self.supported_modes})")

        self._wrappers_stack = (partial(ImageProcessWrapper, scale=self.res_scale),
                                partial(FrameBufferWrapper, obs_shape=self.target_obs_shape,
                                        buffer_function='stack'))
        self._wrappers_diff = (partial(ImageProcessWrapper, scale=self.res_scale),
                               partial(FrameBufferWrapper, obs_shape=self.target_obs_shape,
                                       buffer_length=2, buffer_function='diff'))

        self.mode = mode
        if self.mode == "diff":
            self.env_wrappers = self._wrappers_diff
            self.frame_depth = 1
        elif self.mode == "stack":
            self.env_wrappers = self._wrappers_stack
            self.frame_depth = 3

        self.wrapped_env = self.env_wrappers[1](self.env_wrappers[0](self.unwrapped_env))

    def _model_obs_shape(self) -> Tuple[int, int, int]:
        """Observation shape fed to the conv models: (height, width, frame depth)."""
        return self.target_obs_shape[0], self.target_obs_shape[1], self.frame_depth

    def _build_for_dqn(self) -> Dict[str, Any]:
        """Kwargs for a DeepQAgent on this scenario."""
        return {'name': os.path.join(self.folder, 'DeepQAgent'),
                'env_spec': self.env_spec,
                'env_wrappers': self.env_wrappers,
                'model_architecture': ConvNN(
                    observation_shape=self._model_obs_shape(),
                    n_actions=self.n_actions, output_activation=None, opt='adam', learning_rate=0.0001),
                'gamma': 0.99,
                'final_reward': None,
                # NOTE(review): eps_initial > 1 — presumably to hold exploration at 100% for the
                # early part of the linear decay; confirm against EpsilonGreedy.select.
                'eps': EpsilonGreedy(eps_initial=1.2, decay=0.0000019, eps_min=0.01, decay_schedule='linear'),
                'replay_buffer': ContinuousBuffer(buffer_size=20000),
                'replay_buffer_samples': 32}

    def _build_for_dueling_dqn(self) -> Dict[str, Any]:
        """DQN kwargs with a dueling architecture swapped in."""
        config_dict = self._build_for_dqn()
        # Fixed: n_actions was hard-coded to 6 here, inconsistent with self.n_actions (= 3 for
        # this scenario) used by every other builder.
        config_dict.update({'name': os.path.join(self.folder, 'DuelingDQN'),
                            'model_architecture': DuelingConvNN(
                                observation_shape=self._model_obs_shape(),
                                n_actions=self.n_actions, opt='adam', learning_rate=0.0001)})
        return config_dict

    def _build_for_double_dqn(self) -> Dict[str, Any]:
        """DQN kwargs with double-DQN target selection enabled."""
        config_dict = self._build_for_dqn()
        config_dict.update({'name': os.path.join(self.folder, 'DoubleDQN'),
                            'double': True,
                            'model_architecture': ConvNN(
                                observation_shape=self._model_obs_shape(),
                                n_actions=self.n_actions, output_activation=None, opt='adam',
                                learning_rate=0.0001)})
        return config_dict

    def _build_for_double_dueling_dqn(self) -> Dict[str, Any]:
        """DQN kwargs with both the dueling architecture and double-DQN target selection."""
        config_dict = self._build_for_dqn()
        config_dict.update({'name': os.path.join(self.folder, 'DoubleDuelingDQN'),
                            'double': True,
                            'model_architecture': DuelingConvNN(
                                observation_shape=self._model_obs_shape(),
                                n_actions=self.n_actions, opt='adam', learning_rate=0.0001)})
        return config_dict

    def _build_for_random(self):
        """Kwargs for a RandomAgent; nothing to configure beyond name and env."""
        return {'name': os.path.join(self.folder, 'RandomAgent'),
                'env_spec': self.env_spec}
import abc
import copy
import gc
import time
from typing import Any, Callable, Union, Dict, Tuple, Iterable
import gym
import joblib
import numpy as np
from reinforcement_learning_keras.agents.components.helpers.env_builder import EnvBuilder
from reinforcement_learning_keras.agents.components.helpers.tqdm_handler import TQDMHandler
from reinforcement_learning_keras.agents.components.history.episode_report import EpisodeReport
from reinforcement_learning_keras.agents.components.history.training_history import TrainingHistory
class AgentBase(abc.ABC):
    """
    Common base for all agents: owns the env lifecycle (build/close), the default training loop,
    history tracking, and the pickling hooks. Model specifics are left to subclasses via the
    abstract methods.
    """
    name: str
    env_spec: str
    env_builder: Union[EnvBuilder, None]
    env_wrappers: Iterable[Callable]
    # Fixed: env_kwargs was previously annotated twice (once without a default); a single
    # defaulted declaration is kept.
    env_kwargs: Union[None, Dict[str, Any]] = None
    gamma: float
    final_reward: float
    ready: bool
    training_history: TrainingHistory

    # Class-level (shared) handler used to switch tqdm feedback on/off in .train.
    _tqdm = TQDMHandler()

    @property
    def env(self) -> gym.Env:
        """The gym environment owned by the current env_builder."""
        return self.env_builder.env

    def _pickle_compatible_getstate(self) -> Dict[str, Any]:
        """
        Prepare agent with a keras model object for pickling.

        Calls .unready to prepare this object for pickling, and .check_ready to put it back how it was after pickling.

        In addition to what's defined in the unready method, it avoids trying to copy the env. We can't copy this,
        but we also don't want to make a new one (like we can with the models). This would cause a new render window
        per episode...

        The default unready method in AgentBase does nothing. The GPU models should modify .unready and .check_ready to
        handle complied Keras, which also can't be pickled.

        Object that need to use this should implement their own __getstate__:
        def __getstate__(self) -> Dict[str, Any]:
            return self._pickle_compatible_getstate()

        It's not implemented in AgentBase as the standard __getstate__ is required by the deepcopy below.
        """
        # Remove things
        self.unready()

        # Get object spec to pickle, everything left except env_builder references
        # This is dodgy. Can end up with recursion errors depending on how deepcopy behaves...
        object_state_dict = copy.deepcopy({k: v for k, v in self.__dict__.items()})

        # Put this object back how it was
        self.check_ready()

        return object_state_dict

    def check_ready(self) -> None:
        """
        Check the model is ready to use.

        If super is used, should be at end of overloading method.

        Default implementation:
         - Check _env is set (most models?)

        Example of other model specific steps that might need doing:
         - For Keras models, check model is ready, for example if it needs recompiling after loading.
        """
        self.env_builder = EnvBuilder(env_spec=self.env_spec, env_wrappers=self.env_wrappers,
                                      env_kwargs=self.env_kwargs)
        self.env_builder.set_env()
        self.ready = True
        gc.collect()

    def unready(self) -> None:
        """
        Remove anything that causes issues with pickling, such as keras models.

        If super is used, should be at end of overloading method.
        """
        if self.env_builder is not None:
            self.env_builder.env.close()
            self.env_builder = None
        self.ready = False
        gc.collect()

    def transform(self, s: Any) -> Any:
        """Run the any pre-preprocessing on raw state, if used."""
        return s

    def update_experience(self, *args) -> None:
        """Remember an experience, if used by agent."""
        pass

    @abc.abstractmethod
    def _build_model(self) -> None:
        """Prepare the model(s) the agent will use."""
        pass

    @abc.abstractmethod
    def update_model(self, *args, **kwargs) -> None:
        """Update the agents model(s)."""
        pass

    def _discounted_reward(self, reward: float, estimated_future_action_rewards: np.ndarray) -> float:
        """Use this to define the discounted reward for unfinished episodes, default is 1 step TD."""
        return reward + self.gamma * np.max(estimated_future_action_rewards)

    def _get_reward(self, reward: float, estimated_future_action_rewards: np.ndarray, done: bool) -> float:
        """
        Calculate discounted reward for a single step.

        :param reward: Last real reward.
        :param estimated_future_action_rewards: Estimated future values of actions taken on next step.
        :param done: Flag indicating if this is the last step on an episode.
        :return: Reward.
        """
        if done:
            # If done, reward is just this step. Can finish because agent has won or lost.
            # NOTE(review): _final_reward is not defined on AgentBase; agents relying on
            # _get_reward must implement it. Confirm against subclasses.
            return self._final_reward(reward)
        else:
            # Otherwise, it's the reward plus the predicted max value of next action
            return self._discounted_reward(reward, estimated_future_action_rewards)

    @abc.abstractmethod
    def get_action(self, s: Any, **kwargs) -> int:
        """
        Given state s, get an action from the agent.

        May include other kwargs if needed - for example, a training flag for methods using epsilon greedy.
        """
        pass

    @abc.abstractmethod
    def _play_episode(self, max_episode_steps: int = 500,
                      training: bool = False, render: bool = True) -> Tuple[float, int]:
        """
        Play a single episode with the agent (run multiple steps). Should return reward and n frames.

        :param max_episode_steps: Max steps before stopping, overrides any time limit set by Gym.
        :param training: Bool to indicate whether or not to use this experience to update the model.
        :param render: Bool to indicate whether or not to call env.render() each training step.
        :return: The total real reward for the episode and number of frames run.
        """
        pass

    def play_episode(self, max_episode_steps: int = 500,
                     training: bool = False, render: bool = True) -> EpisodeReport:
        """
        Run Agent's _play_episode and produce episode report.

        :param max_episode_steps: Max steps before stopping, overrides any time limit set by Gym.
        :param training: Bool to indicate whether or not to use this experience to update the model.
        :param render: Bool to indicate whether or not to call env.render() each training step.
        :return: EpisodeReport with the total real reward, number of frames run, and time taken in seconds.
        """
        t0 = time.time()
        total_reward, frames = self._play_episode(max_episode_steps=max_episode_steps, training=training, render=render)
        t1 = time.time()

        return EpisodeReport(total_reward=total_reward,
                             frames=frames,
                             time_taken=np.round(t1 - t0, 3),
                             epsilon_used=getattr(self, 'eps', None))

    def train(self, n_episodes: int = 10000, max_episode_steps: int = 500, verbose: bool = True, render: bool = True,
              checkpoint_every: Union[bool, int] = 0, update_every: Union[bool, int] = 1) -> None:
        """
        Run the default training loop

        :param n_episodes: Number of episodes to run.
        :param max_episode_steps: Max steps before stopping, overrides any time limit set by Gym.
        :param verbose: If verbose, use tqdm and print last episode score for feedback during training.
        :param render: Bool to indicate whether or not to call env.render() each training step.
        :param checkpoint_every: Save the model every n steps while training. Set to 0 or false to turn off.
        :param update_every: Run the _after_episode_update() step every n episodes.
        """
        self._tqdm.set_tqdm(verbose)

        for ep in self._tqdm.tqdm_runner(range(n_episodes)):
            episode_report = self.play_episode(max_episode_steps=max_episode_steps, training=True, render=render)
            self._update_history(episode_report, verbose)

            if (update_every > 0) and not (ep % update_every):
                # Run the after-episode update step
                self._after_episode_update()

            if (checkpoint_every > 0) and (ep > 0) and (not ep % checkpoint_every):
                self.save()

    def _after_episode_update(self) -> None:
        """
        Run an update step after an episode completes.

        In the default implementation of .train, update_every parameter can be used to control how often this method
        runs.

        Eg.
          - For DQN, synchronize target and value models
          - For REINFORCE do MC model training step
          - For random agent this is passed

        Note this there is no equivalent "during episode update" method as each agent defines it's own play_episode
        method, which can call model specific updates as needed (eg.f or DQN this sample from buffer + training
        step, for REINFORCE do nothing, etc.)
        """
        pass

    def _update_history(self, episode_report: EpisodeReport, verbose: bool = True) -> None:
        """
        Add an episodes reward to history and maybe plot depending on history settings.

        :param episode_report: Episode report to add to history.
        :param verbose: If verbose, print the last episode and run the history plot. The history plot will display
                        depending on it's own settings. Verbose = False will turn it off totally.
        """
        self.training_history.append(episode_report)

        if verbose:
            print(f"{self.name}: {episode_report}")
            self.training_history.training_plot()

    def save(self) -> None:
        """Pickle the whole agent to '<name>_<env_spec>' via joblib."""
        with open(f"{self.name}_{self.env_spec}", 'wb') as f:
            joblib.dump(self, f)

    @classmethod
    def load(cls, fn: str) -> "AgentBase":
        """Load a pickled agent and restore its env/models via check_ready."""
        with open(fn, 'rb') as f:
            new_agent = joblib.load(f)

        new_agent.check_ready()

        return new_agent

    @classmethod
    def example(cls, config: Dict[str, Any]) -> "AgentBase":
        """Optional example function using this agent."""
        raise NotImplementedError
from dataclasses import dataclass
from typing import Any, Dict, Tuple, Union, Callable, Iterable
import numpy as np
from sklearn.linear_model import SGDRegressor
from reinforcement_learning_keras.agents.agent_base import AgentBase
from reinforcement_learning_keras.agents.components.helpers.env_builder import EnvBuilder
from reinforcement_learning_keras.agents.components.history.training_history import TrainingHistory
from reinforcement_learning_keras.agents.q_learning.exploration.epsilon_greedy import EpsilonGreedy
from reinforcement_learning_keras.enviroments.config_base import ConfigBase
@dataclass
class LinearQAgent(AgentBase):
    """Q-learning agent estimating Q(s, a) with one SGDRegressor per discrete action."""
    eps: EpsilonGreedy
    training_history: TrainingHistory
    env_spec: str = "CartPole-v0"
    env_wrappers: Iterable[Callable] = ()
    name: str = 'LinearQAgent'
    gamma: float = 0.99
    log_exemplar_space: bool = False
    final_reward: Union[float, None] = None

    def __post_init__(self) -> None:
        self.env_builder = EnvBuilder(env_spec=self.env_spec, env_wrappers=self.env_wrappers,
                                      env_kwargs=self.env_kwargs)
        self._build_model()

    def __getstate__(self) -> Dict[str, Any]:
        return self._pickle_compatible_getstate()

    def _build_model(self) -> None:
        """
        Build the models to estimate Q(a|s).

        Because this is linear regression with multiple outputs, use multiple models.
        """
        # Create SGDRegressor for each action in space and initialise by calling .partial_fit for the first time on
        # dummy data (the initial env observation with a 0 target).
        mods = {a: SGDRegressor() for a in range(self.env.action_space.n)}
        for mod in mods.values():
            mod.partial_fit(self.transform(self.env.reset()), [0])

        self.mods = mods

    def transform(self, s: np.ndarray) -> np.ndarray:
        """Check shape. It's always single input with this agent."""
        if len(s.shape) == 1:
            s = s.reshape(1, -1)

        return s

    def partial_fit(self, s: np.ndarray, a: int, g: float) -> None:
        """
        Run partial fit for a single row of training data.

        :param s: The raw state observation.
        :param a: The action taken.
        :param g: The reward + discounted value of next state.
        """
        x = self.transform(s)
        self.mods[a].partial_fit(x, [g])

    def predict(self, s: np.ndarray) -> Dict[int, float]:
        """
        Given a single state observation, predict action values from each model.

        :param s: The raw state observation.
        :return: Dict containing action values, indexed by action id.
        """
        s = self.transform(s)
        return {a: float(mod.predict(s)) for a, mod in self.mods.items()}

    def get_best_action(self, s: np.ndarray) -> int:
        """
        Find the best action from the values predicted by each model.

        Remember preds is Dict[int, float]: This is essentially argmax on a dict (first max wins
        on ties, same as the previous explicit loop).

        :param s: The raw state observation.
        :return: The action with the highest predicted value.
        """
        preds = self.predict(s)
        return max(preds, key=preds.get)

    def get_action(self, s: np.ndarray, training: bool = False) -> int:
        """
        Get an action using epsilon greedy.

        Epsilon decays every time a random action is chosen.

        :param s: The raw state observation.
        :param training: Bool to indicate whether or not to use this experience to update the model. If False, just
                         returns best action.
        :return: The selected action.
        """
        action = self.eps.select(greedy_option=lambda: self.get_best_action(s),
                                 random_option=lambda: self.env.action_space.sample(),
                                 training=training)

        return action

    def update_model(self, s: np.ndarray, a: int, r: float, d: bool, s_: np.ndarray) -> None:
        """
        For a single step set, calculate discounted reward and update the appropriate action model.

        :param s: The raw state observation the action was selected for.
        :param a: The selected action.
        :param r: The reward for performing that action.
        :param d: Flag indicating if done.
        :param s_: The next state following the action.
        """
        # Update model
        if d:
            # Done flag is only true if env ends due to agent failure (not if max steps are reached). Punish.
            g = self.final_reward if self.final_reward is not None else 0
        else:
            # Calculate the reward for this step and the discounted max value of actions in the next state.
            g = r + self.gamma * np.max(list(self.predict(s_).values()))

        # Update the model s = x, g = y, and a is the model to update
        self.partial_fit(s, a, g)

    def _play_episode(self, max_episode_steps: int = 500,
                      training: bool = False, render: bool = True) -> Tuple[float, int]:
        """
        Play a single episode and return the total reward.

        :param max_episode_steps: Max steps before stopping, overrides any time limit set by Gym.
        :param training: Bool to indicate whether or not to use this experience to update the model.
        :param render: Bool to indicate whether or not to call env.render() each training step.
        :return: The total real reward for the episode and the number of frames run.
        """
        self.env._max_episode_steps = max_episode_steps
        obs = self.env.reset()
        total_reward = 0
        frame = 0  # Fixed: keep defined so the return below can't raise if max_episode_steps < 1.
        for frame in range(max_episode_steps):
            action = self.get_action(obs, training=training)
            prev_obs = obs
            obs, reward, done, info = self.env.step(action)
            total_reward += reward

            if render:
                self.env.render()

            if training:
                self.update_model(s=prev_obs, a=action, r=reward, d=done, s_=obs)

            if done:
                break

        return total_reward, frame

    @classmethod
    def example(cls, config: ConfigBase, render: bool = True,
                n_episodes: int = 10, max_episode_steps: int = 500, update_every: int = 10) -> "AgentBase":
        """Create, train, and save agent for a given config."""
        config_dict = config.build()

        agent = cls(**config_dict)
        agent.train(verbose=True, render=render,
                    n_episodes=n_episodes, max_episode_steps=max_episode_steps, update_every=update_every)
        agent.save()

        return agent
if __name__ == "__main__":
    from reinforcement_learning_keras.enviroments.cart_pole import CartPoleConfig
    from reinforcement_learning_keras.enviroments import MountainCarConfig

    # Train an example LinearQAgent on each supported environment.
    cart_pole_config = CartPoleConfig(agent_type='linear_q', plot_during_training=True)
    agent_cart_pole = LinearQAgent.example(cart_pole_config)

    mountain_car_config = MountainCarConfig(agent_type='linear_q', plot_during_training=True)
    agent_mountain_car = LinearQAgent.example(mountain_car_config)
import os
import warnings
from dataclasses import dataclass
from typing import Dict, Any, Union, Tuple, Iterable, Callable
import joblib
import numpy as np
import tensorflow as tf
from tensorflow import keras
from reinforcement_learning_keras.agents.agent_base import AgentBase
from reinforcement_learning_keras.agents.components.helpers.env_builder import EnvBuilder
from reinforcement_learning_keras.agents.components.helpers.virtual_gpu import VirtualGPU
from reinforcement_learning_keras.agents.components.history.training_history import TrainingHistory
from reinforcement_learning_keras.agents.components.replay_buffers.continuous_buffer import ContinuousBuffer
from reinforcement_learning_keras.agents.q_learning.exploration.epsilon_greedy import EpsilonGreedy
from reinforcement_learning_keras.enviroments.config_base import ConfigBase
from reinforcement_learning_keras.enviroments.model_base import ModelBase
# Run in graph (non-eager) mode: per the performance notes in DeepQAgent.update_model, eager
# execution is markedly slower for the per-step predict/train calls and leaks memory.
tf.compat.v1.disable_eager_execution()
@dataclass
class DeepQAgent(AgentBase):
    """
    Deep Q-learning agent with a target network, replay buffer, and optional double-DQN target
    selection. Models/buffer are saved to disk and dropped on unready() so the agent can be
    pickled, then reloaded by check_ready().
    """
    replay_buffer: ContinuousBuffer
    eps: EpsilonGreedy
    training_history: TrainingHistory
    model_architecture: ModelBase
    # If True, use the action model to select next-step actions and the target model to value them.
    double: bool = False
    # NOTE(review): noisy is never read in this class — presumably a placeholder for NoisyNet
    # support; confirm before relying on it.
    noisy: bool = False
    env_spec: str = "CartPole-v0"
    env_wrappers: Iterable[Callable] = ()
    name: str = 'DQNAgent'
    gamma: float = 0.99
    replay_buffer_samples: int = 75
    final_reward: Union[float, None] = None

    def __post_init__(self) -> None:
        # Build env + models immediately; _fn is the on-disk directory used by save/load.
        self.env_builder = EnvBuilder(env_spec=self.env_spec, env_wrappers=self.env_wrappers,
                                      env_kwargs=self.env_kwargs)

        self._build_model()
        self._fn: str = f"{self.name}_{self.env_spec}"
        self.ready = True

    def __getstate__(self) -> Dict[str, Any]:
        return self._pickle_compatible_getstate()

    def _save_models_and_buffer(self) -> None:
        """Persist both Keras models and the replay buffer under the agent's directory."""
        if not os.path.exists(f"{self._fn}"):
            os.mkdir(f"{self._fn}")
        self._action_model.save(f"{self._fn}/action_model")
        self._target_model.save(f"{self._fn}/target_model")
        self.replay_buffer.save(f"{self._fn}/replay_buffer.joblib")

    def _load_models_and_buffer(self) -> None:
        """Restore both Keras models and the replay buffer from the agent's directory."""
        self._action_model = keras.models.load_model(f"{self._fn}/action_model")
        self._target_model = keras.models.load_model(f"{self._fn}/target_model")
        self.replay_buffer = ContinuousBuffer.load(f"{self._fn}/replay_buffer.joblib")

    def unready(self) -> None:
        # Save then drop everything Keras/TF related (unpicklable), and clear the TF session.
        if self.ready:
            self._save_models_and_buffer()
            self._action_model = None
            self._target_model = None
            self.replay_buffer = None
            keras.backend.clear_session()
            tf.compat.v1.reset_default_graph()
        super().unready()

    def check_ready(self) -> None:
        # Reload models/buffer if they were dropped by unready(); super rebuilds the env.
        if not self.ready:
            self._load_models_and_buffer()
        super().check_ready()

    def _build_model(self) -> None:
        """
        Prepare two of the same model.

        The action model is used to pick actions and the value model is used to predict value of Q(s', a). Action model
        weights are updated on every buffer sample + training step. The target model is never directly trained, but it's
        weights are updated to match the action model at the end of each episode.

        :return:
        """
        self._action_model = self.model_architecture.compile(model_name='action_model', loss='mse')
        self._target_model = self.model_architecture.compile(model_name='target_model', loss='mse')

    def transform(self, s: np.ndarray) -> np.ndarray:
        """Check input shape, add Row dimension if required."""
        if len(s.shape) < len(self._action_model.input.shape):
            s = np.expand_dims(s, 0)

        return s

    def update_experience(self, s: np.ndarray, a: int, r: float, d: bool) -> None:
        """
        First the most recent step is added to the buffer.

        Note that s' isn't saved because there's no need. It'll be added next step. s' for any s is always index + 1 in
        the buffer.
        """
        # Add s, a, r, d to experience buffer
        self.replay_buffer.append((s, a, r, d))

    def update_model(self) -> None:
        """
        Sample a batch from the replay buffer, calculate targets using value model, and train action model.

        If the buffer is below its minimum size, no training is done.

        If the buffer has reached its minimum size, a training batch from the replay buffer and the action model is
        updated.

        This update samples random (s, a, r, s') sets from the buffer and calculates the discounted reward for each set.
        The value of the actions at states s and s' are predicted from the value model. The action model is updated
        using these value predictions as the targets. The value of performed action is updated with the discounted
        reward (using its value prediction at s'). ie. x=s, y=[action value 1, action value 2].

        GPU Performance notes (with 1080ti and eps @ 0.01, while rendering pong):
          - Looping here with 2 predict calls and 1 train call (each single rows) is unusably slow.
          - Two predict calls before loop and 1 train call after (on batches) runs at ~16 fps for pong (~2 GPU util).
          - Switching TF to non-eager mode improves performance to 50fps (~7% GPU util) (also stops memory leaks).
          - Reducing the predict calls to 1 by joining s and s' increases performance to ~73 fps (~14% util).
            - Render off: ~81fps (~16% util)
          - Vectorizing out the remaining loop: ~73fps (~14% util)
            - Render off: ~84fps (~16% util)
        """
        # If buffer isn't full, don't train
        if not self.replay_buffer.full:
            return

        # Else sample batch from buffer
        ss, aa, rr, dd, ss_ = self.replay_buffer.sample_batch(self.replay_buffer_samples)

        # Calculate estimated S,A values for current states and next states. These are stacked together first to avoid
        # making two separate predict calls
        ss = np.array(ss)
        ss_ = np.array(ss_)
        ss_and_ss_ = np.vstack((ss, ss_))
        y_now_and_future = self._target_model.predict_on_batch(ss_and_ss_)
        # First half of the stacked prediction = Q(s, .), second half = Q(s', .).
        y_now = y_now_and_future[0:self.replay_buffer_samples]
        y_future = y_now_and_future[self.replay_buffer_samples::]

        # Update rewards where not done with y_future predictions
        dd_mask = np.array(dd, dtype=bool).squeeze()
        rr = np.array(rr, dtype=float).squeeze()

        # Gather max action indexes and update relevant actions in y
        if self.double:
            # If using double dqn select best actions using the action model, but the value of those action using the
            # target model (already have in y_future). Note that this doesn't appear to have as much of a performance
            # cost as might be expected - presumably because the ss_ data is already on the GPU so transfer delay
            # avoided.
            y_future_action_model = self._action_model.predict_on_batch(ss_)
            selected_actions = np.argmax(y_future_action_model[~dd_mask, :], axis=1)
        else:
            # If normal dqn select targets using target model, and value of those from target model too
            selected_actions = np.argmax(y_future[~dd_mask, :], axis=1)

        # Update reward values with estimated values (where not done) and final rewards (where done)
        rr[~dd_mask] += y_future[~dd_mask, selected_actions]
        if self.final_reward is not None:
            # If self.final_reward is set, set done cases to this value. Else leave as observed reward.
            rr[dd_mask] = self.final_reward

        # Write each sample's updated reward into the column of the action actually taken.
        aa = np.array(aa, dtype=int)
        np.put_along_axis(y_now, aa.reshape(-1, 1), rr.reshape(-1, 1), axis=1)

        # Fit model with updated y_now values
        self._action_model.train_on_batch(ss, y_now)

    def get_best_action(self, s: np.ndarray) -> np.ndarray:
        """
        Get best action(s) from model - the one with the highest predicted value.

        :param s: A single or multiple rows of state observations.
        :return: The selected action.
        """
        preds = self._action_model.predict(self.transform(s))

        return np.argmax(preds)

    def get_action(self, s: np.ndarray, training: bool = False) -> int:
        """
        Get an action using epsilon greedy.

        Epsilon decays every time a random action is chosen.

        :param s: The raw state observation.
        :param training: Bool to indicate whether or not to use this experience to update the model. If False, just
                         returns best action.
        :return: The selected action.
        """
        action = self.eps.select(greedy_option=lambda: self.get_best_action(s),
                                 random_option=lambda: self.env.action_space.sample(),
                                 training=training)

        return action

    def update_target_model(self) -> None:
        """
        Update the value model with the weights of the action model (which is updated each step).

        The value model is updated less often to aid stability.
        """
        self._target_model.set_weights(self._action_model.get_weights())

    def _play_episode(self, max_episode_steps: int = 500,
                      training: bool = False, render: bool = True) -> Tuple[float, int]:
        """
        Play a single episode and return the total reward.

        :param max_episode_steps: Max steps before stopping, overrides any time limit set by Gym.
        :param training: Bool to indicate whether or not to use this experience to update the model.
        :param render: Bool to indicate whether or not to call env.render() each training step.
        :return: The total real reward for the episode.
        """
        self.env._max_episode_steps = max_episode_steps
        obs = self.env.reset()
        total_reward = 0
        for frame in range(max_episode_steps):
            action = self.get_action(obs, training=training)
            prev_obs = obs
            obs, reward, done, info = self.env.step(action)
            total_reward += reward

            if render:
                self.env.render()

            if training:
                self.update_experience(s=prev_obs, a=action, r=reward, d=done)
                # Action model is updated in TD(λ) fashion
                self.update_model()

            if done:
                break

        return total_reward, frame

    def _after_episode_update(self) -> None:
        """Value model synced with action model at the end of each episode."""
        self.update_target_model()

    @classmethod
    def example(cls, config: ConfigBase, render: bool = True,
                n_episodes: int = 500, max_episode_steps: int = 500, update_every: int = 10,
                checkpoint_every: int = 100) -> "DeepQAgent":
        """For a given config, create new, or load existing agent. Then train and save agent."""
        VirtualGPU(config.gpu_memory)
        config_dict = config.build()

        if os.path.exists(config_dict['name']):
            agent = cls.load(config_dict['name'])
            warnings.warn('Loaded existing agent.')
        else:
            agent = cls(**config_dict)

        agent.train(verbose=True, render=render,
                    n_episodes=n_episodes, max_episode_steps=max_episode_steps, update_every=update_every,
                    checkpoint_every=checkpoint_every)
        agent.save()

        return agent

    def _save_self(self):
        """Save agent.joblib."""
        if not os.path.exists(f"{self._fn}"):
            os.mkdir(f"{self._fn}")

        joblib.dump(self, f"{self._fn}/agent.joblib")

    def save(self, make_ready: bool = True) -> None:
        """
        Saves buffer, etc. via unready and agent.joblib with save.

        :param make_ready: Make agent ready agent after saving (reload buffer, etc). This can be skipped to save time
                           in some situations. Default True.
        """
        self.unready()
        self._save_self()
        if make_ready:
            self.check_ready()

    @classmethod
    def load(cls, fn: str) -> "DeepQAgent":
        # fn is the agent directory created by save(); check_ready reloads models/buffer/env.
        new_agent = joblib.load(f"{fn}/agent.joblib")
        new_agent.check_ready()

        return new_agent
if __name__ == "__main__":
    from reinforcement_learning_keras.enviroments.atari.pong.pong_config import PongConfig
    from reinforcement_learning_keras.enviroments.cart_pole import CartPoleConfig
    from reinforcement_learning_keras.enviroments import MountainCarConfig

    # Standard DQNs
    agent_cart_pole = DeepQAgent.example(
        CartPoleConfig(agent_type='dqn', plot_during_training=True), render=False)
    agent_mountain_car = DeepQAgent.example(
        MountainCarConfig(agent_type='dqn', plot_during_training=True))
    agent_pong = DeepQAgent.example(
        PongConfig(agent_type='dqn', plot_during_training=True),
        max_episode_steps=10000, update_every=5, render=False, checkpoint_every=10)

    # Dueling DQNs
    dueling_agent_cart_pole = DeepQAgent.example(
        CartPoleConfig(agent_type='dueling_dqn', plot_during_training=True))
    dueling_agent_mountain_car = DeepQAgent.example(
        MountainCarConfig(agent_type='dueling_dqn', plot_during_training=True))
import copy
from dataclasses import dataclass
from typing import Callable, Any, List
import matplotlib.pyplot as plt
import numpy as np
@dataclass
class EpsilonGreedy:
    """
    Handles epsilon-greedy action selection and decay of epsilon during training.

    Decay schedule can be linear or compound, with additional perturbation by re-adding epsilon every n steps.

    .future_value can be used to calculate decay for different step, rate, and schedule combinations. It ignores
    other factors (min eps and any perturbation).

    .simulate runs epsilon forward and returns the complete output with current settings, from the current point.

    There's no protection against creating an object that increases epsilon, so be careful....

    Examples
    1) Simple linear decay to min_eps
    >>> eps = EpsilonGreedy(eps_initial=1, decay=0.0002, decay_schedule='linear')
    2) Simple compound decay to min_eps
    >>> eps = EpsilonGreedy(eps_initial=1, decay=0.00075, decay_schedule='compound')
    3) Compound decay, perturb by 0.5 when min_eps is reached
    >>> eps = EpsilonGreedy(eps_initial=1, decay=0.001, decay_schedule='compound',
    >>>                     perturb_increase_every=3000, perturb_increase_mag=0.5)
    4) Compound decay, spends time at min_eps
    >>> eps = EpsilonGreedy(eps_initial=1, decay=0.01, decay_schedule='compound',
    >>>                     perturb_increase_every=1000, perturb_increase_mag=0.5)

    :param eps_initial: Initial epsilon value.
    :param decay: Decay rate in percent (should be positive to decay).
    :param decay_schedule: 'linear' or 'compound'.
    :param eps_min: The min value epsilon can fall to.
    :param perturb_increase_every: Increase epsilon every n steps. Default 0 (off).
    :param perturb_increase_mag: Value to add every perturb_increase_every. Default 0.
    :param state: Seed for the numpy RandomState used to pick between the greedy and random options.
    """
    eps_initial: float = 0.2
    decay: float = 0.0001
    decay_schedule: str = 'compound'
    eps_min: float = 0.01
    perturb_increase_every: int = 0
    perturb_increase_mag: float = 0
    # Annotated (and therefore an actual dataclass field) so the documented `state`
    # seed can really be passed to the constructor; previously `state = None` had no
    # annotation, so dataclasses silently treated it as a plain class attribute.
    # Appended LAST so the positional order of the pre-existing fields is unchanged.
    state: Optional[int] = None

    def __post_init__(self) -> None:
        self._step: int = 0
        self.eps_current = self.eps_initial

        valid_decay = ('linear', 'compound')
        if self.decay_schedule.lower() not in valid_decay:
            raise ValueError(f"Invalid decay schedule {self.decay_schedule}. Pick from {valid_decay}.")

        self._set_random_state()

    def _set_random_state(self) -> None:
        # Seeded with self.state (None -> nondeterministic seeding).
        self._state = np.random.RandomState(self.state)

    def _linear_decay(self) -> float:
        return self.eps_current - self.decay

    def _compound_decay(self) -> float:
        return self.eps_current - self.eps_current * self.decay

    def _decay(self) -> float:
        """Apply one step of decay (plus any scheduled perturbation), floored at eps_min."""
        new_eps = np.nan
        if self.decay_schedule.lower() == 'linear':
            new_eps = self._linear_decay()
        if self.decay_schedule.lower() == 'compound':
            new_eps = self._compound_decay()

        # Periodically bump epsilon back up to encourage renewed exploration.
        if (self.perturb_increase_every > 0) and (self._step > 0) and (not self._step % self.perturb_increase_every):
            new_eps += self.perturb_increase_mag

        self._step += 1

        return max(self.eps_min, new_eps)

    def select(self, greedy_option: Callable, random_option: Callable,
               training: bool = False) -> Any:
        """
        Apply epsilon greedy selection.

        If training, decay epsilon, and return the selected option. If not training, just return greedy_option.
        Use of lambdas is to avoid unnecessarily evaluating both of two pre-computed options.

        :param greedy_option: Function to evaluate if random option is NOT picked.
        :param random_option: Function to evaluate if random option IS picked.
        :param training: Bool indicating if call is during training and to use epsilon greedy and decay.
        :return: Evaluated selected option.
        """
        if training:
            self.eps_current = self._decay()
            if self._state.random() < self.eps_current:
                return random_option()

        return greedy_option()

    def simulate(self, steps: int = 10000, plot: bool = False) -> List[float]:
        """Run the decay schedule forward on a copy of this object and return the epsilon trace.

        Uses deepcopy (previously copy.copy): a shallow copy shared this object's
        mutable RandomState, so simulating advanced the original's RNG as a side
        effect. deepcopy makes the simulation fully side-effect free.
        """
        eps = copy.deepcopy(self)

        eps_value = []
        for _ in range(steps):
            eps.select(lambda: 0, lambda: 1, training=True)
            eps_value.append(eps.eps_current)

        if plot:
            plt.plot(eps_value)
            plt.xlabel('Step')
            plt.ylabel('Epsilon')
            plt.show()

        return eps_value

    @classmethod
    def future_value(cls, eps: float, decay: float, steps: int, decay_schedule: str) -> float:
        """
        Calculate what eps will be after a number of steps.

        Decay is either decay % per step (like compound interest), or linear with constant decay amount.
        Ignores eps_min and any perturbation settings.

        :param eps: Current/initial epsilon.
        :param decay: Decay rate per step.
        :param steps: Number of steps (usually training frames, rather than whole episodes)
        :param decay_schedule: 'linear' or 'compound'.
        :return: Epsilon after the requested number of steps.
        :raises ValueError: On an unknown decay_schedule (previously returned None silently).
        """
        schedule = decay_schedule.lower()
        if schedule == 'compound':
            return eps * (1 - decay) ** steps
        if schedule == 'linear':
            return eps - decay * steps
        raise ValueError(f"Invalid decay schedule {decay_schedule}. Pick from ('linear', 'compound').")
import os
from dataclasses import dataclass
from typing import List, Tuple, Union, Dict, Any, Callable, Iterable
import joblib
import numpy as np
import tensorflow as tf
from tensorflow import keras
from tensorflow.keras import backend as K
from reinforcement_learning_keras.agents.agent_base import AgentBase
from reinforcement_learning_keras.agents.components.helpers.env_builder import EnvBuilder
from reinforcement_learning_keras.agents.components.helpers.virtual_gpu import VirtualGPU
from reinforcement_learning_keras.agents.components.history.training_history import TrainingHistory
from reinforcement_learning_keras.agents.policy_gradient.loss import reinforce_loss
from reinforcement_learning_keras.enviroments.config_base import ConfigBase
from reinforcement_learning_keras.enviroments.model_base import ModelBase
tf.compat.v1.disable_eager_execution()
@dataclass
class ReinforceAgent(AgentBase):
    """
    Monte-Carlo policy-gradient (REINFORCE) agent.

    Uses a simple replay buffer.

    Has 2 components:
      - _current_* : Lists of steps being collected for the current episode
      - _buffer_*  : Dicts (keyed by episode id) containing a backlog of completed episodes
                     not yet used for training the model

    At the end of an episode, the current episode is moved to the backlog. This is cleared after updating model,
    which can occur less often.

    TODO: Move replay buffer to agents.components.replay_buffer.episodic_buffer

    :param training_history: Recorder of per-episode results.
    :param model_architecture: Builder for the policy network (state -> action probabilities).
    :param env_spec: Gym environment id.
    :param env_wrappers: Gym wrappers applied by EnvBuilder.
    :param name: Agent name; combined with env_spec to build output paths.
    :param alpha: Step-size factor scaling the probability updates in update_model.
    :param gamma: Discount factor used for episode returns.
    :param final_reward: Optional final-step reward override — not referenced in this class; TODO confirm usage.
    """
    training_history: TrainingHistory
    model_architecture: ModelBase
    env_spec: str = "CartPole-v0"
    env_wrappers: Iterable[Callable] = ()
    name: str = 'REINFORCEAgent'
    alpha: float = 0.0001
    gamma: float = 0.99
    final_reward: Union[float, None] = None

    def __post_init__(self) -> None:
        # NOTE(review): self.env_kwargs is not a field here — presumably provided by AgentBase; confirm.
        self.env_builder = EnvBuilder(env_spec=self.env_spec, env_wrappers=self.env_wrappers,
                                      env_kwargs=self.env_kwargs)

        # Keep track of number of trained episodes, only used for IDing episodes in buffer.
        self._ep_tracker: int = -1
        # Base path for all files saved by this agent.
        self._fn = f"{self.name}_{self.env_spec}"

        self._build_model()
        self.clear_memory()

        self.ready = True

    def __getstate__(self) -> Dict[str, Any]:
        # Delegate to AgentBase so unpicklable members (model, env) are excluded.
        return self._pickle_compatible_getstate()

    def _save_model(self):
        """Save the keras policy model to <self._fn>/model (creating the directory if needed)."""
        if not os.path.exists(f"{self._fn}"):
            os.mkdir(f"{self._fn}")
        # NOTE(review): f"{self.name}_{self.env_spec}" equals self._fn by construction.
        self._model.save(f"{self.name}_{self.env_spec}/model")

    def _load_model(self):
        """Reload the keras policy model, re-registering the custom REINFORCE loss."""
        self._model = keras.models.load_model(f"{self._fn}/model",
                                              custom_objects={'reinforce_loss': reinforce_loss})

    def unready(self) -> None:
        """Drop unpicklable state: save the model to disk, then clear it and the TF session/graph."""
        if self.ready:
            self._save_model()
            self._model = None
            keras.backend.clear_session()
            tf.compat.v1.reset_default_graph()
        super().unready()

    def check_ready(self):
        """Restore the model from disk if the agent is currently in the unready state."""
        if not self.ready:
            self._load_model()
        super().check_ready()

    def _clear_current_episode(self) -> None:
        """Clear buffer for current episode."""
        self._current_states: List[np.ndarray] = []
        self._current_action_probs: List[np.ndarray] = []
        self._current_actions: List[int] = []
        self._current_rewards: List[float] = []

    def _clear_buffer_backlog(self) -> None:
        """Clear backlog of collected episodes not yet trained on."""
        self._buffer_states: Dict[int, List[np.ndarray]] = {}
        self._buffer_action_probs: Dict[int, List[np.ndarray]] = {}
        self._buffer_actions: Dict[int, np.ndarray] = {}
        self._buffer_rewards: Dict[int, np.ndarray] = {}
        self._buffer_discounted_rewards: Dict[int, np.ndarray] = {}

    def _move_current_episode_to_backlog(self, episode: int):
        """Move current episode to backlog, calc discounted rewards, and clear."""
        self._buffer_states[episode] = self._current_states
        self._buffer_action_probs[episode] = self._current_action_probs
        self._buffer_actions[episode] = np.array(self._current_actions)
        self._buffer_rewards[episode] = np.array(self._current_rewards)
        # Discounted returns are computed once here, at episode end.
        self._buffer_discounted_rewards[episode] = self._calc_discounted_rewards(self._current_rewards)
        self._clear_current_episode()

    def clear_memory(self) -> None:
        """Clear current episode and backlog buffers."""
        self._clear_current_episode()
        self._clear_buffer_backlog()

    def _build_model(self) -> None:
        """State -> model -> action probs"""
        self._model = self.model_architecture.compile(model_name='action_model', loss=reinforce_loss)

    def transform(self, s: Union[List[np.ndarray], np.ndarray]) -> np.ndarray:
        """No transforming of state here, just stacking and dimension checking."""
        # Add a leading batch dimension when a single (unbatched) state is passed.
        # NOTE(review): relies on s having a .shape attribute, so a plain list is not actually supported — confirm.
        if len(s.shape) < len(self._model.input.shape):
            s = np.expand_dims(s, 0)
        return s

    def get_action(self, s: np.ndarray, training=None) -> Tuple[np.ndarray, int]:
        """
        Use the current policy to select an action from a single state observation.

        Sample actions using the probabilities provided by the action model.

        :param s: Single state observation.
        :param training: Accepted for interface compatibility but not used here.
        :return: (action probabilities, sampled action index).
        """
        actions_probs = self._model.predict(self.transform(s)).squeeze()
        return actions_probs, np.random.choice(range(self.env.action_space.n),
                                               p=actions_probs)

    def update_experience(self, s: np.ndarray, a: int, r: float, a_p: np.ndarray) -> None:
        """
        Add step of experience to the buffer.

        :param s: State
        :param a: Action
        :param r: Reward
        :param a_p: Action probabilities
        """
        self._current_states.append(s)
        self._current_action_probs.append(a_p)
        self._current_actions.append(a)
        self._current_rewards.append(r)

    def _calc_discounted_rewards(self, rr: List[float]) -> np.ndarray:
        """Calculate discounted rewards for a whole episode and normalise."""
        # Full episode returns: iterate backwards accumulating gamma-discounted reward.
        disc_rr = np.zeros_like(rr)
        cumulative_reward = 0
        for t in reversed(range(0, disc_rr.size)):
            cumulative_reward = cumulative_reward * self.gamma + rr[t]
            disc_rr[t] = cumulative_reward

        # Normalise (small epsilon guards against zero std on constant rewards).
        disc_rr_mean = np.mean(disc_rr)
        disc_rr_std = np.std(disc_rr) + 1e-9
        disc_rr_norm = (disc_rr - disc_rr_mean) / disc_rr_std

        # Column vector: one row per step.
        return np.vstack(disc_rr_norm)

    @staticmethod
    def _flatten_list(nested_list: List[List[Any]]) -> List[Any]:
        return [item for sublist in nested_list for item in sublist]

    def update_model(self) -> None:
        """Train the policy model on every episode currently in the backlog buffer."""
        # Stack all available episodes
        states = np.concatenate(list(self._buffer_states.values()))
        disc_rewards = np.concatenate(list(self._buffer_discounted_rewards.values()))
        actions = np.concatenate(list(self._buffer_actions.values()))
        action_probs = np.vstack(list(self._buffer_action_probs.values()))

        # One hot actions
        actions_oh = K.one_hot(actions,
                               num_classes=self.env.action_space.n)

        # Calculate prob updates: nudge probabilities towards taken actions, scaled by return.
        dlogps = (actions_oh - action_probs) * disc_rewards
        y = action_probs + self.alpha * dlogps

        # Train
        x = self.transform(states)
        self._model.train_on_batch(x, y)

    def _play_episode(self, max_episode_steps: int = 500,
                      training: bool = False, render: bool = True) -> Tuple[float, int]:
        """Play one episode; if training, collect experience and move it to the backlog at episode end.

        :return: (total reward, index of the last frame played).
        """
        self.env._max_episode_steps = max_episode_steps
        obs = self.env.reset()
        total_reward = 0
        for frame in range(max_episode_steps):
            action_probs, action = self.get_action(obs)
            prev_obs = obs
            obs, reward, done, _ = self.env.step(action)
            total_reward += reward

            if render:
                self.env.render()

            if training:
                self.update_experience(s=prev_obs, a=action, r=reward, a_p=action_probs)

            if done:
                break

        if training:
            # Only keep episode buffer if actually training
            self._ep_tracker += 1
            self._move_current_episode_to_backlog(self._ep_tracker)

        return total_reward, frame

    def _after_episode_update(self) -> None:
        """Monte-Carlo update of policy model is updated (ie. after each full episode, or more)"""
        self.update_model()
        self.clear_memory()

    def save(self) -> None:
        """Save the whole agent (model via unready, everything else via joblib)."""
        # No need to unready, this uses __getstate__
        if not os.path.exists(f"{self._fn}"):
            os.mkdir(f"{self._fn}")
        self.unready()
        joblib.dump(self, f"{self.name}_{self.env_spec}/agent.joblib")
        self.check_ready()

    @classmethod
    def load(cls, fn: str) -> "ReinforceAgent":
        """Load an agent saved with save() from directory fn and make it ready."""
        new_agent = joblib.load(f"{fn}/agent.joblib")
        new_agent.check_ready()
        return new_agent

    @classmethod
    def example(cls, config: ConfigBase, render: bool = True,
                n_episodes: int = 500, max_episode_steps: int = 500, update_every: int = 1) -> "ReinforceAgent":
        """Create, train, and save agent for a given config."""
        VirtualGPU(config.gpu_memory)
        config_dict = config.build()
        agent = cls(**config_dict)
        agent.train(verbose=True, render=render,
                    n_episodes=n_episodes, max_episode_steps=max_episode_steps, update_every=update_every)
        agent.save()

        return agent
if __name__ == "__main__":
    # Demo: train a REINFORCE agent on CartPole.
    # (Fixed: trailing dataset-extraction garbage fused onto the final line made
    # this block syntactically invalid.)
    from reinforcement_learning_keras.enviroments.cart_pole import CartPoleConfig

    agent_cart_pole = ReinforceAgent.example(CartPoleConfig(agent_type='reinforce', plot_during_training=True),
                                             render=False)
from dataclasses import dataclass
import warnings
@dataclass
class VirtualGPU:
    """
    Creates a memory-limited virtual GPU device on top of a physical GPU, failing gracefully when none exists.

    gpu_memory_limit: Max memory in MB for virtual device. Setting LOWER than total available this can help
                      avoid out of memory errors on some set ups when TF tries to allocate too much memory
                      (seems to be a bug).
    gpu_device_id: Integer device identifier for the real GPU the virtual GPU should use.
    """
    gpu_memory_limit: int = 512
    gpu_device_id: int = 0

    def __post_init__(self):
        self.physical_device = None
        self.virtual_device = None
        # True when TF appears to be running on GPU.
        self.on = self._set_tf()

    def _set_tf(self) -> bool:
        """
        Helper function for training on tf. Reduces GPU memory footprint for keras/tf models.

        Creates a virtual device on the requested GPU with limited memory. Will fail gracefully if GPU isn't
        available.

        :return: Bool indicating if TF appears to be running on GPU. Can be used, for example, to avoid using
                 multiprocessing in the caller when running on GPU. This will likely result in an exception, but may
                 result in hanging forever, so probably best avoided.
        """
        # Imported locally so constructing the object is the only point that requires TF.
        import tensorflow as tf

        try:
            self.physical_device = tf.config.experimental.list_physical_devices('GPU')[self.gpu_device_id]
            virtual_device = tf.config.experimental.VirtualDeviceConfiguration(memory_limit=self.gpu_memory_limit)
        except (IndexError, AttributeError) as e:
            # IndexError: Assuming using GPU but indexed device not found.
            # AttributeError: Assuming no GPU.
            warnings.warn(f"Not using GPU due to: {e}")
            return False

        # First check a virtual device hasn't already been set. If it has, we don't want to try and set a new one.
        # - If the device has not been used before, it will be replaced and no error is raised
        # - If the device has been used before it will be initialised and will be immutable, raising a RuntimeError
        #   on set_virtual_device_configuration call.
        # Aim here is to make behaviour more predictable. This allows multiple models to run in the same session, as
        # long as the max memory required is set at the start. The alternative is it fixing to the first used model's
        # memory requirement (for example in a session that calls VirtualGPU again in agent.example()).
        existing_device = tf.config.experimental.get_virtual_device_configuration(self.physical_device)
        if existing_device is not None:
            warnings.warn(f"A virtual GPU with {existing_device[0].memory_limit} MB memory already exists, "
                          f"using this rather than creating another with the requested {self.gpu_memory_limit} MB."
                          f"Good luck.")
            self.virtual_device = existing_device[0]
            self.gpu_memory_limit = existing_device[0].memory_limit
        else:
            # GPU available and no existing virtual device. Create a new one.
            tf.config.experimental.set_virtual_device_configuration(self.physical_device, [virtual_device])
            # Record the configuration we just applied; previously this branch left
            # self.virtual_device as None, inconsistent with the existing-device branch.
            self.virtual_device = virtual_device

        return True
from dataclasses import dataclass
from typing import List, Tuple, Any
import matplotlib.pyplot as plt
import numpy as np
import seaborn as sns
from reinforcement_learning_keras.agents.components.history.episode_report import EpisodeReport
@dataclass
class TrainingHistory:
    """Collects per-episode reports and provides rolling-average metrics and training plots.

    :param plotting_on: If True, training_plot() actually draws.
    :param plot_every: Plot frequency, in episodes, used by training_plot().
    :param agent_name: Name shown as the plot title.
    :param rolling_average: Window size for rolling averages (and current_performance).
    """
    plotting_on: bool = False
    plot_every: int = 50
    agent_name: str = 'Unnamed agents'
    rolling_average: int = 10

    def __post_init__(self) -> None:
        sns.set()
        self.history: List[EpisodeReport] = []

    def append(self, episode_report: EpisodeReport) -> None:
        self.history.append(episode_report)

    def extend(self, episode_report: List[EpisodeReport]) -> None:
        self.history.extend(episode_report)

    def get_metric(self, metric: str = "total_reward") -> List[Any]:
        """Extract one attribute (e.g. "total_reward", "frames") from every episode report."""
        return [getattr(ep, metric) for ep in self.history]

    def plot(self, metrics: List[str], show: bool = True) -> Tuple[plt.Figure, plt.Axes]:
        """Plot current history."""
        return self._plot(show=show, metrics=metrics)

    def training_plot(self, show: bool = True) -> Tuple[plt.Figure, plt.Axes]:
        """Plot if it's turned on and is a plot step; returns None otherwise."""
        if self.plotting_on and (not len(self.history) % self.plot_every):
            return self._plot(show=show, metrics=["total_reward", "frames"])

    def _plot(self, metrics: List[str], show: bool = True) -> Tuple[plt.Figure, plt.Axes]:
        """Create axes and plot. Not storing matplotlib objects to self as they cause pickle issues."""
        plt.close('all')
        fig, axs = plt.subplots(nrows=len(metrics), ncols=1)
        # A single subplot comes back as a bare Axes; normalise to a sequence.
        if not isinstance(axs, np.ndarray):
            axs = [axs]

        for ax, metric in zip(axs, metrics):
            ax.plot(self._rolling_average(self.get_metric(metric)), label=metric)
            ax.set_xlabel('N Episodes', fontweight='bold')
            ax.set_ylabel(metric, fontweight='bold')
        axs[0].set_title(self.agent_name, fontweight='bold')

        if show:
            fig.show()

        # NOTE(review): returns only the LAST axis, which matches the annotated
        # plt.Axes return type; callers needing all axes should use fig.axes.
        return fig, ax

    def _rolling_average(self, x) -> np.ndarray:
        """Rolling average of x over a window of self.rolling_average points."""
        return np.convolve(x, np.ones(self.rolling_average), 'valid') / self.rolling_average

    @property
    def total_frames(self) -> int:
        """Total number of frames across all recorded episodes."""
        return int(np.sum(self.get_metric("frames")))

    @property
    def current_performance(self, metric: str = "total_reward") -> float:
        """Average performance over the last rolling_average episodes.

        Previously sliced [len - window : -1], which silently excluded the most
        recent episode; the slice now includes it.
        """
        return float(np.mean(self.get_metric(metric)[-self.rolling_average:]))
from dataclasses import dataclass
from typing import Any, Dict, Tuple, Iterable, Callable
from reinforcement_learning_keras.agents.agent_base import AgentBase
from reinforcement_learning_keras.agents.components.helpers.env_builder import EnvBuilder
from reinforcement_learning_keras.agents.components.history.training_history import TrainingHistory
from reinforcement_learning_keras.agents.random.random_model import RandomModel
from reinforcement_learning_keras.enviroments.config_base import ConfigBase
@dataclass
class RandomAgent(AgentBase):
    """
    A non-abstract agent implementing the AgentBase interface that acts uniformly at random and learns nothing.

    Useful as a baseline and for testing.
    """
    env_spec: str
    training_history: TrainingHistory
    env_wrappers: Iterable[Callable] = ()
    name: str = 'RandomAgent'

    def __post_init__(self) -> None:
        self.env_builder = EnvBuilder(env_spec=self.env_spec, env_wrappers=self.env_wrappers,
                                      env_kwargs=self.env_kwargs)
        self._build_model()

    def __getstate__(self) -> Dict[str, Any]:
        return self._pickle_compatible_getstate()

    def _build_model(self) -> None:
        """Set model function. Note using a lambda breaks pickle support."""
        self.model = RandomModel(self.env.action_space.n)

    def update_model(self, *args, **kwargs) -> None:
        """Nothing to update: this agent has no trainable model."""
        pass

    def get_action(self, s: Any, **kwargs) -> int:
        """Ignore the observed state entirely and sample a random action."""
        return self.model.predict()

    def _play_episode(self, max_episode_steps: int = 500,
                      training: bool = False, render: bool = True) -> Tuple[float, int]:
        """
        Play a single episode and return the total reward and last frame index.

        :param max_episode_steps: Max steps before stopping, overrides any time limit set by Gym.
        :param training: Accepted for interface compatibility; a random agent never learns from experience.
        :param render: Bool to indicate whether or not to call env.render() each training step.
        :return: (total real reward for the episode, index of the final frame played).
        """
        self.env._max_episode_steps = max_episode_steps
        _ = self.env.reset()

        episode_reward = 0
        for frame in range(max_episode_steps):
            _, step_reward, done, _ = self.env.step(self.get_action(None))
            episode_reward += step_reward

            if render:
                self.env.render()

            if done:
                break

        return episode_reward, frame

    @classmethod
    def example(cls, config: ConfigBase, render: bool = True,
                n_episodes: int = 500, max_episode_steps: int = 500) -> "RandomAgent":
        """Create, train, and save an agent for the given config."""
        agent = cls(**config.build())
        agent.train(verbose=True, render=render,
                    n_episodes=n_episodes, max_episode_steps=max_episode_steps)
        agent.save()

        return agent
if __name__ == "__main__":
    # Demo: run the random baseline on a few environments.
    # (Fixed: trailing dataset-extraction garbage fused onto the final line made
    # this block syntactically invalid.)
    from reinforcement_learning_keras.enviroments import PongConfig
    from reinforcement_learning_keras.enviroments.cart_pole import CartPoleConfig
    from reinforcement_learning_keras.enviroments import MountainCarConfig

    agent_mountain_car = RandomAgent.example(
        MountainCarConfig(agent_type='random', plot_during_training=True), max_episode_steps=1500, render=False)
    agent_cart_pole = RandomAgent.example(CartPoleConfig(agent_type='random', plot_during_training=True))
    # NOTE(review): agent_mountain_car is rebound here, discarding the first run above — confirm intended.
    agent_mountain_car = RandomAgent.example(MountainCarConfig(agent_type='random', plot_during_training=True))
    agent_pong = RandomAgent.example(PongConfig(agent_type='random', plot_during_training=True),
                                     max_episode_steps=10000)
import argparse
import logging
import gym
import numpy as np
try:
import tensorflow as tf
import tensorflow.compat.v1 as tf1
except ImportError:
raise ImportError("reinforcement requires tensorflow 1.14")
from example.baselines import ValueBaseline
from example.log_utils import NoLog
from example.policies import ParameterizedPolicy
from reinforcement.algorithm.reinforce import Reinforce
from reinforcement.agents.basis import BatchAgent
logger = logging.getLogger(__name__)
class Reporter:
    """Decides when to render/log during training and formats per-episode progress messages."""

    def __init__(self, cfg):
        self.cfg = cfg

    def should_render(self, e):
        """True when episode e falls on a render step (render_frq <= 0 disables rendering)."""
        frq = self.cfg.render_frq
        return frq > 0 and e % frq == 0

    def should_log(self, e):
        """True when episode e falls on a logging step (log_frq <= 0 disables logging)."""
        frq = self.cfg.log_frq
        return frq > 0 and e % frq == 0

    def report(self, e, rs):
        """Format a one-line progress message for episode e given the full reward history rs."""
        last_reward, window = rs[-1], self.cfg.log_frq
        return f"Episode {e}: reward={last_reward}; mean reward of last {window} episodes: {np.mean(rs[-window:])}"
def _matches_frequency(e, f):
return f > 0 and e % f == 0
def run_reinforce(config):
    """Train a REINFORCE agent on CartPole-v0 according to *config*, logging progress periodically."""
    reporter = Reporter(config)
    env = gym.make('CartPole-v0')
    rewards = []
    with tf1.Session() as session:
        agent = _make_agent(config, session, env)
        for episode in range(1, config.episodes + 1):
            rewards.append(_run_episode(env, episode, agent, reporter))
            if reporter.should_log(episode):
                logger.info(reporter.report(episode, rewards))
    env.close()
def _make_agent(config, session, env):
    """Wire up policy, baseline, and the REINFORCE algorithm into a BatchAgent."""
    obs_dim = env.observation_space.shape[0]
    policy = ParameterizedPolicy(session, obs_dim, env.action_space.n, NoLog(), config.lr_policy)
    baseline = ValueBaseline(session, obs_dim, NoLog(), config.lr_baseline)
    algorithm = Reinforce(policy, config.gamma, baseline, config.num_trajectories)
    return BatchAgent(algorithm)
def _run_episode(env, episode, agent, report):
    """Play one episode, feeding each reward back to the agent, then train it; returns the episode reward."""
    observation = env.reset()
    finished = False
    total = 0
    while not finished:
        if report.should_render(episode):
            env.render()
        observation, step_reward, finished, _ = env.step(agent.next_action(observation))
        agent.signal(step_reward)
        total += step_reward
    agent.train()
    return total
if __name__ == "__main__":
    # CLI entry point for the REINFORCE CartPole example.
    # (Fixed: trailing dataset-extraction garbage fused onto the final line made
    # this block syntactically invalid.)
    parser = argparse.ArgumentParser(description='Run reinforce example.')
    parser.add_argument('-e', '--episodes', type=int, default=3000, help='number of episodes to be run')
    parser.add_argument('-n', '--num-trajectories', type=int, default=10,
                        help='number of trajectories used in training of agent')
    parser.add_argument('-g', '--gamma', type=float, default=0.99, help='gamma used for reward accumulation')
    parser.add_argument('--lr-policy', type=float, default=50, help='learning rate of policy ANN')
    parser.add_argument('--lr-baseline', type=float, default=0.01, help='learning rate of baseline ANN')
    parser.add_argument('--render-frq', type=int, default=0, help='render every x episode')
    parser.add_argument('--log-frq', type=int, default=100, help='log every x episode')
    parser.add_argument('--log-lvl', type=str, default="info", help='log level (default: info)')
    args = parser.parse_args()

    logging.basicConfig(level=getattr(logging, args.log_lvl.upper()))
    run_reinforce(args)
import numpy as np
try:
import tensorflow as tf
import tensorflow.compat.v1 as tf1
except ImportError:
raise ImportError("reinforcement requires tensorflow 1.14")
from example.log_utils import log_2d_tensor_as_img
from reinforcement import tf_operations as tf_ops
class ParameterizedPolicy:
    """A linear softmax policy (observations @ theta -> softmax) trained in TF1 graph mode.

    The training op is only built once set_signal_calc() is called with a loss/signal
    function; estimate() and fit() then run against the supplied session.
    (Fixed: trailing dataset-extraction garbage fused onto the final line made this
    class syntactically invalid.)
    """

    def __init__(self, session, obs_dims, num_actions, summary_writer, lr=10):
        self._session = session
        self._lr = lr

        # Graph inputs: taken actions, (normalized) returns, and raw observations.
        self._in_actions = tf1.placeholder(shape=(None,), dtype=tf.uint8, name="actions")
        self._in_returns = tf1.placeholder(shape=(None,), dtype=tf.float32, name="returns")
        self._in_observations = tf1.placeholder(shape=(None, obs_dims), dtype=tf.float32, name="observations")
        theta = tf1.get_variable(f"theta", shape=(obs_dims, num_actions), dtype=tf.float32,
                                 initializer=tf.glorot_uniform_initializer())
        self._out_probabilities = tf.nn.softmax(tf.matmul(self._in_observations, theta))
        # Deferred: built in set_signal_calc.
        self._train = None

        self._logs = [tf1.summary.scalar(f"mean_normalized_return", tf.reduce_mean(self._in_returns)),
                      log_2d_tensor_as_img(f"theta", theta)]
        self._log_summary = tf.no_op
        self._summary_writer = summary_writer
        self._cur_episode = 0

    def set_signal_calc(self, signal_calc):
        """Build the training op from signal_calc(ops, actions, probabilities, returns) and init variables."""
        loss = -signal_calc(tf_ops, self._in_actions, self._out_probabilities, self._in_returns)
        self._train = tf1.train.GradientDescentOptimizer(learning_rate=self._lr).minimize(loss)
        self._session.run(tf1.global_variables_initializer())
        self._finish_logs(loss)

    def _finish_logs(self, loss):
        # The loss summary can only be added once the loss tensor exists.
        self._logs.append(tf1.summary.scalar("loss", loss))
        self._log_summary = tf1.summary.merge(self._logs)

    def estimate(self, observation):
        """Return the action-probability vector for a single observation."""
        return np.squeeze(
            self._session.run(self._out_probabilities, {self._in_observations: np.array(observation).reshape(1, -1)}))

    def fit(self, trajectory):
        """Run one gradient step on a trajectory (observations/actions/advantages) and log summaries."""
        _, log = self._session.run([self._train, self._log_summary],
                                   {self._in_observations: trajectory.observations,
                                    self._in_actions: trajectory.actions,
                                    self._in_returns: trajectory.advantages})
        self._summary_writer.add_summary(log, self._cur_episode)
        self._cur_episode += 1
import base64
import io
from io import BytesIO
from operator import itemgetter
import numpy as np
from rdkit import Chem
from rdkit.Chem import AllChem, Draw as rkcd
from torch.utils.tensorboard import summary as tbs
def find_matching_pattern_in_smiles(list_of_mols: [], smarts_pattern=None) -> []:
    """For each mol, return the tuple of atom indices matching the SMARTS pattern.

    Invalid (None) mols, non-matching mols, and a missing pattern all yield an
    empty tuple / empty result. As a side effect, matching molecules have their
    2D coordinates re-oriented to line up with the pattern depiction.
    """
    def orient_molecule_according_to_matching_pattern(molecule, pattern):
        try:
            pattern_mol = Chem.MolFromSmarts(pattern)
            if pattern_mol is not None:
                AllChem.Compute2DCoords(pattern_mol)
                AllChem.GenerateDepictionMatching2DStructure(molecule, pattern_mol, acceptFailure=True)
        except Exception:
            # Best effort: depiction alignment is purely cosmetic, so failures are
            # deliberately ignored. (Was a bare `except:`, which would also have
            # swallowed KeyboardInterrupt/SystemExit.)
            pass

    matches = []
    if smarts_pattern is not None:
        for mol in list_of_mols:
            if mol is not None:
                match_pattern = mol.GetSubstructMatch(Chem.MolFromSmarts(smarts_pattern))
                # Previously written as a conditional expression used as a statement.
                if len(match_pattern) > 0:
                    orient_molecule_according_to_matching_pattern(mol, smarts_pattern)
                matches.append(match_pattern)
            else:
                matches.append(())
    return matches
def padding_with_invalid_smiles(smiles, sample_size):
    """Pad *smiles* with the placeholder "INVALID" up to sample_size entries.

    Returns the input unchanged when it already has at least sample_size entries;
    otherwise returns a numpy array with the padding appended.
    """
    shortfall = sample_size - len(smiles)
    if shortfall > 0:
        filler = np.array(["INVALID"] * shortfall)
        smiles = np.concatenate((smiles, filler))
    return smiles
def check_for_invalid_mols_and_create_legend(smiles, score, sample_size):
    """Parse the first sample_size SMILES into mols and build matching legend strings.

    Valid molecules are labelled with their score (3 decimal places); unparsable
    SMILES produce a None mol and an "invalid" legend entry.
    """
    mols = []
    legends = []
    for i in range(sample_size):
        mol = Chem.MolFromSmiles(smiles[i])
        mols.append(mol)
        if mol is not None:
            legends.append(f"{score[i].item():.3f}")
        else:
            legends.append("This Molecule Is Invalid")
    return mols, legends
def sort_smiles_by_score(score, smiles: []):
    """Sort scores in descending order and reorder smiles to match.

    The sort is stable, so entries with equal scores keep their original
    relative order. Returns (sorted_scores, sorted_smiles) as two lists.
    """
    ranked = sorted(zip(score, smiles), key=itemgetter(0), reverse=True)
    sorted_score = [pair[0] for pair in ranked]
    sorted_smiles = [pair[1] for pair in ranked]
    return sorted_score, sorted_smiles
def mol_to_png_string(mol_list: [], molsPerRow=4, subImgSize=(300, 300), legend=None, matches=None):
    """Render the molecules as a grid PNG and return it base64-encoded.

    The str() wrapper around the base64 bytes is trimmed of its leading b' and
    trailing ' so only the encoded payload is returned.
    """
    image = rkcd.MolsToGridImage(mols=mol_list, molsPerRow=molsPerRow, subImgSize=subImgSize, useSVG=False,
                                 legends=legend, highlightAtomLists=matches)
    with BytesIO() as buffer:
        image.save(buffer, format="png")
        encoded = base64.b64encode(buffer.getvalue())
    return str(encoded)[2:][:-1]  # trim on both ends b' and '
def mol_to_svg_string(mol_list: [], molsPerRow=4, subImgSize=(300, 300), legend=None, matches=None):
    """Render the molecules as a grid image in SVG form and return it directly."""
    return rkcd.MolsToGridImage(mols=mol_list, molsPerRow=molsPerRow, subImgSize=subImgSize, useSVG=True,
                                legends=legend, highlightAtomLists=matches)
def add_mols(writer, tag, mols, mols_per_row=1, legends=None, global_step=None, walltime=None, size_per_mol=(300, 300), pattern=None):
    """Render *mols* as a grid image and log it to the TensorBoard *writer* under *tag*."""
    grid = rkcd.MolsToGridImage(mols, molsPerRow=mols_per_row, subImgSize=size_per_mol, legends=legends, highlightAtomLists=pattern)
    add_image(writer, tag, grid, global_step, walltime)
def add_image(writer, tag, image, global_step=None, walltime=None):
    """PNG-encode a PIL image and log it to the TensorBoard *writer* as an image summary."""
    num_channels = len(image.getbands())
    width, height = image.size

    with io.BytesIO() as png_buffer:
        image.save(png_buffer, format='png')
        encoded = png_buffer.getvalue()

    tb_image = tbs.Summary.Image(height=height, width=width, colorspace=num_channels,
                                 encoded_image_string=encoded)
    summary = tbs.Summary(value=[tbs.Summary.Value(tag=tag, image=tb_image)])
    writer.file_writer.add_summary(summary, global_step, walltime)
def fraction_valid_smiles(smiles):
    """Percentage (0-100) of the given SMILES strings that RDKit can parse.

    Returns 0.0 for empty input instead of raising ZeroDivisionError.
    (Also fixed: trailing dataset-extraction garbage fused onto the return line
    made this function syntactically invalid.)
    """
    if not smiles:
        return 0.0
    valid = sum(1 for smile in smiles if Chem.MolFromSmiles(smile))
    return 100 * valid / len(smiles)
import random
from typing import List, Tuple
from rdkit.Chem import AllChem, MolFromSmiles, MolToSmiles, MolStandardize, MolToInchiKey
from rdkit.Chem.rdchem import Mol
from rdkit.Chem.rdmolops import RenumberAtoms
from rdkit.DataStructs.cDataStructs import UIntSparseIntVect
from rdkit.Chem import SDWriter
class Conversions:
    """Helpers for converting between SMILES strings, RDKit Mol objects,
    fingerprints and SD files."""

    @staticmethod
    def smiles_to_mols_and_indices(query_smiles: List[str]) -> Tuple[List[Mol], List[int]]:
        """Parse SMILES and return the valid Mols together with their input positions.

        :param query_smiles: SMILES strings to parse.
        :return: (valid molecules, indices of those molecules in query_smiles).
        """
        mols = [MolFromSmiles(smile) for smile in query_smiles]
        valid_mask = [mol is not None for mol in mols]
        valid_idxs = [idx for idx, is_valid in enumerate(valid_mask) if is_valid]
        valid_mols = [mols[idx] for idx in valid_idxs]
        return valid_mols, valid_idxs

    @staticmethod
    def mols_to_fingerprints(molecules: List[Mol], radius=3, use_counts=True, use_features=True) \
            -> List[UIntSparseIntVect]:
        """Morgan fingerprints for a list of (assumed valid) molecules."""
        fingerprints = [AllChem.GetMorganFingerprint(mol, radius, useCounts=use_counts, useFeatures=use_features)
                        for mol in molecules]
        return fingerprints

    @staticmethod
    def smiles_to_mols(query_smiles: List[str]) -> List[Mol]:
        """Parse SMILES and return only the valid Mols (invalid inputs are dropped)."""
        mols = [MolFromSmiles(smile) for smile in query_smiles]
        return [mol for mol in mols if mol is not None]

    def smiles_to_fingerprints(self, query_smiles: List[str], radius=3, use_counts=True, use_features=True) \
            -> List[UIntSparseIntVect]:
        """Morgan fingerprints of the valid molecules among query_smiles."""
        mols = self.smiles_to_mols(query_smiles)
        return self.mols_to_fingerprints(mols, radius=radius, use_counts=use_counts, use_features=use_features)

    def smile_to_mol(self, smile: str) -> Mol:
        """
        Creates a Mol object from a SMILES string.
        :param smile: SMILES string.
        :return: A Mol object, or None if the input is falsy or not valid.
        """
        if smile:
            return MolFromSmiles(smile)

    def mols_to_smiles(self, molecules: List[Mol], isomericSmiles=False, canonical=True) -> List[str]:
        """Convert Mols to SMILES strings. This method assumes that all molecules are valid."""
        return [MolToSmiles(mol, isomericSmiles=isomericSmiles, canonical=canonical) for mol in molecules]

    def mol_to_smiles(self, molecule: Mol, isomericSmiles=False, canonical=True) -> str:
        """
        Converts a Mol object into a canonical SMILES string.
        :param molecule: Mol object.
        :return: A SMILES string, or None for a falsy input.
        """
        if molecule:
            return MolToSmiles(molecule, isomericSmiles=isomericSmiles, canonical=canonical)

    def mol_to_random_smiles(self, molecule: Mol) -> str:
        """
        Converts a Mol object into a random (non-canonical) SMILES string by
        shuffling the atom order before writing.
        :param molecule: Mol object.
        :return: A SMILES string, or None for a falsy input.
        """
        if molecule:
            new_atom_order = list(range(molecule.GetNumAtoms()))
            random.shuffle(new_atom_order)
            random_mol = RenumberAtoms(molecule, newOrder=new_atom_order)
            return MolToSmiles(random_mol, canonical=False, isomericSmiles=False)

    def convert_to_rdkit_smiles(self, smiles: str, allowTautomers=True, sanitize=False, isomericSmiles=False) -> str:
        """
        Converts a SMILES string into RDKit's canonical form.
        :param smiles: SMILES string to canonicalize.
        :param allowTautomers: When True the molecule may stay in the tautomeric
            form it was given in; when False tautomers are canonicalized as well.
        """
        if allowTautomers:
            return MolToSmiles(MolFromSmiles(smiles, sanitize=sanitize), isomericSmiles=isomericSmiles)
        else:
            return MolStandardize.canonicalize_tautomer_smiles(smiles)

    def copy_mol(self, molecule: Mol) -> Mol:
        """
        Copies, sanitizes, canonicalizes and cleans a molecule.
        :param molecule: A Mol object to copy.
        :return: Another Mol object copied, sanitized, canonicalized and cleaned.
        """
        return self.smile_to_mol(self.mol_to_smiles(molecule))

    def randomize_smiles(self, smiles: str) -> str:
        """
        Returns a random SMILES given a SMILES of a molecule.
        :param smiles: A SMILES string.
        :return: A random SMILES string of the same molecule, or None if the
            molecule is invalid.
        """
        mol = MolFromSmiles(smiles)
        if mol:
            new_atom_order = list(range(mol.GetNumHeavyAtoms()))
            random.shuffle(new_atom_order)
            random_mol = RenumberAtoms(mol, newOrder=new_atom_order)
            return MolToSmiles(random_mol, canonical=False, isomericSmiles=False)

    def mol_to_inchi_key(self, molecule: Mol) -> str:
        """Return the standard InChI key for a molecule, or None for a falsy input."""
        if molecule:
            return MolToInchiKey(molecule)

    def mol_to_sdf(self, molecules: List, input_sdf_path: str):
        """Write a set of molecules to an SD file.

        :param molecules: Mol objects to write.
        :param input_sdf_path: Destination path of the SD file.
        """
        writer = SDWriter(input_sdf_path)
        try:
            for mol in molecules:
                writer.write(mol)
        finally:
            # Bug fix: the writer was never closed, so buffered records could be
            # lost and the file handle leaked.
            writer.close()
import gzip
from typing import List
from reinvent_chemistry import Conversions
from reinvent_chemistry.standardization.filter_configuration import FilterConfiguration
from reinvent_chemistry.standardization.rdkit_standardizer import RDKitStandardizer
class FileReader:
    """Streams rows or SMILES strings from (optionally gzipped) delimited text files."""

    def __init__(self, configuration: List[FilterConfiguration], logger):
        self._conversions = Conversions()
        self._standardizer = RDKitStandardizer(configuration, logger)

    def read_library_design_data_file(self, file_path, ignore_invalid=True, num=-1, num_fields=0):
        """
        Reads a library design data file.
        :param num_fields: Number of columns from the beginning to be loaded.
        :param file_path: Path to a SMILES file.
        :param ignore_invalid: Ignores invalid lines (empty lines); when False,
            None is yielded for them instead.
        :param num: Parse up to num rows.
        :return: An iterator with the rows (lists of fields).
        """
        with self._open_file(file_path, "rt") as csv_file:
            for i, row in enumerate(csv_file):
                if i == num:
                    break
                # Commas and tabs are treated as field separators, like whitespace.
                splitted_row = row.rstrip().replace(",", " ").replace("\t", " ").split()
                if splitted_row:
                    if num_fields > 0:
                        splitted_row = splitted_row[0:num_fields]
                    yield splitted_row
                elif not ignore_invalid:
                    yield None

    def _open_file(self, path, mode="r", with_gzip=False):
        """
        Opens a file depending on whether it has or not gzip.
        :param path: Path where the file is located.
        :param mode: Mode to open the file.
        :param with_gzip: Open as a gzip file anyway.
        """
        open_func = open
        if path.endswith(".gz") or with_gzip:
            open_func = gzip.open
        return open_func(path, mode)

    def read_delimited_file(self, file_path, ignore_invalid=True, num=-1, standardize=False, randomize=False):
        """
        Reads a file with SMILES strings in the first column.
        :param standardize: Standardizes smiles.
        :param randomize: Randomizes smiles.
        :param file_path: Path to a SMILES file.
        :param ignore_invalid: Ignores invalid lines (empty lines, or SMILES
            rejected by the standardize/randomize actions); when False, None is
            yielded for them instead.
        :param num: Parse up to num rows.
        :return: An iterator with the SMILES strings.
        """
        actions = []
        if standardize:
            actions.append(self._standardizer.apply_filter)
        if randomize:
            actions.append(self._conversions.randomize_smiles)
        with open(file_path, "r") as csv_file:
            for i, row in enumerate(csv_file):
                if i == num:
                    break
                splitted_row = row.rstrip().replace(",", " ").replace("\t", " ").split()
                if not splitted_row:
                    # Bug fix: empty/whitespace-only lines previously raised
                    # IndexError on splitted_row[0]; treat them as invalid rows,
                    # mirroring read_library_design_data_file.
                    if not ignore_invalid:
                        yield None
                    continue
                smiles = splitted_row[0]
                for action in actions:
                    if smiles:
                        smiles = action(smiles)
                if smiles:
                    yield smiles
                elif not ignore_invalid:
                    yield None
from typing import List, Dict
import numpy as np
from rdkit import DataStructs
from rdkit.Avalon import pyAvalonTools
from rdkit.Chem import AllChem, MACCSkeys, Mol
from rdkit.Chem.rdMolDescriptors import GetHashedMorganFingerprint
from reinvent_chemistry.enums.component_specific_parameters_enum import ComponentSpecificParametersEnum
from reinvent_chemistry.enums.descriptor_types_enum import DescriptorTypesEnum
class Descriptors:
    """Molecular descriptors.

    The descriptors in this class are mostly RDKit fingerprints used as inputs to
    scikit-learn predictive models. Since scikit-learn predictive models take
    numpy arrays as input, RDKit fingerprints are converted to numpy arrays.
    """

    def __init__(self):
        self._descriptor_types = DescriptorTypesEnum()
        self._specific_parameters = ComponentSpecificParametersEnum()

    def load_descriptor(self, parameters: dict):
        """Return the descriptor function selected by *parameters*.

        Falls back to ECFP counts when no descriptor type is specified.

        :param parameters: Component parameters; may carry a DESCRIPTOR_TYPE key.
        :raises KeyError: if the requested descriptor type is not registered.
        """
        descriptor_type = parameters.get(self._specific_parameters.DESCRIPTOR_TYPE, self._descriptor_types.ECFP_COUNTS)
        registry = self._descriptor_registry()
        try:
            return registry[descriptor_type]
        except KeyError:
            # Surface a readable error instead of a bare KeyError on the raw name.
            raise KeyError(f'descriptor type "{descriptor_type}" is not registered; '
                           f'available types: {sorted(registry)}') from None

    def _descriptor_registry(self) -> dict:
        """Map descriptor-type names to their implementations."""
        descriptor_list = dict(ecfp=self.molecules_to_fingerprints,
                               ecfp_counts=self.molecules_to_count_fingerprints,
                               maccs_keys=self.maccs_keys,
                               avalon=self.avalon)
        return descriptor_list

    def maccs_keys(self, molecules: List[Mol], parameters: dict):
        """MACCS structural keys as int32 numpy arrays (*parameters* is unused)."""
        fps = [MACCSkeys.GenMACCSKeys(mol) for mol in molecules]
        fingerprints = [self._numpy_fingerprint(fp, dtype=np.int32) for fp in fps]
        return fingerprints

    def avalon(self, molecules: List[Mol], parameters: dict):
        """Avalon fingerprints; the 'size' parameter sets the bit count (default 512)."""
        size = parameters.get('size', 512)
        fps = [pyAvalonTools.GetAvalonFP(mol, nBits=size) for mol in molecules]
        fingerprints = [self._numpy_fingerprint(fp, dtype=np.int32) for fp in fps]
        return fingerprints

    def molecules_to_fingerprints(self, molecules: List[Mol], parameters: dict) -> List[np.ndarray]:
        """Morgan bit-vector fingerprints ('radius' default 3, 'size' default 2048)."""
        radius = parameters.get('radius', 3)
        size = parameters.get('size', 2048)
        fp_bits = [AllChem.GetMorganFingerprintAsBitVect(mol, radius, size) for mol in molecules]
        fingerprints = [self._numpy_fingerprint(fp, dtype=np.int32) for fp in fp_bits]
        return fingerprints

    def molecules_to_count_fingerprints_ori(self, molecules: List[Mol], parameters: dict) -> np.ndarray:
        """Morgan Count Fingerprints.

        This is the "original" implementation, with its own hashing code: sparse
        fingerprint indices are folded into a fixed-size array via modulo.
        """
        radius = parameters.get('radius', 3)
        useCounts = parameters.get('use_counts', True)
        useFeatures = parameters.get('use_features', True)
        size = parameters.get('size', 2048)
        fps = [AllChem.GetMorganFingerprint(mol, radius, useCounts=useCounts, useFeatures=useFeatures) for mol in molecules]
        nfp = np.zeros((len(fps), size), np.int32)
        for i, fp in enumerate(fps):
            for idx, v in fp.GetNonzeroElements().items():
                # Fold the sparse index into the fixed-width fingerprint.
                nidx = idx % size
                nfp[i, nidx] += int(v)
        return nfp

    def molecules_to_count_fingerprints(self, molecules: List[Mol], parameters: Dict) -> List[np.ndarray]:
        """Morgan Count Fingerprints from RDKit.

        This implementation uses RDKit's hashing through GetHashedMorganFingerprint.
        See https://stackoverflow.com/a/55119975
        """
        radius = parameters.get("radius", 3)
        useFeatures = parameters.get("use_features", True)
        size = parameters.get("size", 2048)
        fps = [
            GetHashedMorganFingerprint(
                mol, radius, nBits=size, useFeatures=useFeatures,
            )
            for mol in molecules
        ]
        fingerprints = [self._numpy_fingerprint(fp, dtype=np.int32) for fp in fps]
        return fingerprints

    def _numpy_fingerprint(self, rdkit_fingerprint, dtype=None) -> np.ndarray:
        """Converts RDKit fingerprint to numpy array."""
        numpy_fp = np.zeros((0,), dtype=dtype)  # Initialize empty array, RDKit will resize it.
        DataStructs.ConvertToNumpyArray(rdkit_fingerprint, numpy_fp)
        return numpy_fp
from rdkit.Chem import Mol, GetDistanceMatrix, rdMolDescriptors, rdchem, Descriptors
from reinvent_chemistry import Conversions, TransformationTokens
from reinvent_chemistry.link_invent.bond_breaker import BondBreaker
from reinvent_chemistry.library_design.attachment_points import AttachmentPoints
from reinvent_chemistry.link_invent.attachment_point_modifier import AttachmentPointModifier
class LinkerDescriptors:
    """Molecular descriptors specific to the linker part of a labeled molecule.

    Methods taking a labeled Mol first extract the linker fragment (capping its
    attachment points with hydrogens where a whole-molecule descriptor needs a
    chemically complete fragment). The *_from_smile(s) variants operate on a
    linker SMILES that contains two attachment-point tokens.
    """

    def __init__(self):
        self._bond_breaker = BondBreaker()
        self._attachment_points = AttachmentPoints()
        self._conversions = Conversions()
        self._tokens = TransformationTokens()
        self._attachment_point_modifier = AttachmentPointModifier()

    # --- internal helpers (extracted to remove per-method boilerplate) ---

    def _linker(self, labeled_mol: Mol) -> Mol:
        """Linker fragment of a labeled molecule, attachment points untouched."""
        return self._bond_breaker.get_linker_fragment(labeled_mol)

    def _capped_linker(self, labeled_mol: Mol) -> Mol:
        """Linker fragment with its attachment points capped by hydrogens."""
        return self._attachment_point_modifier.cap_linker_with_hydrogen(self._linker(labeled_mol))

    @staticmethod
    def _count_hybridization(mol: Mol, hybridization) -> int:
        """Number of atoms in *mol* with the given RDKit hybridization type."""
        return len([atom for atom in mol.GetAtoms() if atom.GetHybridization() == hybridization])

    def _attachment_point_indices(self, linker_mol: Mol):
        """Atom indices of the two attachment-point atoms of a linker Mol."""
        (ap_idx_0, ), (ap_idx_1, ) = linker_mol.GetSubstructMatches(self._conversions.smile_to_mol(
            self._tokens.ATTACHMENT_POINT_TOKEN))
        return ap_idx_0, ap_idx_1

    # --- descriptors on labeled molecules ---

    def effective_length(self, labeled_mol: Mol) -> int:
        """Topological distance between the two labeled attachment atoms."""
        linker_mol = self._linker(labeled_mol)
        ap_idx = [i[0] for i in self._bond_breaker.get_labeled_atom_dict(linker_mol).values()]
        distance_matrix = GetDistanceMatrix(linker_mol)
        return int(distance_matrix[ap_idx[0], ap_idx[1]])

    def max_graph_length(self, labeled_mol: Mol) -> int:
        """Largest topological distance between any two atoms of the linker."""
        return int(GetDistanceMatrix(self._linker(labeled_mol)).max())

    def length_ratio(self, labeled_mol: Mol) -> float:
        """Effective linker length as a percentage of the maximum graph length."""
        return self.effective_length(labeled_mol) / self.max_graph_length(labeled_mol) * 100

    def num_rings(self, labeled_mol: Mol) -> int:
        """Ring count of the hydrogen-capped linker."""
        return rdMolDescriptors.CalcNumRings(self._capped_linker(labeled_mol))

    def num_aromatic_rings(self, labeled_mol: Mol) -> int:
        """Aromatic ring count of the hydrogen-capped linker."""
        return rdMolDescriptors.CalcNumAromaticRings(self._capped_linker(labeled_mol))

    def num_aliphatic_rings(self, labeled_mol: Mol) -> int:
        """Aliphatic ring count of the hydrogen-capped linker."""
        return rdMolDescriptors.CalcNumAliphaticRings(self._capped_linker(labeled_mol))

    def num_sp_atoms(self, labeled_mol: Mol) -> int:
        """Number of sp-hybridized atoms in the (uncapped) linker."""
        return self._count_hybridization(self._linker(labeled_mol), rdchem.HybridizationType.SP)

    def num_sp2_atoms(self, labeled_mol: Mol) -> int:
        """Number of sp2-hybridized atoms in the (uncapped) linker."""
        return self._count_hybridization(self._linker(labeled_mol), rdchem.HybridizationType.SP2)

    def num_sp3_atoms(self, labeled_mol: Mol) -> int:
        """Number of sp3-hybridized atoms in the (uncapped) linker."""
        return self._count_hybridization(self._linker(labeled_mol), rdchem.HybridizationType.SP3)

    def num_hbd(self, labeled_mol: Mol) -> int:
        """Hydrogen-bond donor count of the hydrogen-capped linker."""
        return rdMolDescriptors.CalcNumHBD(self._capped_linker(labeled_mol))

    def num_hba(self, labeled_mol: Mol) -> int:
        """Hydrogen-bond acceptor count of the hydrogen-capped linker."""
        return rdMolDescriptors.CalcNumHBA(self._capped_linker(labeled_mol))

    def mol_weight(self, labeled_mol: Mol) -> float:
        """Molecular weight of the hydrogen-capped linker."""
        return Descriptors.MolWt(self._capped_linker(labeled_mol))

    def ratio_rotatable_bonds(self, labeled_mol: Mol) -> float:
        """Rotatable bonds as a percentage of all bonds in the capped linker."""
        linker_mol = self._capped_linker(labeled_mol)
        num_rotatable_bonds = rdMolDescriptors.CalcNumRotatableBonds(linker_mol)
        return num_rotatable_bonds / linker_mol.GetNumBonds() * 100

    # --- descriptors on linker SMILES ---

    def effective_length_from_smile(self, linker_smile: str) -> int:
        """Topological distance between the attachment points of a linker SMILES.

        Subtracts the two bonds to the attachment-point atoms themselves to be
        consistent with effective_length().
        """
        linker_mol = self._conversions.smile_to_mol(linker_smile)
        distance_matrix = GetDistanceMatrix(linker_mol)
        ap_idx_0, ap_idx_1 = self._attachment_point_indices(linker_mol)
        return int(distance_matrix[ap_idx_0, ap_idx_1] - 2)

    def max_graph_length_from_smile(self, linker_smile: str) -> int:
        """Largest topological distance of a linker SMILES, ignoring attachment atoms."""
        linker_mol = self._conversions.smile_to_mol(linker_smile)
        ap_idx_0, ap_idx_1 = self._attachment_point_indices(linker_mol)
        distance_matrix = GetDistanceMatrix(linker_mol)
        # Zero out distances to/from the attachment-point atoms to be
        # consistent with max_graph_length().
        distance_matrix[[ap_idx_0, ap_idx_1], :] = 0
        distance_matrix[:, [ap_idx_0, ap_idx_1]] = 0
        return int(distance_matrix.max())

    def length_ratio_from_smiles(self, linker_smile: str) -> float:
        """Effective length as a percentage of the maximum graph length."""
        max_length = self.max_graph_length_from_smile(linker_smile)
        effective_length = self.effective_length_from_smile(linker_smile)
        return effective_length / max_length * 100

    def num_rings_from_smiles(self, linker_smile: str) -> int:
        """Ring count of the linker SMILES."""
        return rdMolDescriptors.CalcNumRings(self._conversions.smile_to_mol(linker_smile))

    def num_aromatic_rings_from_smiles(self, linker_smile: str) -> int:
        """Aromatic ring count of the linker SMILES."""
        return rdMolDescriptors.CalcNumAromaticRings(self._conversions.smile_to_mol(linker_smile))

    def num_aliphatic_rings_from_smiles(self, linker_smile: str) -> int:
        """Aliphatic ring count of the linker SMILES."""
        return rdMolDescriptors.CalcNumAliphaticRings(self._conversions.smile_to_mol(linker_smile))

    def num_sp_atoms_from_smiles(self, linker_smile: str) -> int:
        """Number of sp-hybridized atoms in the linker SMILES."""
        return self._count_hybridization(self._conversions.smile_to_mol(linker_smile),
                                         rdchem.HybridizationType.SP)

    def num_sp2_atoms_from_smiles(self, linker_smile: str) -> int:
        """Number of sp2-hybridized atoms in the linker SMILES."""
        return self._count_hybridization(self._conversions.smile_to_mol(linker_smile),
                                         rdchem.HybridizationType.SP2)

    def num_sp3_atoms_from_smiles(self, linker_smile: str) -> int:
        """Number of sp3-hybridized atoms in the linker SMILES."""
        return self._count_hybridization(self._conversions.smile_to_mol(linker_smile),
                                         rdchem.HybridizationType.SP3)

    def num_hbd_from_smiles(self, linker_smile: str) -> int:
        """Hydrogen-bond donor count of the linker SMILES."""
        return rdMolDescriptors.CalcNumHBD(self._conversions.smile_to_mol(linker_smile))

    def num_hba_from_smiles(self, linker_smile: str) -> int:
        """Hydrogen-bond acceptor count of the linker SMILES."""
        return rdMolDescriptors.CalcNumHBA(self._conversions.smile_to_mol(linker_smile))

    def mol_weight_from_smiles(self, linker_smile: str) -> float:
        """Molecular weight of the linker SMILES."""
        return Descriptors.MolWt(self._conversions.smile_to_mol(linker_smile))

    def ratio_rotatable_bonds_from_smiles(self, linker_smile: str) -> float:
        """Rotatable bonds as a percentage of all bonds in the linker SMILES."""
        linker_mol = self._conversions.smile_to_mol(linker_smile)
        num_rotatable_bonds = rdMolDescriptors.CalcNumRotatableBonds(linker_mol)
        return num_rotatable_bonds / linker_mol.GetNumBonds() * 100
import re
import io
from typing import List, Tuple
from collections import defaultdict
from matplotlib import cm
from numpy import linspace
from rdkit.Chem import Mol
from rdkit.Chem.Draw import rdMolDraw2D
from PIL import Image
from reinvent_chemistry import Conversions, TransformationTokens
from reinvent_chemistry.library_design import BondMaker, AttachmentPoints
from reinvent_chemistry.link_invent.bond_breaker import BondBreaker
class MoleculeWithHighlighting:
    """Draws a molecule as a PIL image with the joining bonds/atoms of each
    parts string highlighted in its own colormap color."""

    def __init__(self, color_map_name: str = 'Set3', image_size=(400, 400)):
        # color_map_name: matplotlib colormap used to assign one color per parts string.
        # image_size: (width, height) in pixels of the rendered drawing.
        self._conversions = Conversions()
        self._tokens = TransformationTokens()
        self._bond_maker = BondMaker()
        self._bond_breaker = BondBreaker()
        self._attachment_points = AttachmentPoints()
        self._color_map_name = color_map_name # name of the matplotlib colormap
        self._image_size = image_size
        self._re_attachment_point = re.compile(self._tokens.ATTACHMENT_POINT_REGEXP)

    def get_image(self, mol: Mol, parts_str_list: List[str], label: str):
        """Render *mol* with highlights derived from every entry of *parts_str_list*.

        :param mol: Molecule to draw; re-canonicalized first so atom indices
            line up with the highlight dictionaries.
        :param parts_str_list: Strings of separator-joined SMILES parts (one
            linker with two attachment points plus warheads with one each).
        :param label: Legend text passed to the drawer.
        :return: A PIL Image of the drawing.
        """
        mol = self._make_mole_canonical(mol)
        atom_dict, bond_dict = self._get_highlight_dicts(parts_str_list)
        d = rdMolDraw2D.MolDraw2DCairo(*self._image_size)
        d.DrawMoleculeWithHighlights(mol, label, atom_dict, bond_dict, {}, {})
        image = Image.open(io.BytesIO(d.GetDrawingText()))
        return image

    def _get_highlight_dicts(self, parts_str_list: List[str]) -> Tuple[dict, dict]:
        """Build {atom_idx: [colors]} and {bond_idx: [colors]} highlight maps.

        For each parts string, the bonds that join linker and warheads (and
        both end atoms of each such bond) are collected, then every parts
        string is assigned one evenly spaced colormap color.
        """
        atoms_to_highlight = []
        bonds_to_highlight = []
        for idx, parts_str in enumerate(parts_str_list):
            linker, warhead = self._get_parts(parts_str)
            mol_numbered_ap = self._get_labeled_mol(linker, warhead)
            # Canonicalize so atom indices match the canonical mol drawn in get_image.
            mol_numbered_ap = self._make_mole_canonical(mol_numbered_ap)
            atom_pairs_to_highlight = self._bond_breaker.get_bond_atoms_idx_pairs(mol_numbered_ap)
            bonds_to_highlight.append([mol_numbered_ap.GetBondBetweenAtoms(*atom_pair).GetIdx() for atom_pair in
                                       atom_pairs_to_highlight])
            atoms_to_highlight.append([item for sublist in atom_pairs_to_highlight for item in sublist])
        n_colors = len(parts_str_list)
        colors = [tuple(c) for c in getattr(cm, self._color_map_name)(linspace(0, 1, n_colors))]
        atom_dict = self._get_color_dict(atoms_to_highlight, colors)
        bond_dict = self._get_color_dict(bonds_to_highlight, colors)
        return atom_dict, bond_dict

    def _get_parts(self, smile_parts_str: str) -> Tuple[str, str]:
        """Split a parts string into (linker, warheads).

        The part containing exactly two attachment-point tokens is the linker;
        all parts with exactly one are re-joined as the warheads string.
        """
        parts_list = smile_parts_str.split(self._tokens.ATTACHMENT_SEPARATOR_TOKEN)
        n_ap = [len(self._re_attachment_point.findall(p)) for p in parts_list]
        linker = [part for part, number_ap in zip(parts_list, n_ap) if number_ap == 2][0]
        warheads = self._tokens.ATTACHMENT_SEPARATOR_TOKEN.join(
            [part for part, number_ap in zip(parts_list, n_ap) if number_ap == 1])
        return linker, warheads

    def _get_labeled_mol(self, linker_smi: str, warheads_smi: str) -> Mol:
        """Join linker and warheads into a single Mol, keeping attachment labels on atoms."""
        linker_numbered = self._attachment_points.add_attachment_point_numbers(linker_smi, canonicalize=False)
        mol = self._bond_maker.join_scaffolds_and_decorations(linker_numbered, warheads_smi, keep_labels_on_atoms=True)
        return mol

    def _make_mole_canonical(self, mol: Mol) -> Mol:
        """Round-trip through canonical SMILES to normalize atom ordering."""
        return self._conversions.smile_to_mol(self._conversions.mol_to_smiles(mol, canonical=True))

    @staticmethod
    def _get_color_dict(index_list: List[List[int]], color_list: List[tuple]) -> dict:
        """Map each atom/bond index to the colors of all part sets containing it."""
        color_dict = defaultdict(list)
        for color_idx, indexes in enumerate(index_list):
            for index in indexes:
                color_dict[index].append(color_list[color_idx])
        return dict(color_dict)
import re
from typing import List
from rdkit.Chem import AllChem, MolToSmiles
from rdkit.Chem import SaltRemover
from rdkit.Chem import rdmolops
from rdkit.Chem.rdmolfiles import MolFromSmarts, MolFromSmiles
from rdkit.Chem.rdmolops import RemoveHs
from reinvent_chemistry.enums import FilterTypesEnum
class FilterRegistry:
    """Registry of named RDKit standardization filters.

    Every filter takes a Mol (plus optional keyword parameters) and returns a
    possibly modified Mol, or None when the molecule is rejected.
    """

    def __init__(self):
        filter_types = FilterTypesEnum()
        self._filters = {filter_types.NEUTRALIZE_CHARGES: self._neutralise_charges,
                         filter_types.GET_LARGEST_FRAGMENT: self._get_largest_fragment,
                         filter_types.REMOVE_HYDROGENS: self._remove_hydrogens,
                         filter_types.REMOVE_SALTS: self._remove_salts,
                         filter_types.GENERAL_CLEANUP: self._general_cleanup,
                         filter_types.TOKEN_FILTERS: self._token_filters,
                         filter_types.VOCABULARY_FILTER: self._vocabulary_filters,
                         filter_types.VALID_SIZE: self._valid_size,
                         filter_types.HEAVY_ATOM_FILTER: self._heavy_atom_filter,
                         filter_types.ALLOWED_ATOM_TYPE: self._allowed_atom_type,
                         filter_types.ALIPHATIC_CHAIN_FILTER: self._aliphatic_chain_filter,
                         filter_types.DEFAULT: self.standardize}

    def get_filter(self, filter_name: str):
        """Return the filter callable registered under *filter_name*.

        :raises KeyError: if no filter with that name exists.
        """
        try:
            return self._filters[filter_name]
        except KeyError:
            # Bug fix: dict.get never raises, and the original constructed a
            # KeyError without raising it, so unknown names silently returned
            # None (causing a confusing TypeError later when the "filter" was
            # called). Raise the intended error instead.
            raise KeyError(f'requested filter "{filter_name}" does not exist in the registry') from None

    def _get_largest_fragment(self, mol):
        """Return the disconnected fragment of *mol* with the most heavy atoms."""
        frags = rdmolops.GetMolFrags(mol, asMols=True, sanitizeFrags=True)
        maxmol = None
        for frag in frags:
            if frag is None:
                continue
            if maxmol is None:
                maxmol = frag
            if maxmol.GetNumHeavyAtoms() < frag.GetNumHeavyAtoms():
                maxmol = frag
        return maxmol

    def _remove_hydrogens(self, mol):
        """Strip explicit hydrogens."""
        return RemoveHs(mol, implicitOnly=False, updateExplicitCount=False, sanitize=True)

    def _remove_salts(self, mol):
        """Strip salt counter-ions, always keeping at least one fragment."""
        return SaltRemover.SaltRemover().StripMol(mol, dontRemoveEverything=True)

    def _initialiseNeutralisationReactions(self):
        """Build (SMARTS pattern, replacement fragment) pairs for charge neutralization."""
        patts = (
            # Imidazoles
            ('[n+;H]', 'n'),
            # Amines
            ('[N+;!H0]', 'N'),
            # Carboxylic acids and alcohols
            ('[$([O-]);!$([O-][#7])]', 'O'),
            # Thiols
            ('[S-;X1]', 'S'),
            # Sulfonamides
            ('[$([N-;X2]S(=O)=O)]', 'N'),
            # Enamines
            ('[$([N-;X2][C,N]=C)]', 'N'),
            # Tetrazoles
            ('[n-]', '[nH]'),
            # Sulfoxides
            ('[$([S-]=O)]', 'S'),
            # Amides
            ('[$([N-]C=O)]', 'N'),
        )
        return [(MolFromSmarts(x), MolFromSmiles(y, False)) for x, y in patts]

    def _neutralise_charges(self, mol, reactions=None):
        """Neutralize charged groups by repeated substructure replacement."""
        if reactions is None:
            reactions = self._initialiseNeutralisationReactions()
        for reactant, product in reactions:
            # Replace until no match remains, since replacement can expose new matches.
            while mol.HasSubstructMatch(reactant):
                rms = AllChem.ReplaceSubstructs(mol, reactant, product)
                mol = rms[0]
        return mol

    def _general_cleanup(self, mol):
        """Cleanup + sanitize the molecule, then strip explicit hydrogens."""
        rdmolops.Cleanup(mol)
        rdmolops.SanitizeMol(mol)
        mol = rdmolops.RemoveHs(mol, implicitOnly=False, updateExplicitCount=False, sanitize=True)
        return mol

    def _token_filters(self, mol):
        """Reject molecules containing charge-separated nitriles, [OH+] or [SH] groups."""
        if mol:
            cyano_filter = "[C-]#[N+]"
            oh_filter = "[OH+]"
            sulfur_filter = "[SH]"
            if not mol.HasSubstructMatch(MolFromSmarts(cyano_filter)) \
                    and not mol.HasSubstructMatch(MolFromSmarts(oh_filter)) \
                    and not mol.HasSubstructMatch(MolFromSmarts(sulfur_filter)):
                return mol

    def _vocabulary_filters(self, mol, vocabulary: List[str]):
        """Reject molecules whose canonical SMILES contains tokens outside *vocabulary*."""
        # TODO: clean this up
        if mol:
            smiles = MolToSmiles(mol, isomericSmiles=False, canonical=True)
            REGEXPS = {
                "brackets": re.compile(r"(\[[^\]]*\])"),
                "2_ring_nums": re.compile(r"(%\d{2})"),
                "brcl": re.compile(r"(Br|Cl)")
            }
            REGEXP_ORDER = ["brackets", "2_ring_nums", "brcl"]

            def split_by(data, regexps):
                # Recursively tokenize: split on one pattern, recurse into the
                # non-matching segments with the remaining patterns.
                if not regexps:
                    return list(data)
                regexp = REGEXPS[regexps[0]]
                splitted = regexp.split(data)
                tokens = []
                for i, split in enumerate(splitted):
                    if i % 2 == 0:
                        tokens += split_by(split, regexps[1:])
                    else:
                        tokens.append(split)
                return tokens

            tokens = split_by(smiles, REGEXP_ORDER)
            for token in tokens:
                if token not in vocabulary:
                    return None
            return mol

    def _aliphatic_chain_filter(self, mol):
        """Reject molecules with an acyclic aliphatic chain of 5 or more carbons."""
        if mol:
            # remove aliphatic side chains with at least 5 carbons not in a ring
            sma = '[CR0]-[CR0]-[CR0]-[CR0]-[CR0]'
            has_long_sidechains = mol.HasSubstructMatch(MolFromSmarts(sma))
            if not has_long_sidechains:
                return mol

    def _allowed_atom_type(self, mol, element_list=None):
        """Reject molecules containing elements outside *element_list*
        (default atomic numbers: C, N, O, F, S, Cl, Br)."""
        if element_list is None:
            element_list = [6, 7, 8, 9, 16, 17, 35]
        if mol:
            valid_elements = all([atom.GetAtomicNum() in element_list for atom in mol.GetAtoms()])
            if valid_elements:
                return mol

    def _heavy_atom_filter(self, mol, min_heavy_atoms=2, max_heavy_atoms=70):
        """Reject molecules whose heavy-atom count is outside the exclusive (min, max) range."""
        if mol:
            correct_size = min_heavy_atoms < mol.GetNumHeavyAtoms() < max_heavy_atoms
            if correct_size:
                return mol

    def _valid_size(self, mol, min_heavy_atoms=2, max_heavy_atoms=70, element_list=None, remove_long_side_chains=True):
        """Filters molecules on number of heavy atoms, atom types and long side chains."""
        if element_list is None:
            element_list = [6, 7, 8, 9, 16, 17, 35]
        if mol:
            correct_size = min_heavy_atoms < mol.GetNumHeavyAtoms() < max_heavy_atoms
            if not correct_size:
                return
            valid_elements = all([atom.GetAtomicNum() in element_list for atom in mol.GetAtoms()])
            if not valid_elements:
                return
            if remove_long_side_chains:
                # remove aliphatic side chains with at least 5 carbons not in a ring
                sma = '[CR0]-[CR0]-[CR0]-[CR0]-[CR0]'
                has_long_sidechains = mol.HasSubstructMatch(MolFromSmarts(sma))
                if has_long_sidechains:
                    return
            return mol

    def standardize(self, mol, min_heavy_atoms=2, max_heavy_atoms=70, element_list=None,
                    remove_long_side_chains=True, neutralise_charges=True):
        """Default pipeline: largest fragment -> remove Hs -> strip salts ->
        (optionally) neutralize charges -> general cleanup -> token filter ->
        size/element/side-chain filter.

        :return: The standardized Mol, or None if any stage rejects it.
        """
        if mol:
            mol = self._get_largest_fragment(mol)
        if mol:
            mol = self._remove_hydrogens(mol)
        if mol:
            mol = self._remove_salts(mol)
        if mol and neutralise_charges:
            mol = self._neutralise_charges(mol)
        if mol:
            mol = self._general_cleanup(mol)
        if mol:
            mol = self._token_filters(mol)
        if mol:
            mol = self._valid_size(mol, min_heavy_atoms, max_heavy_atoms, element_list, remove_long_side_chains)
            return mol
        return None
from typing import Dict, List
from rdkit.Chem.rdmolfiles import MolToSmiles
from reinvent_chemistry import Conversions
from reinvent_chemistry.standardization.filter_configuration import FilterConfiguration
from reinvent_chemistry.standardization.filter_registry import FilterRegistry
def disable_rdkit_logging():
    """
    Suppress RDKit's noisy error logging.
    """
    import rdkit.rdBase as rkrb
    import rdkit.RDLogger as rkl

    rkl.logger().setLevel(rkl.ERROR)
    rkrb.DisableLog('rdApp.error')
# Silence RDKit as a module-import side effect, before any standardization runs.
disable_rdkit_logging()
class RDKitStandardizer:
    """Applies a configured chain of RDKit standardization filters to SMILES strings."""

    def __init__(self, filter_configs: List[FilterConfiguration], logger):
        # logger may be falsy/None; messages are then dropped (see _log_message).
        self._conversions = Conversions()
        self._filter_configs = self._set_filter_configs(filter_configs)
        self._filters = self._load_filters(self._filter_configs)
        self._logger = logger

    def apply_filter(self, smile: str) -> str:
        """Run every configured filter over *smile* in order.

        :param smile: Input SMILES string.
        :return: Canonical non-isomeric SMILES of the filtered molecule, or
            None if parsing failed or any filter rejected it.
        """
        molecule = self._conversions.smile_to_mol(smile)
        for config in self._filter_configs:
            if molecule:
                rdkit_filter = self._filters[config.name]
                if config.parameters:
                    molecule = rdkit_filter(molecule, **config.parameters)
                else:
                    molecule = rdkit_filter(molecule)
            else:
                # NOTE(review): once a filter returns None, this branch logs one
                # message per *remaining* config, each naming that config rather
                # than the filter that actually rejected the molecule.
                message = f'filter: "{config.name}" detected the following SMILES string {smile} as invalid!'
                self._log_message(message)
        if not molecule:
            # NOTE(review): the final message always names the last configured
            # filter, which may not be the one that rejected the molecule.
            message = f'filter: "{self._filter_configs[-1].name}" detected the following SMILES string {smile} as invalid!'
            self._log_message(message)
            return None
        valid_smile = MolToSmiles(molecule, isomericSmiles=False)
        return valid_smile

    def _set_filter_configs(self, filter_configs):
        # Fall back to the single "default" filter when no configs are supplied.
        return filter_configs if filter_configs else [FilterConfiguration(name="default", parameters={})]

    def _load_filters(self, filter_configs: List[FilterConfiguration]) -> Dict:
        """Resolve each configured filter name to its callable via the registry."""
        registry = FilterRegistry()
        return {filter_config.name: registry.get_filter(filter_config.name) for filter_config in filter_configs}

    def _log_message(self, message: str):
        # Logging is optional; silently ignore messages when no logger was provided.
        if self._logger:
            self._logger.log_message(message)
from typing import List, Dict, Tuple
from rdkit.Chem.rdchem import Mol, AtomKekulizeException, Atom
from rdkit.Chem.rdmolops import FragmentOnBonds, GetMolFrags
from reinvent_chemistry import Conversions, TransformationTokens
from reinvent_chemistry.library_design.dtos import ReactionOutcomeDTO
class BondMapper:
def __init__(self):
self._conversions = Conversions()
self._tokens = TransformationTokens()
def convert_building_blocks_to_fragments(self, molecule: Mol, neighbor_map: Dict,
list_of_outcomes: List[ReactionOutcomeDTO]) -> List[Tuple[Mol]]:
all_fragments = []
for outcome in list_of_outcomes:
try:
reagent_pairs = outcome.reaction_outcomes
for reagent_pair in reagent_pairs:
list_of_atom_pairs = self._find_bonds_targeted_by_reaction(reagent_pair, neighbor_map)
bonds_to_cut = self._find_indices_of_target_bonds(molecule, list_of_atom_pairs)
if len(bonds_to_cut) == 1:
reaction_fragments = self._create_fragments(molecule, bonds_to_cut)
all_fragments.append(reaction_fragments)
except AtomKekulizeException as ex:
raise AtomKekulizeException(f"failed scaffold: {self._conversions.mol_to_smiles(molecule)} \n for reaction: {outcome.reaction_smarts} \n {ex}") from ex
return all_fragments
def _find_bonds_targeted_by_reaction(self, reagent_pair: Tuple[Mol], neighbor_map: Dict) -> List[Tuple[int]]:
atom_pairs = []
for reagent in reagent_pair:
reactant_map = self._create_neighbor_map_for_reactant(reagent)
atom_pair = self._indentify_mismatching_indices(neighbor_map, reactant_map)
atom_pairs.extend(atom_pair)
return atom_pairs
def _create_neighbor_map_for_reactant(self, reactant: Mol) -> Dict[int, List[int]]:
interaction_map = {}
for atom in reactant.GetAtoms():
if atom.HasProp("react_atom_idx"):
neighbor_indxs = self._get_original_ids_from_reactant(atom)
interaction_map[int(atom.GetProp("react_atom_idx"))] = neighbor_indxs
return interaction_map
def _get_original_ids_from_reactant(self, atom: Atom) -> List[int]:
neighbours = atom.GetNeighbors()
indices = [int(neighbor.GetProp("react_atom_idx")) for neighbor in neighbours if
neighbor.HasProp("react_atom_idx")]
neighbor_indxs = [idx for idx in indices]
return neighbor_indxs
def _indentify_mismatching_indices(self, original: Dict, derived: Dict) -> List[Tuple]:
def is_a_mismatch(original_points: [], derived_points: []):
original_points.sort()
derived_points.sort()
return original_points != derived_points
mismatching_indices = []
for key in derived.keys():
if is_a_mismatch(original.get(key), derived.get(key)):
differences = list(set(original.get(key)) - set(derived.get(key)))
for difference in differences:
pair = (key, difference)
mismatching_indices.append(tuple(sorted(pair)))
return mismatching_indices
def _find_indices_of_target_bonds(self, molecule: Mol, list_of_atom_pairs: List[Tuple[int]]) -> List[int]:
list_of_atom_pairs = list(set(list_of_atom_pairs))
bonds_to_cut = [molecule.GetBondBetweenAtoms(pair[0], pair[1]).GetIdx() for pair in list_of_atom_pairs]
return bonds_to_cut
    def _create_fragments(self, molecule: Mol, bonds_to_cut: List[int]) -> Tuple[Mol]:
        """Cut the given bonds and return the resulting fragments as separate Mols.

        Each cut inserts a pair of dummy atoms labelled with the cut's ordinal, so
        matching attachment points on complementary fragments share a number.
        """
        # One (i, i) dummy-label pair per bond: both new attachment atoms get isotope i.
        attachment_point_idxs = [(i, i) for i in range(len(bonds_to_cut))]
        cut_mol = FragmentOnBonds(molecule, bondIndices=bonds_to_cut, dummyLabels=attachment_point_idxs)
        for atom in cut_mol.GetAtoms():
            if atom.GetSymbol() == self._tokens.ATTACHMENT_POINT_TOKEN:
                # Move the cut label from the isotope field to "molAtomMapNumber"
                # so it survives as an atom-map number in SMILES output.
                num = atom.GetIsotope()
                atom.SetIsotope(0)
                atom.SetProp("molAtomMapNumber", str(num))
        cut_mol.UpdatePropertyCache()
        fragments = GetMolFrags(cut_mol, asMols=True, sanitizeFrags=True)
        return fragments | /reinvent_chemistry-0.0.51.tar.gz/reinvent_chemistry-0.0.51/reinvent_chemistry/library_design/bond_mapper.py | 0.775605 | 0.460228 | bond_mapper.py | pypi
from typing import List
from rdkit.Chem.Crippen import MolLogP
from rdkit.Chem.Descriptors import MolWt
from rdkit.Chem.Lipinski import RingCount, NumRotatableBonds, NumHAcceptors, NumHDonors, HeavyAtomCount
from rdkit.Chem.rdchem import Mol
from reinvent_chemistry import TransformationTokens
from reinvent_chemistry.library_design.dtos import FilteringConditionDTO
from reinvent_chemistry.library_design.enums import MolecularDescriptorsEnum
class FragmentFilter:
def __init__(self, conditions: List[FilteringConditionDTO]):
"""
Initializes a fragment filter given the conditions.
:param conditions: Conditions to use. When None is given, everything is valid.
"""
self._descriptors_enum = MolecularDescriptorsEnum()
self._tockens = TransformationTokens()
self.conditions = conditions
self._CONDITIONS_FUNC = {
self._descriptors_enum.HEAVY_ATOM_COUNT: HeavyAtomCount, # pylint: disable=no-member
self._descriptors_enum.MOLECULAR_WEIGHT: MolWt,
self._descriptors_enum.CLOGP: MolLogP, # pylint: disable=no-member
self._descriptors_enum.HYDROGEN_BOND_DONORS: NumHDonors, # pylint: disable=no-member
self._descriptors_enum.HYDROGEN_BOND_ACCEPTORS: NumHAcceptors, # pylint: disable=no-member
self._descriptors_enum.ROTATABLE_BONDS: NumRotatableBonds, # pylint: disable=no-member
self._descriptors_enum.RING_COUNT: RingCount # pylint: disable=no-member
}
def filter(self, mol: Mol) -> bool:
"""
Validates whether a query molecule meets all filtering criteria.
:param mol: A molecule as a Mol object.
:return: A boolean whether the molecule is valid or not.
"""
return self._check_attachment_points(mol) and self._verify_conditions(mol)
def _check_attachment_points(self, mol: Mol) -> bool:
check = [atom.GetDegree() == 1 for atom in mol.GetAtoms() if
atom.GetSymbol() == self._tockens.ATTACHMENT_POINT_TOKEN]
return all(check) and len(check) > 0
def _verify_conditions(self, mol: Mol) -> bool:
try:
verification = []
for condition in self.conditions:
descriptor = self._CONDITIONS_FUNC.get(condition.name)
if condition.equals:
verification.append(condition.equals == descriptor(mol))
if condition.min:
verification.append(condition.min < descriptor(mol))
if condition.max:
verification.append(condition.max > descriptor(mol))
if not all(verification):
return False
except:
return False
return True | /reinvent_chemistry-0.0.51.tar.gz/reinvent_chemistry-0.0.51/reinvent_chemistry/library_design/fragment_filter.py | 0.865053 | 0.382891 | fragment_filter.py | pypi |
from collections import OrderedDict
from typing import List, Tuple
from rdkit.Chem.rdchem import Mol
from reinvent_chemistry import TransformationTokens, Conversions
from reinvent_chemistry.library_design.dtos import ReactionDTO, FailedReactionDTO
from reinvent_chemistry.library_design.fragment_reactions import FragmentReactions
from reinvent_chemistry.library_design.fragmented_molecule import FragmentedMolecule
class FailingReactionsEnumerator:
    def __init__(self, chemical_reactions: List[ReactionDTO]):
        """
        Class to enumerate over a list of reactions and a molecule in order to detect failures.
        :param chemical_reactions: A list of ReactionDTO objects.
        """
        self._tockens = TransformationTokens()
        self._chemical_reactions = chemical_reactions
        self._reactions = FragmentReactions()
        self._conversions = Conversions()

    def enumerate(self, molecule: Mol, failures_limit: int) -> List[FailedReactionDTO]:
        """
        Enumerates all provided reactions on a molecule in order detect failures.
        :param molecule: A mol object with the molecule to apply reactions on.
        :param failures_limit: The number of failed examples to accumulate.
        :return : A list of failed reaction/molecule pairs.
        """
        original_smiles = self._conversions.mol_to_smiles(molecule)
        # Keyed by reaction SMIRKS so each failing reaction is reported at most once.
        failed_reactions = {}
        for reaction in self._chemical_reactions:
            dto = self._reactions.apply_reaction_on_molecule(molecule, reaction)
            for pair in dto.reaction_outcomes:
                if failures_limit <= len(failed_reactions):
                    break
                for indx, _ in enumerate(pair):
                    # Try each fragment as the scaffold; the first remaining one is the decoration.
                    decorations = self._select_all_except(pair, indx)
                    decoration = self._conversions.copy_mol(decorations[0])
                    scaffold = self._conversions.copy_mol(pair[indx])
                    if not self._validate(scaffold, decoration, original_smiles):
                        failed_reaction = FailedReactionDTO(reaction.reaction_smarts, original_smiles)
                        failed_reactions[failed_reaction.reaction_smirks] = failed_reaction
                        break
            if failures_limit <= len(failed_reactions):
                break
        dtos = [dto for dto in failed_reactions.values()]
        return dtos

    def _select_all_except(self, fragments: Tuple[Mol], to_exclude: int) -> List[Mol]:
        # Every fragment except the one at position `to_exclude`.
        return [fragment for indx, fragment in enumerate(fragments) if indx != to_exclude]

    def _label_scaffold(self, scaffold: Mol) -> Mol:
        # Assign a fresh "molAtomMapNumber" to every attachment-point atom that does
        # not already carry one, continuing from the highest existing label.
        highest_number = self._find_highest_number(scaffold)
        for atom in scaffold.GetAtoms():
            if atom.GetSymbol() == self._tockens.ATTACHMENT_POINT_TOKEN:
                try:
                    atom_number = int(atom.GetProp("molAtomMapNumber"))
                except:
                    # Property missing -> unlabelled attachment point: give it the next number.
                    highest_number += 1
                    num = atom.GetIsotope()
                    atom.SetIsotope(0)
                    atom.SetProp("molAtomMapNumber", str(highest_number))
        scaffold.UpdatePropertyCache()
        return scaffold

    def _find_highest_number(self, cut_mol: Mol) -> int:
        # Largest "molAtomMapNumber" over the attachment points, or -1 when none are labelled.
        highest_number = -1
        for atom in cut_mol.GetAtoms():
            if atom.GetSymbol() == self._tockens.ATTACHMENT_POINT_TOKEN:
                try:
                    atom_number = int(atom.GetProp("molAtomMapNumber"))
                    if highest_number < atom_number:
                        highest_number = atom_number
                except:
                    pass
        return highest_number

    def _validate(self, scaffold: Mol, decoration: Mol, original_smiles: str) -> bool:
        # A slicing is valid when re-joining scaffold and decoration reproduces the input SMILES.
        if scaffold and decoration:
            labeled_decoration = OrderedDict()
            labeled_decoration[0] = decoration
            labeled_scaffold = self._label_scaffold(scaffold)
            sliced_mol = FragmentedMolecule(labeled_scaffold, labeled_decoration, original_smiles)
            if original_smiles != sliced_mol.reassembled_smiles:
                return False
            return True
        else:
            return False | /reinvent_chemistry-0.0.51.tar.gz/reinvent_chemistry-0.0.51/reinvent_chemistry/library_design/failing_reactions_enumerator.py | 0.871365 | 0.519278 | failing_reactions_enumerator.py | pypi
from collections import OrderedDict
from rdkit.Chem.rdchem import Mol
from reinvent_chemistry import Conversions, TransformationTokens
from reinvent_chemistry.library_design import BondMaker, AttachmentPoints
class FragmentedMolecule:
    def __init__(self, scaffold: Mol, decorations: OrderedDict, original_smiles: str):
        """
        Represents a molecule as a scaffold and the decorations associated with each attachment point.
        :param scaffold: A Mol object with the scaffold.
        :param decorations: Either a list or a dict with the decorations as Mol objects.
        """
        self._tockens = TransformationTokens()
        self._attachments = AttachmentPoints()
        self._conversions = Conversions()
        self._bond_maker = BondMaker()
        self.scaffold = scaffold
        self.decorations = decorations
        self.original_smiles = original_smiles
        self.scaffold_smiles = self._conversions.mol_to_smiles(self.scaffold)
        # NOTE: order matters — decorations are re-keyed to match the canonical
        # scaffold SMILES before the decoration string and reassembly are computed.
        self.re_label()
        self.decorations_smiles = self._create_decorations_string()
        self.reassembled_smiles = self._re_assemble()
        # self.reorder_attachment_point_numbers()

    def __eq__(self, other):
        # Identity is defined by the scaffold and decoration SMILES only.
        return self.decorations_smiles == other.decorations_smiles and self.scaffold_smiles == other.scaffold_smiles

    def __hash__(self):
        return tuple([self.scaffold_smiles, self.decorations_smiles]).__hash__()

    def decorations_count(self) -> int:
        # Number of decorations (one per attachment point).
        return len(self.decorations)

    def re_label(self):
        # Renumber decorations 0..n-1 in the order attachment points appear in the
        # canonical scaffold SMILES.
        labels = self._attachments.get_attachment_points(self.scaffold_smiles)
        decorations = OrderedDict()
        for i, v in enumerate(labels):
            decorations[i] = self.decorations[v]
        self.decorations = decorations

    def reorder_attachment_point_numbers(self):
        # Renumber the attachment points in the scaffold SMILES itself.
        self.scaffold_smiles = self._attachments.add_attachment_point_numbers(self.scaffold_smiles)

    def _re_assemble(self):
        # Number attachment points in SMILES order (no canonicalization) and re-join
        # scaffold with decorations to recover a full-molecule SMILES for validation.
        self.scaffold_smiles = self._attachments.add_attachment_point_numbers(self.scaffold_smiles, canonicalize=False)
        molecule = self._bond_maker.join_scaffolds_and_decorations(self.scaffold_smiles, self.decorations_smiles)
        return self._conversions.mol_to_smiles(molecule)

    def _create_decorations_string(self):
        # '|'-separated decoration SMILES, ordered by attachment-point number.
        values = [self._conversions.mol_to_smiles(smi) for num, smi in self.decorations.items()]
        decorations = '|'.join(values)
        return decorations

    def to_smiles(self):
        """
        Calculates the SMILES representation of the given variant of the scaffold and decorations.
        :param variant: SMILES variant to use (see to_smiles)
        :return: A tuple with the SMILES of the scaffold and a dict with the SMILES of the decorations.
        """
        return (self._conversions.mol_to_smiles(self.scaffold),
                {num: self._conversions.mol_to_smiles(dec) for num, dec in self.decorations.items()})

    def to_random_smiles(self):
        """
        Calculates the SMILES representation of the given variant of the scaffold and decorations.
        :param variant: SMILES variant to use (see to_smiles)
        :return: A tuple with the SMILES of the scaffold and a dict with the SMILES of the decorations.
        """
        return (self._conversions.mol_to_random_smiles(self.scaffold),
                {num: self._conversions.mol_to_random_smiles(dec) for num, dec in self.decorations.items()}) | /reinvent_chemistry-0.0.51.tar.gz/reinvent_chemistry-0.0.51/reinvent_chemistry/library_design/fragmented_molecule.py | 0.929416 | 0.380241 | fragmented_molecule.py | pypi
import re
from typing import List
from rdkit.Chem.rdchem import Mol
from reinvent_chemistry import Conversions, TransformationTokens
class AttachmentPoints:
    """Utilities for numbering, reading and stripping attachment-point labels in SMILES/Mols."""

    def __init__(self):
        self._conversions = Conversions()
        self._tokens = TransformationTokens()

    def add_attachment_point_numbers(self, mol_or_smi, canonicalize=True):
        """
        Adds the numbers for the attachment points throughout the molecule.
        :param mol_or_smi: SMILES string to convert.
        :param canonicalize: Canonicalize the SMILES so that the attachment points are always in the same order.
        :return : A converted SMILES string.
        """
        if isinstance(mol_or_smi, str):
            smi = mol_or_smi
            if canonicalize:
                smi = self._conversions.mol_to_smiles(self._conversions.smile_to_mol(mol_or_smi))
            # only add numbers ordered by the SMILES ordering
            num = -1

            def _ap_callback(_):
                # Each regex match consumes the next sequential number.
                nonlocal num
                num += 1
                return "[{}:{}]".format(self._tokens.ATTACHMENT_POINT_TOKEN, num)
            return re.sub(self._tokens.ATTACHMENT_POINT_REGEXP, _ap_callback, smi)
        else:
            mol = mol_or_smi
            if canonicalize:
                mol = self._conversions.smile_to_mol(self._conversions.mol_to_smiles(mol))
            # Number attachment-point atoms in atom-iteration order.
            idx = 0
            for atom in mol.GetAtoms():
                if atom.GetSymbol() == self._tokens.ATTACHMENT_POINT_TOKEN:
                    atom.SetProp("molAtomMapNumber", str(idx))
                    idx += 1
            return self._conversions.mol_to_smiles(mol)

    def get_attachment_points(self, smile: str) -> List:
        """
        Gets all attachment points from SMILES string.
        :param smile: A SMILES string
        :return : A list with the numbers ordered by appearance.
        """
        return [int(match.group(1)) for match in re.finditer(self._tokens.ATTACHMENT_POINT_NUM_REGEXP, smile)]

    def get_attachment_points_for_molecule(self, molecule: Mol) -> List:
        """
        Gets all attachment points from RDKit Mol.
        :param molecule: A Mol object.
        :return : A list with the numbers ordered by appearance.
        """
        # NOTE: implicitly returns None when `molecule` is not a Mol.
        if isinstance(molecule, Mol):
            return [int(atom.GetProp("molAtomMapNumber")) for atom in molecule.GetAtoms()
                    if atom.GetSymbol() == self._tokens.ATTACHMENT_POINT_TOKEN and atom.HasProp("molAtomMapNumber")]

    def add_first_attachment_point_number(self, smi, num):
        """
        Changes/adds a number to the first attachment point.
        :param smi: SMILES string with the molecule.
        :param num: Number to add.
        :return: A SMILES string with the number added.
        """
        return re.sub(self._tokens.ATTACHMENT_POINT_REGEXP, "[{}:{}]".format(self._tokens.ATTACHMENT_POINT_TOKEN, num),
                      smi, count=1)

    def remove_attachment_point_numbers(self, smile: str) -> str:
        """
        Removes the numbers for the attachment points throughout the molecule.
        :param smile: SMILES string.
        :return : A converted SMILES string.
        """
        result = re.sub(self._tokens.ATTACHMENT_POINT_NUM_REGEXP, "[{}]".format(self._tokens.ATTACHMENT_POINT_TOKEN),
                        smile)
        return result

    def remove_attachment_point_numbers_from_mol(self, molecule: Mol) -> Mol:
        """
        Removes the numbers for the attachment points throughout the molecule.
        :param molecule: RDKit molecule.
        :return : A molecule.
        """
        # NOTE: clears the map number on ALL atoms, and implicitly returns None
        # when `molecule` is not a Mol.
        if isinstance(molecule, Mol):
            for atom in molecule.GetAtoms():
                atom.ClearProp("molAtomMapNumber")
            return molecule

    def add_brackets_to_attachment_points(self, scaffold: str):
        """
        Adds brackets to the attachment points (if they don't have them).
        :param scaffold: SMILES string.
        :return: A SMILES string with attachments in brackets.
        """
        return re.sub(self._tokens.ATTACHMENT_POINT_NO_BRACKETS_REGEXP, "[{}]".format(self._tokens.ATTACHMENT_POINT_TOKEN), scaffold) | /reinvent_chemistry-0.0.51.tar.gz/reinvent_chemistry-0.0.51/reinvent_chemistry/library_design/attachment_points.py | 0.860061 | 0.504516 | attachment_points.py | pypi
from typing import List, Tuple
from rdkit.Chem import AllChem, Mol
from rdkit.Chem.Lipinski import RingCount
from rdkit.Chem.rdChemReactions import ChemicalReaction
from reinvent_chemistry import Conversions
from reinvent_chemistry.library_design import BondMapper
from reinvent_chemistry.library_design.dtos import ReactionDTO, ReactionOutcomeDTO
from reinvent_chemistry.tokens import TransformationTokens
class FragmentReactions:
    """Compiles reaction SMARTS/SMIRKS and applies them to slice molecules into fragments."""

    def __init__(self):
        self._conversions = Conversions()
        self._tokens = TransformationTokens()
        self._bond_mapper = BondMapper()

    def create_reactions_from_smarts(self, smarts: List[str]) -> List[ChemicalReaction]:
        # Compile each SMARTS string into an RDKit ChemicalReaction.
        reactions = [AllChem.ReactionFromSmarts(smirks) for smirks in smarts]
        return reactions

    def create_reaction_from_smirk(self, smirks: str) -> ReactionDTO:
        # Bundle the raw SMIRKS string together with its compiled reaction.
        reaction = ReactionDTO(smirks, AllChem.ReactionFromSmarts(smirks))
        return reaction

    def create_reactions_from_smirks(self, smirks: List[str]) -> List[ReactionDTO]:
        reactions = [self.create_reaction_from_smirk(smirk) for smirk in smirks]
        return reactions

    def slice_molecule_to_fragments(self, molecule: Mol, reaction_dtos: List[ReactionDTO]) -> List[Tuple[Mol]]:
        """
        This method applies a list of chemical reactions on a molecule and
        decomposes the input molecule to complementary fragments.
        :param molecule:
        :param reaction_dtos:
        :return: Different slicing combinations are returned.
        """
        list_of_outcomes = self.apply_reactions_on_molecule(molecule, reaction_dtos)
        all_outcomes = []
        for outcome in list_of_outcomes:
            all_outcomes.extend(outcome.reaction_outcomes)
        # TODO: the overall data processing is extremely slow. consider reducing redundancy here.
        return all_outcomes

    def apply_reactions_on_molecule(self, molecule: Mol, reaction_dtos: List[ReactionDTO]) -> List[ReactionOutcomeDTO]:
        """Build list of possible splits of a molecule given multiple reactions."""
        list_of_outcomes = []
        for reaction_dto in reaction_dtos:
            outcome_dto = self.apply_reaction_on_molecule(molecule, reaction_dto)
            # Drop outcomes that opened a ring or did not yield exactly two fragments.
            purged_outcome_dto = self._filter_pairs_with_no_ring_count_change(outcome_dto)
            list_of_outcomes.append(purged_outcome_dto)
        return list_of_outcomes

    def apply_reaction_on_molecule(self, molecule: Mol, reaction_dto: ReactionDTO) -> ReactionOutcomeDTO:
        """Build list of possible splits of a molecule given a single reaction."""
        # Work on a copy so reaction bookkeeping props never touch the caller's Mol.
        molecule = self._conversions.copy_mol(molecule)
        outcomes = reaction_dto.chemical_reaction.RunReactant(molecule, 0)
        outcome_dto = ReactionOutcomeDTO(reaction_dto.reaction_smarts, list(outcomes), molecule)
        return outcome_dto

    def _filter_pairs_with_no_ring_count_change(self, outcome_dto: ReactionOutcomeDTO) -> ReactionOutcomeDTO:
        # Keep only two-fragment outcomes whose combined ring count matches the parent's.
        molecule_rings = RingCount(outcome_dto.targeted_molecule)
        acceptable_pairs = []
        for pair in outcome_dto.reaction_outcomes:
            if not self._detect_ring_break(molecule_rings, pair) and len(pair) == 2:
                acceptable_pairs.append(pair)
        outcome_dto.reaction_outcomes = acceptable_pairs
        return outcome_dto

    def _detect_ring_break(self, molecule_ring_count: int, pair: Tuple[Mol]) -> bool:
        # Round-trip each fragment through SMILES; a parse failure or a ring-count
        # mismatch versus the parent indicates the cut broke a ring.
        reagent_rings = 0
        for reagent in pair:
            reagent_smiles = self._conversions.mol_to_smiles(reagent)
            reagent_mol = self._conversions.smile_to_mol(reagent_smiles)
            try:
                reagent_rings = reagent_rings + RingCount(reagent_mol)
            except:
                return True
        return molecule_ring_count != reagent_rings | /reinvent_chemistry-0.0.51.tar.gz/reinvent_chemistry-0.0.51/reinvent_chemistry/library_design/fragment_reactions.py | 0.730578 | 0.626667 | fragment_reactions.py | pypi
from collections import OrderedDict
from typing import List, Tuple, Set
from rdkit.Chem.rdchem import Mol
from reinvent_chemistry import TransformationTokens, Conversions
from reinvent_chemistry.library_design import FragmentFilter
from reinvent_chemistry.library_design.dtos import FilteringConditionDTO, ReactionDTO
from reinvent_chemistry.library_design.fragment_reactions import FragmentReactions
from reinvent_chemistry.library_design.fragmented_molecule import FragmentedMolecule
class FragmentReactionSliceEnumerator:
    def __init__(self, chemical_reactions: List[ReactionDTO],
                 scaffold_conditions: List[FilteringConditionDTO],
                 decoration_conditions: List[FilteringConditionDTO]):
        """
        Class to enumerate slicings given certain conditions.
        :param chemical_reactions: A list of ChemicalReaction objects.
        :param scaffold_conditions: Conditions to use when filtering scaffolds obtained from slicing molecules (see FragmentFilter).
        :param decoration_conditions: Conditions to use when filtering decorations obtained from slicing molecules.
        """
        self._tockens = TransformationTokens()
        self._chemical_reactions = chemical_reactions
        self._scaffold_filter = FragmentFilter(scaffold_conditions)
        self._decoration_filter = FragmentFilter(decoration_conditions)
        self._reactions = FragmentReactions()
        self._conversions = Conversions()

    def enumerate(self, molecule: Mol, cuts: int) -> List[FragmentedMolecule]:
        """
        Enumerates all possible combination of slicings of a molecule given a number of cuts.
        :param molecule: A mol object with the molecule to slice.
        :param cuts: The number of cuts to perform.
        :return : A list with all the possible (scaffold, decorations) pairs as SlicedMol objects.
        """
        original_smiles = self._conversions.mol_to_smiles(molecule)
        sliced_mols = set()
        for cut in range(1, cuts + 1):
            if cut == 1:
                # First pass: cut the intact molecule once with every reaction.
                fragment_pairs = self._reactions.slice_molecule_to_fragments(molecule, self._chemical_reactions)
                for pair in fragment_pairs:
                    for indx, _ in enumerate(pair):
                        # Try each fragment as the scaffold; the other becomes the decoration.
                        decorations = self._select_all_except(pair, indx)
                        decoration = self._conversions.copy_mol(decorations[0])
                        labeled_decoration = OrderedDict()
                        labeled_decoration[0] = decoration  # [ for decoration in decorations]
                        scaffold = self._conversions.copy_mol(pair[indx])
                        labeled_scaffold = self._label_scaffold(scaffold)
                        # TODO: filtering should take place after scaffold is generated
                        sliced_mol = FragmentedMolecule(labeled_scaffold, labeled_decoration, original_smiles)
                        # Only keep slicings that reassemble back to the input SMILES.
                        if sliced_mol.original_smiles == sliced_mol.reassembled_smiles:
                            sliced_mols.add(sliced_mol)
            else:
                # Later passes: re-slice scaffolds produced by the previous round.
                # Iteration runs over the set captured at loop start; `union` rebinds
                # the name without mutating the set being iterated.
                for slice in sliced_mols:
                    to_add = self._scaffold_slicing(slice, cut)
                    sliced_mols = sliced_mols.union(to_add)
        return list(filter(self._filter, sliced_mols))

    def _scaffold_slicing(self, slice: FragmentedMolecule, cut: int) -> Set[FragmentedMolecule]:
        # Cut an existing scaffold once more; only scaffolds from the previous round
        # (exactly cut-1 decorations) are considered.
        to_add = set()
        if slice.decorations_count() == cut - 1:
            fragment_pairs = self._reactions.slice_molecule_to_fragments(slice.scaffold, self._chemical_reactions)
            for pair in fragment_pairs:
                scaffold, decoration = self._split_scaffold_from_decorations(pair, cut)
                if scaffold:
                    labeled_scaffold = self._label_scaffold(scaffold)
                    labeled_scaffold = self._conversions.copy_mol(labeled_scaffold)
                    decoration = self._conversions.copy_mol(decoration)
                    sliced_mol = self._create_sliced_molecule(slice, labeled_scaffold, decoration)
                    # Only keep slicings that reassemble back to the input SMILES.
                    if sliced_mol.original_smiles == sliced_mol.reassembled_smiles:
                        to_add.add(sliced_mol)
        return to_add

    def _select_all_except(self, fragments: Tuple[Mol], to_exclude: int) -> List[Mol]:
        # Every fragment except the one at position `to_exclude`.
        return [fragment for indx, fragment in enumerate(fragments) if indx != to_exclude]

    def _filter(self, sliced_mol: FragmentedMolecule) -> bool:
        # Scaffold and every decoration must pass their respective fragment filters.
        return self._scaffold_filter.filter(sliced_mol.scaffold) \
               and all(self._decoration_filter.filter(dec) for dec in sliced_mol.decorations.values())

    def _split_scaffold_from_decorations(self, pair: Tuple[Mol], cuts: int) -> Tuple[Mol, Mol]:
        decoration = None
        scaffold = None
        for frag in pair:
            num_att = len(
                [atom for atom in frag.GetAtoms() if atom.GetSymbol() == self._tockens.ATTACHMENT_POINT_TOKEN])
            # detect whether there is one fragment with as many attachment points as cuts (scaffold)
            # the rest are decorations
            if num_att == cuts and not scaffold:
                scaffold = frag
            if num_att == 1:
                decoration = frag
        if decoration and scaffold:
            return scaffold, decoration
        else:
            return (None, None)

    def _label_scaffold(self, scaffold: Mol) -> Mol:
        # Assign a fresh "molAtomMapNumber" to every attachment-point atom that does
        # not already carry one, continuing from the highest existing label.
        highest_number = self._find_highest_number(scaffold)
        for atom in scaffold.GetAtoms():
            if atom.GetSymbol() == self._tockens.ATTACHMENT_POINT_TOKEN:
                try:
                    atom_number = int(atom.GetProp("molAtomMapNumber"))
                except:
                    # Property missing -> unlabelled attachment point: give it the next number.
                    highest_number += 1
                    num = atom.GetIsotope()
                    atom.SetIsotope(0)
                    atom.SetProp("molAtomMapNumber", str(highest_number))
        scaffold.UpdatePropertyCache()
        return scaffold

    def _find_highest_number(self, cut_mol: Mol) -> int:
        # Largest "molAtomMapNumber" over the attachment points, or -1 when none are labelled.
        highest_number = -1
        for atom in cut_mol.GetAtoms():
            if atom.GetSymbol() == self._tockens.ATTACHMENT_POINT_TOKEN:
                try:
                    atom_number = int(atom.GetProp("molAtomMapNumber"))
                    if highest_number < atom_number:
                        highest_number = atom_number
                except:
                    pass
        return highest_number

    def _create_sliced_molecule(self, original_sliced_mol: FragmentedMolecule, scaffold: Mol,
                                decoration: Mol) -> FragmentedMolecule:
        # Extend the previous slicing with one more decoration, keyed by the next index.
        old_decorations = OrderedDict()
        for k, v in original_sliced_mol.decorations.items():
            old_decorations[k] = v
        old_decorations[original_sliced_mol.decorations_count()] = decoration
        sliced_mol = FragmentedMolecule(scaffold, old_decorations, original_sliced_mol.original_smiles)
        return sliced_mol | /reinvent_chemistry-0.0.51.tar.gz/reinvent_chemistry-0.0.51/reinvent_chemistry/library_design/fragment_reaction_slice_enumerator.py | 0.743727 | 0.437944 | fragment_reaction_slice_enumerator.py | pypi
from typing import List, Tuple
import pandas as pd
from reinvent_chemistry.library_design.bond_maker import BondMaker
from reinvent_chemistry.conversions import Conversions
from reinvent_chemistry.library_design.attachment_points import AttachmentPoints
from reinvent_chemistry.library_design.enums import ScaffoldMemoryFieldsEnum
from reinvent_chemistry.library_design.reaction_definitions.blocks_for_compound_dto import BuildingBlocksForCompoundDTO
from reinvent_chemistry.library_design.reaction_definitions.building_block_pair_dto import BuildingBlockPairDTO
from reinvent_chemistry.library_design.reaction_definitions.leaving_groups_dto import LeavingGroupsDTO
from reinvent_chemistry.library_design.reaction_definitions.standard_definitions import StandardDefinitions
class BuildingBlocks:
    """Builds purchasable-style building blocks by attaching reaction leaving groups
    to scaffold/decoration pairs from a scaffold-memory dataframe."""

    def __init__(self, reaction_definition_file: str):
        self._reactions_library = StandardDefinitions(reaction_definition_file)
        self._attachments = AttachmentPoints()
        self._scaffold_memory_fields = ScaffoldMemoryFieldsEnum()
        self._conversions = Conversions()
        self._bond_maker = BondMaker()

    def create(self, reaction_name: str, position: int, dataframe: pd.DataFrame) -> List[BuildingBlocksForCompoundDTO]:
        """Create building blocks for every compound in *dataframe* for the given
        reaction and attachment position.

        :param reaction_name: Name of the reaction in the definitions library.
        :param position: Index of the attachment point to decorate.
        :param dataframe: Scaffold-memory dataframe with SMILES and SCAFFOLD columns.
        """
        leaving_group_pairs = self._reactions_library.get_leaving_group_pairs(reaction_name)
        # NOTE(review): duplicate compound SMILES collapse to the last row's scaffold.
        compound_fragments = {
            compound: fragments for compound, fragments in
            zip(dataframe[self._scaffold_memory_fields.SMILES],
                dataframe[self._scaffold_memory_fields.SCAFFOLD])
        }
        blocks_for_compounds = [self._create_building_blocks_for_compound(compound, leaving_group_pairs,
                                                                          compound_fragments[compound],
                                                                          reaction_name, position)
                                for compound in compound_fragments.keys()]
        return blocks_for_compounds

    def _create_building_blocks_for_compound(self, compound: str, leaving_group_pairs: List[LeavingGroupsDTO],
                                             scaffold_decorations: str, reaction_name: str, position: int)\
            -> BuildingBlocksForCompoundDTO:
        # Build one DTO per compound holding all leaving-group variants at `position`.
        numbered_scaffold, numbered_decorations = self._separate_scaffold_and_decorations(scaffold_decorations)
        blocks = self._create_building_blocks(leaving_group_pairs, numbered_scaffold,
                                              position, numbered_decorations[position])
        compound_blocks = BuildingBlocksForCompoundDTO(compound, reaction_name, position, blocks)
        return compound_blocks

    def _separate_scaffold_and_decorations(self, scaffold_decorations) -> Tuple[str, List[str]]:
        # Scaffold-memory format: "scaffold|decoration0|decoration1|...".
        scaffold, *decorations = scaffold_decorations.split('|')
        scaffold = self._attachments.add_attachment_point_numbers(scaffold, False)
        decorations = [self._attachments.add_attachment_point_numbers(decoration, False) for decoration in decorations]
        return scaffold, decorations

    def _create_building_blocks(self, leaving_group_pairs: List[LeavingGroupsDTO], scaffold: str,
                                attachment_position: int, decoration: str) -> List[BuildingBlockPairDTO]:
        building_blocks = [self._create_building_block_pair(scaffold, decoration, attachment_position, leaving_group)
                           for leaving_group in leaving_group_pairs]
        return building_blocks

    def _create_building_block_pair(self, scaffold: str, decoration: str,
                                    attachment_position: int, leaving_group: LeavingGroupsDTO) -> BuildingBlockPairDTO:
        # Attach the scaffold-side leaving group at the requested attachment position.
        scaffold_group = self._attachments.add_first_attachment_point_number(leaving_group.leaving_group_scaffold,
                                                                            attachment_position)
        scaffold_group_mol = self._conversions.smile_to_mol(scaffold_group)
        scaffold_mol = self._conversions.smile_to_mol(scaffold)
        scaffold_block_mol = self._bond_maker.join_molecule_fragments(scaffold_mol, scaffold_group_mol)
        scaffold_block = self._conversions.mol_to_smiles(scaffold_block_mol)
        # The decoration-side leaving group always attaches at point 0.
        decoration_group = self._attachments.add_first_attachment_point_number(leaving_group.leaving_group_decoration, 0)
        decoration_group_mol = self._conversions.smile_to_mol(decoration_group)
        decoration_mol = self._conversions.smile_to_mol(decoration)
        decoration_block_mol = self._bond_maker.join_molecule_fragments(decoration_mol, decoration_group_mol)
        decoration_block = self._conversions.mol_to_smiles(decoration_block_mol)
        building_block = BuildingBlockPairDTO(scaffold_block, decoration_block)
        return building_block | /reinvent_chemistry-0.0.51.tar.gz/reinvent_chemistry-0.0.51/reinvent_chemistry/library_design/reaction_definitions/building_blocks.py | 0.787768 | 0.379177 | building_blocks.py | pypi
from typing import List
import requests
from reinvent_chemistry.library_design.aizynth.collection_of_pathways_dto import CollectionOfPathwaysDTO
from reinvent_chemistry.library_design.aizynth.synthetic_pathway_dto import SyntheticPathwayDTO
class AiZynthClient:
    """Currently this class is specific for the internal REST API of AstraZeneca"""

    def __init__(self, prediction_url: str, availability_url: str, api_token: str, logger):
        # :param logger: any object exposing log_message(message).
        self._prediction_url = prediction_url
        self._availability_url = availability_url
        self._headers = self._compose_headers(api_token)
        self._logger = logger

    def synthesis_prediction(self, smile: str) -> CollectionOfPathwaysDTO:
        # Request retrosynthesis precursors for one SMILES; HTTP errors are logged
        # and an empty pathway collection is returned instead of raising.
        data = {"smiles": smile, "policy": "Full Set"}
        try:
            response = requests.post(self._prediction_url, headers=self._headers, data=data)
            response.raise_for_status()
            result = response.json()
            precursor_sets = [SyntheticPathwayDTO(precursors=precursor["smiles_split"])
                              for precursor in result["precursors"]]
            pathway_collection = CollectionOfPathwaysDTO(input=smile, pathways=precursor_sets)
            return pathway_collection
        except requests.exceptions.HTTPError as e:
            self._logger.log_message(e)
            self._logger.log_message(f'Failed for string: {smile}')
            return CollectionOfPathwaysDTO(input=smile, pathways=[])

    def batch_synthesis_prediction(self, smiles: List[str]) -> List[CollectionOfPathwaysDTO]:
        # Sequential requests, one per SMILES.
        precursors = [self.synthesis_prediction(smile) for smile in smiles]
        return precursors

    def get_stock_availability(self, smile: str) -> bool:
        # True when the stock endpoint returns at least one hit; False on HTTP errors.
        response = requests.get(self._availability_url, headers=self._headers, params={"q": smile})
        try:
            response.raise_for_status()
        except requests.exceptions.HTTPError as e:
            self._logger.log_message(e)
        if response.status_code == requests.codes.ok:
            result = response.json()
            return len(result["result"]) > 0
        return False

    def availability_score(self, pathway: SyntheticPathwayDTO) -> float:
        # Fraction of a pathway's precursors that are in stock;
        # max(1, ...) guards against division by zero for empty pathways.
        count = 0
        for building_block in pathway.precursors:
            if self.get_stock_availability(building_block):
                count+=1
        score = count/max(1, len(pathway.precursors))
        return score

    def pathway_stock_availability_score(self, pathways: CollectionOfPathwaysDTO):
        # Best availability score over all predicted pathways; 0 when there are none.
        scores = [self.availability_score(pathway) for pathway in pathways.pathways]
        best_score = max(scores) if scores else 0
        return best_score

    def batch_stock_availability_score(self, pathways: List[CollectionOfPathwaysDTO]) -> List[float]:
        scores = [self.pathway_stock_availability_score(pathway) for pathway in pathways]
        return scores

    def _compose_headers(self, api_token: str):
        # Token-based auth header expected by the internal API.
        headers = {"accept": "application/json", "Authorization": f"Token {api_token}"}
        return headers | /reinvent_chemistry-0.0.51.tar.gz/reinvent_chemistry-0.0.51/reinvent_chemistry/library_design/aizynth/aizynth_client.py | 0.7413 | 0.284315 | aizynth_client.py | pypi
from typing import List, Dict
import numpy as np
from reinvent_chemistry.library_design import FragmentReactions
from reinvent_chemistry.library_design.reaction_definitions.standard_definitions import StandardDefinitions
from reinvent_chemistry.library_design.reaction_filters.reaction_filter_configruation import ReactionFilterConfiguration
from reinvent_chemistry.library_design.reaction_filters.base_reaction_filter import BaseReactionFilter
class DefinedSelectiveFilter(BaseReactionFilter):
    """Scores a generated molecule by how many of its labelled (newly formed) bonds
    can be disconnected by the reactions assigned to each bond."""

    def __init__(self, configuration: ReactionFilterConfiguration):
        self._chemistry = FragmentReactions()
        # Baseline score granted even when no labelled bond is cleavable.
        self._score_cutoff = 0.5
        self._reactions_library = StandardDefinitions(configuration.reaction_definition_file)
        self._reactions = self._configure_reactions(configuration.reactions)

    def _configure_reactions(self, reaction_names: List[List[str]]):
        # NOTE(review): annotation corrected from Dict[str, List[str]] — the loop
        # enumerates a sequence of reaction-name lists, one list per labelled bond.
        reactions = {}
        for idx, name_list in enumerate(reaction_names):
            smarts_list = [self._reactions_library.get_reaction_definition(name) for name in name_list]
            converted = self._chemistry.create_reactions_from_smarts(smarts_list)
            reactions[idx] = converted
        return reactions

    def evaluate(self, molecule):
        # With no configured reactions every molecule gets the full score.
        if not self._reactions:
            return 1
        return self.score_molecule(molecule)

    def score_molecule(self, molecule):
        # Score in [cutoff, 1]: cutoff plus the cleavable fraction of labelled bonds.
        # NOTE(review): raises ZeroDivisionError when the molecule has no
        # "bondNum"-labelled atoms — confirm callers always label bonds.
        new_bonds = self._find_new_bonds(molecule)
        count = self._count_applicable_reactions_on_molecule(molecule, new_bonds)
        score = self._score_cutoff + (1 - self._score_cutoff)*(count / len(new_bonds))
        return score

    def _find_new_bonds(self, molecule) -> dict:
        """Find atoms marked with the bondNum atom property and add to a list"""
        _bond_indices_dict = {}
        for atom in molecule.GetAtoms():
            if atom.HasProp("bondNum"):
                bondNum = int(atom.GetProp("bondNum"))
                if not bondNum in _bond_indices_dict:
                    _bond_indices_dict[bondNum] = []
                _bond_indices_dict[bondNum].append(atom.GetIdx())
        return _bond_indices_dict

    def _convert_reactants_to_atom_indices(self, reactant_pairs):
        """Convert the list of tuples of reactants into a list of lists of original atom indices"""
        _reactant_idx_list = []
        for reactant_pair in reactant_pairs:
            outcome_list = []
            for reactant in reactant_pair:
                idxs = set(
                    [
                        int(atom.GetProp("react_atom_idx"))
                        for atom in reactant.GetAtoms()
                        if atom.HasProp("react_atom_idx")
                    ]
                )
                outcome_list.append(idxs)
            _reactant_idx_list.append(outcome_list)
        return _reactant_idx_list

    def _count_applicable_reactions_on_molecule(self, molecule, target_bonds: dict):
        # One count per labelled bond that at least one assigned reaction can cut;
        # bonds with an empty reaction list are accepted unconditionally.
        count = 0
        for bond_indx, reactions in self._reactions.items():
            if reactions:
                reaction_pairs = self._apply_reactions_on_bond(molecule, reactions)
                reactant_idxs = self._convert_reactants_to_atom_indices(reaction_pairs)
                if self._detect_sliced_bond_by_reaction(target_bonds[bond_indx], reactant_idxs):
                    count += 1
            else:
                count += 1
        return count

    def _apply_reactions_on_bond(self, molecule, reactions: List) -> List:
        # Run every reaction with the molecule as reactant 0 and pool the outcomes.
        outcomes = []
        for reaction in reactions:
            outcome = reaction.RunReactant(molecule, 0)
            outcomes.extend(outcome)
        reaction_pairs = [outcome for outcome in outcomes]
        return reaction_pairs

    def _detect_sliced_bond_by_reaction(self, bond, reactant_idxs):
        """Test a given bond if its targetable by any retrosynthethic disconnection"""
        return np.any(
            [
                self._verify_atom_splits(bond, sets[0], sets[1])
                for sets in reactant_idxs
                if len(sets) == 2
            ]
        )

    def _verify_atom_splits(self, bond, set1, set2) -> bool:
        """Test if the bond is in split into the two different sets"""
        atom1_set_id = self._find_set_id(bond[0], set1, set2)
        atom2_set_id = self._find_set_id(bond[1], set1, set2)
        atoms_are_from_different_sets = (atom1_set_id != atom2_set_id) and (atom1_set_id != 0) and (atom2_set_id != 0)
        return atoms_are_from_different_sets

    def _find_set_id(self, idx, set1, set2) -> int:
        """Check if an idx is in either of two sets or none of them"""
        if idx in set1:
            return 1
        elif idx in set2:
            return 2
        else:
            return 0 | /reinvent_chemistry-0.0.51.tar.gz/reinvent_chemistry-0.0.51/reinvent_chemistry/library_design/reaction_filters/defined_selective_filter.py | 0.835651 | 0.31511 | defined_selective_filter.py | pypi
from typing import Dict, List
import numpy as np
from rdkit.Chem.rdChemReactions import ChemicalReaction
from reinvent_chemistry.library_design import FragmentReactions
from reinvent_chemistry.library_design.reaction_filters.base_reaction_filter import BaseReactionFilter
from reinvent_chemistry.library_design.reaction_filters.reaction_filter_configruation import ReactionFilterConfiguration
class NonSelectiveFilter(BaseReactionFilter):
    """Reaction filter that scores a molecule by the fraction of its newly
    formed ("bondNum"-tagged) bonds that can be disconnected by ANY of the
    configured reactions."""

    def __init__(self, configuration: ReactionFilterConfiguration):
        self._chemistry = FragmentReactions()
        self._reactions = self._configure_reactions(configuration.reactions)

    def _configure_reactions(self, reaction_smarts: Dict[str, List[str]]) -> List[ChemicalReaction]:
        """Convert every SMIRKS group into reaction objects and pool them into one flat list."""
        all_reactions = []
        for smirks in reaction_smarts:
            reactions = self._chemistry.create_reactions_from_smarts(smirks)
            all_reactions.extend(reactions)
        # BUGFIX: previously returned `reactions` (only the *last* group),
        # silently dropping the reactions accumulated from all earlier groups.
        return all_reactions

    def evaluate(self, molecule):
        """Score the molecule; a filter with no reactions is neutral (returns 1)."""
        if not self._reactions:
            return 1
        return self.score_molecule(molecule)

    def score_molecule(self, molecule):
        """Return the fraction of tagged bonds with at least one possible disconnection."""
        bond_indices = self._get_created_bonds(molecule)
        synthons = self._run_reactions(molecule)
        reactant_idxs = self._analyze_reactants(synthons)
        score = self._score_mol(bond_indices, reactant_idxs)
        return score

    def _analyze_reactants(self, synthons):
        """Convert the list of tuples of reactants into a list of lists of original atom indices."""
        _reactant_idx_list = []
        for synthon in synthons:
            outcome_list = []
            for reactant in synthon:
                idxs = {
                    int(atom.GetProp("react_atom_idx"))
                    for atom in reactant.GetAtoms()
                    if atom.HasProp("react_atom_idx")
                }
                outcome_list.append(idxs)
            _reactant_idx_list.append(outcome_list)
        return _reactant_idx_list

    def _get_created_bonds(self, molecule):
        """Find atoms marked with the bondNum atom property, grouped by bond number."""
        _bond_indices_dict = {}
        for atom in molecule.GetAtoms():
            if atom.HasProp("bondNum"):
                bond_num = int(atom.GetProp("bondNum"))
                _bond_indices_dict.setdefault(bond_num, []).append(atom.GetIdx())
        return _bond_indices_dict

    def _get_list_num(self, idx, set1, set2):
        """Return 1 or 2 for the set containing idx, or 0 when neither does."""
        if idx in set1:
            return 1
        elif idx in set2:
            return 2
        else:
            return 0

    def _run_reactions(self, molecule):
        """Build the full list of possible splits by applying every configured reaction."""
        synthons = []
        for reaction in self._reactions:
            # idiom fix: extend() instead of a side-effecting list comprehension
            synthons.extend(reaction.RunReactant(molecule, 0))
        return synthons

    def _score_mol(self, bond_indices_dict, reactant_idxs):
        """Score the current state by checking if all bonds have a possible disconnection."""
        disconnections = [self._test_bond(bond, reactant_idxs) for bond in bond_indices_dict.values()]
        return np.sum(disconnections) / len(disconnections)

    def _test_splitting(self, bond, set1, set2):
        """Test if the bond's two atoms are split into the two different sets."""
        atom1_set = self._get_list_num(bond[0], set1, set2)
        atom2_set = self._get_list_num(bond[1], set1, set2)
        return (atom1_set != atom2_set) and (atom1_set != 0) and (atom2_set != 0)

    def _test_bond(self, bond, reactant_idxs):
        """Test whether a given bond is targetable by any retrosynthetic disconnection."""
        return np.any(
            [
                self._test_splitting(bond, sets[0], sets[1])
                for sets in reactant_idxs
                if len(sets) == 2
            ]
        )
from typing import List, Dict
import numpy as np
from reinvent_chemistry.library_design import FragmentReactions
from reinvent_chemistry.library_design.reaction_filters.base_reaction_filter import BaseReactionFilter
from reinvent_chemistry.library_design.reaction_filters.reaction_filter_configruation import ReactionFilterConfiguration
class SelectiveFilter(BaseReactionFilter):
    """Reaction filter where each tagged bond has its own group of allowed
    reactions; the score interpolates between a base cutoff and 1 with the
    fraction of bonds sliceable by their assigned reactions."""

    def __init__(self, configuration: ReactionFilterConfiguration):
        self._chemistry = FragmentReactions()
        self._reactions = self._configure_reactions(configuration.reactions)
        # Base score granted even when no bond is sliceable.
        self._score_cutoff = 0.5

    def _configure_reactions(self, reaction_smarts: Dict[str, List[str]]):
        """Convert each group of SMARTS into reaction objects, keyed by group position."""
        reactions = {}
        for idx, smarts_list in enumerate(reaction_smarts):
            reactions[idx] = self._chemistry.create_reactions_from_smarts(smarts_list)
        return reactions

    def evaluate(self, molecule):
        """Score the molecule; a filter with no reactions is neutral (returns 1)."""
        if not self._reactions:
            return 1
        return self.score_molecule(molecule)

    def score_molecule(self, molecule):
        """Blend the cutoff with the fraction of tagged bonds that are disconnectable.

        NOTE(review): raises ZeroDivisionError when the molecule carries no
        "bondNum"-tagged atoms — confirm callers guarantee at least one.
        """
        new_bonds = self._find_new_bonds(molecule)
        count = self._count_applicable_reactions_on_molecule(molecule, new_bonds)
        score = self._score_cutoff + (1 - self._score_cutoff) * (count / len(new_bonds))
        return score

    def _find_new_bonds(self, molecule) -> dict:
        """Find atoms marked with the bondNum atom property, grouped by bond number."""
        _bond_indices_dict = {}
        for atom in molecule.GetAtoms():
            if atom.HasProp("bondNum"):
                bond_num = int(atom.GetProp("bondNum"))
                # idiom fix: "x not in d" instead of "not x in d"
                if bond_num not in _bond_indices_dict:
                    _bond_indices_dict[bond_num] = []
                _bond_indices_dict[bond_num].append(atom.GetIdx())
        return _bond_indices_dict

    def _convert_reactants_to_atom_indices(self, reactant_pairs):
        """Convert the list of tuples of reactants into a list of lists of original atom indices."""
        _reactant_idx_list = []
        for reactant_pair in reactant_pairs:
            outcome_list = []
            for reactant in reactant_pair:
                idxs = {
                    int(atom.GetProp("react_atom_idx"))
                    for atom in reactant.GetAtoms()
                    if atom.HasProp("react_atom_idx")
                }
                outcome_list.append(idxs)
            _reactant_idx_list.append(outcome_list)
        return _reactant_idx_list

    def _count_applicable_reactions_on_molecule(self, molecule, target_bonds: dict):
        """Count bonds sliceable by their assigned reactions; a bond with no
        reactions assigned counts as applicable (no constraint imposed)."""
        count = 0
        for bond_indx, reactions in self._reactions.items():
            if reactions:
                reaction_pairs = self._apply_reactions_on_bond(molecule, reactions)
                reactant_idxs = self._convert_reactants_to_atom_indices(reaction_pairs)
                if self._detect_sliced_bond_by_reaction(target_bonds[bond_indx], reactant_idxs):
                    count += 1
            else:
                count += 1
        return count

    def _apply_reactions_on_bond(self, molecule, reactions: List) -> List:
        """Run every reaction on the molecule (as reactant 0) and pool all outcomes."""
        outcomes = []
        for reaction in reactions:
            outcomes.extend(reaction.RunReactant(molecule, 0))
        # idiom fix: dropped the pointless shallow copy of `outcomes`
        return outcomes

    def _detect_sliced_bond_by_reaction(self, bond, reactant_idxs):
        """Test whether a given bond is targetable by any retrosynthetic disconnection."""
        return np.any(
            [
                self._verify_atom_splits(bond, sets[0], sets[1])
                for sets in reactant_idxs
                if len(sets) == 2
            ]
        )

    def _verify_atom_splits(self, bond, set1, set2) -> bool:
        """Test if the bond's two atoms are split into the two different sets."""
        atom1_set_id = self._find_set_id(bond[0], set1, set2)
        atom2_set_id = self._find_set_id(bond[1], set1, set2)
        return (atom1_set_id != atom2_set_id) and (atom1_set_id != 0) and (atom2_set_id != 0)

    def _find_set_id(self, idx, set1, set2) -> int:
        """Return 1 or 2 for the set containing idx, or 0 when neither does."""
        if idx in set1:
            return 1
        elif idx in set2:
            return 2
        else:
            return 0
from typing import List, Union, Any
import torch
from dacite import from_dict
from torch import nn as tnn
from reinvent_models.link_invent.dto import LinkInventModelParameterDTO
from reinvent_models.link_invent.dto import SampledSequencesDTO
from reinvent_models.link_invent.model_vocabulary.paired_model_vocabulary import PairedModelVocabulary
from reinvent_models.model_factory.enums.model_mode_enum import ModelModeEnum
from reinvent_models.model_factory.generative_model_base import GenerativeModelBase
from reinvent_models.link_invent.networks import EncoderDecoder
class LinkInventModel:
    """
    Encoder-decoder generative model that samples linkers for warhead pairs.

    NOTE(review): sample() creates tensors with .cuda() unconditionally, even
    when the network was kept on CPU via no_cuda — confirm intended usage.
    """
    def __init__(self, vocabulary: PairedModelVocabulary, network: EncoderDecoder,
                 max_sequence_length: int = 256, no_cuda: bool = False, mode: str = ModelModeEnum().TRAINING):
        self.vocabulary = vocabulary
        self.network = network
        self.max_sequence_length = max_sequence_length
        self._model_modes = ModelModeEnum()
        self.set_mode(mode)
        if torch.cuda.is_available() and not no_cuda:
            self.network.cuda()
        # ignore_index=0 masks padding positions out of the loss
        self._nll_loss = tnn.NLLLoss(reduction="none", ignore_index=0)

    def set_mode(self, mode: str):
        """Switch the underlying network between training and inference (eval) mode."""
        if mode == self._model_modes.TRAINING:
            self.network.train()
        elif mode == self._model_modes.INFERENCE:
            self.network.eval()
        else:
            # NOTE(review): the message is missing the closing quote after {mode}
            raise ValueError(f"Invalid model mode '{mode}")

    @classmethod
    def load_from_file(cls, path_to_file, mode: str = ModelModeEnum().TRAINING) -> Union[Any, GenerativeModelBase] :
        """
        Loads a model from a single file
        :param path_to_file: Path to the saved model
        :param mode: Mode in which the model should be initialized
        :return: An instance of the network
        """
        data = from_dict(LinkInventModelParameterDTO, torch.load(path_to_file))
        network = EncoderDecoder(**data.network_parameter)
        network.load_state_dict(data.network_state)
        model = LinkInventModel(vocabulary=data.vocabulary, network=network,
                                max_sequence_length=data.max_sequence_length, mode=mode)
        return model

    def save_to_file(self, path_to_file):
        """
        Saves the model to a file.
        :param path_to_file: Path to the file which the model will be saved to.
        """
        data = LinkInventModelParameterDTO(vocabulary=self.vocabulary, max_sequence_length=self.max_sequence_length,
                                           network_parameter=self.network.get_params(),
                                           network_state=self.network.state_dict())
        torch.save(data.__dict__, path_to_file)

    def likelihood(self, warheads_seqs, warheads_seq_lengths, linker_seqs, linker_seq_lengths):
        """
        Retrieves the likelihood of warheads and their respective linker.
        :param warheads_seqs: (batch, seq) A batch of padded scaffold sequences.
        :param warheads_seq_lengths: The length of the scaffold sequences (for packing purposes).
        :param linker_seqs: (batch, seq) A batch of decorator sequences.
        :param linker_seq_lengths: The length of the decorator sequences (for packing purposes).
        :return: (batch) Log likelihood for each item in the batch.
        """
        # NOTE: the decoration_seq_lengths have a - 1 to prevent the end token to be forward-passed.
        logits = self.network(warheads_seqs, warheads_seq_lengths, linker_seqs,
                              linker_seq_lengths - 1)  # (batch, seq - 1, voc)
        log_probs = logits.log_softmax(dim=2).transpose(1, 2)  # (batch, voc, seq - 1)
        return self._nll_loss(log_probs, linker_seqs[:, 1:]).sum(dim=1)  # (batch)

    @torch.no_grad()
    def sample(self, inputs, input_seq_lengths) -> List[SampledSequencesDTO]:
        """
        Samples as many linker as warhead pairs in the tensor.
        :param inputs: A tensor with the warheads to sample already encoded and padded.
        :param input_seq_lengths: A tensor with the length of the warheads.
        :return: a sampled sequence dto with input_smi, output_smi and nll
        """
        batch_size = inputs.size(0)

        # every sequence starts from the begin token "^"
        input_vector = torch.full((batch_size, 1), self.vocabulary.target.vocabulary["^"],
                                  dtype=torch.long).cuda()  # (batch, 1)
        seq_lengths = torch.ones(batch_size)  # (batch)
        encoder_padded_seqs, hidden_states = self.network.forward_encoder(inputs, input_seq_lengths)
        nlls = torch.zeros(batch_size).cuda()
        not_finished = torch.ones(batch_size, 1, dtype=torch.long).cuda()
        sequences = []
        for _ in range(self.max_sequence_length - 1):
            logits, hidden_states, _ = self.network.forward_decoder(
                input_vector, seq_lengths, encoder_padded_seqs, hidden_states)  # (batch, 1, voc)
            probs = logits.softmax(dim=2).squeeze(dim=1)  # (batch, voc)
            log_probs = logits.log_softmax(dim=2).squeeze(dim=1)  # (batch, voc)
            # sample the next token; finished sequences are forced to padding (0)
            input_vector = torch.multinomial(probs, 1) * not_finished  # (batch, 1)
            sequences.append(input_vector)
            nlls += self._nll_loss(log_probs, input_vector.squeeze(dim=1))
            not_finished = (input_vector > 1).type(torch.long)  # 0 is padding, 1 is end token
            # stop early once every sequence in the batch has terminated
            if not_finished.sum() == 0:
                break

        linker_smiles_list = [self.vocabulary.target.decode(seq) for seq in torch.cat(sequences, 1).data.cpu().numpy()]
        warheads_smiles_list = [self.vocabulary.input.decode(seq) for seq in inputs.data.cpu().numpy()]
        result = [SampledSequencesDTO(warheads, linker, nll) for warheads, linker, nll in
                  zip(warheads_smiles_list, linker_smiles_list, nlls.data.cpu().numpy().tolist())]
        return result

    def get_network_parameters(self):
        """Expose the underlying network parameters (e.g. for an optimizer)."""
        return self.network.parameters()
from typing import List, Tuple
import torch
from torch import Tensor
from torch.nn.utils.rnn import pad_sequence
from torch.utils import data as tud
from reinvent_models.link_invent.model_vocabulary.paired_model_vocabulary import PairedModelVocabulary
class PairedDataset(tud.Dataset):
    """Dataset over (input SMILES, target SMILES) pairs, pre-encoded with a paired vocabulary."""

    def __init__(self, input_target_smi_list: List[List[str]], vocabulary: PairedModelVocabulary):
        self.vocabulary = vocabulary
        self._encoded_list = []
        for input_smi, target_smi in input_target_smi_list:
            encoded_input = self.vocabulary.input.encode(input_smi)
            encoded_target = self.vocabulary.target.encode(target_smi)
            # pairs that fail to encode are silently skipped
            # TODO log theses cases
            if encoded_input is None or encoded_target is None:
                continue
            self._encoded_list.append((encoded_input, encoded_target))

    def __getitem__(self, i):
        encoded_input, encoded_target = self._encoded_list[i]
        input_tensor = torch.tensor(encoded_input, dtype=torch.long)  # pylint: disable=E1102
        target_tensor = torch.tensor(encoded_target, dtype=torch.long)
        return input_tensor, target_tensor

    def __len__(self):
        return len(self._encoded_list)

    @classmethod
    def collate_fn(cls, encoded_pairs):
        """
        Turns a list of encoded pairs (input, target) of sequences and turns them into two batches.
        :param: A list of pairs of encoded sequences.
        :return: A tuple with two tensors, one for the input and one for the targets in the same order as given.
        """
        inputs, targets = zip(*encoded_pairs)
        return cls._pad_batch(list(inputs)), cls._pad_batch(list(targets))

    @staticmethod
    def _pad_batch(encoded_seqs: List) -> Tuple[Tensor, Tensor]:
        """
        Pads a batch.
        :param encoded_seqs: A list of encoded sequences.
        :return: A tensor with the sequences correctly padded, plus their lengths.
        """
        lengths = torch.tensor([len(seq) for seq in encoded_seqs], dtype=torch.int64)
        # NOTE: moves the padded batch to the GPU; requires CUDA
        padded = pad_sequence(encoded_seqs, batch_first=True).cuda()
        return padded, lengths
import torch
from torch import nn as tnn
from torch.nn.utils import rnn as tnnur
from reinvent_models.model_factory.enums.model_parameter_enum import ModelParametersEnum
class Encoder(tnn.Module):
    """
    Simple bidirectional RNN encoder implementation.
    """
    def __init__(self, num_layers: int, num_dimensions: int, vocabulary_size: int, dropout: float):
        super(Encoder, self).__init__()

        self.num_layers = num_layers
        self.num_dimensions = num_dimensions
        self.vocabulary_size = vocabulary_size
        self.dropout = dropout

        # token embedding followed by dropout regularization
        self._embedding = tnn.Sequential(
            tnn.Embedding(self.vocabulary_size, self.num_dimensions),
            tnn.Dropout(dropout)
        )
        self._rnn = tnn.LSTM(self.num_dimensions, self.num_dimensions, self.num_layers,
                             batch_first=True, dropout=self.dropout, bidirectional=True)

    def forward(self, padded_seqs: torch.Tensor, seq_lengths: torch.Tensor) \
            -> (torch.Tensor, (torch.Tensor, torch.Tensor)):  # pylint: disable=arguments-differ
        """
        Performs the forward pass.
        :param padded_seqs: A tensor with the sequences (batch, seq).
        :param seq_lengths: The lengths of the sequences (for packed sequences).
        :return : A tensor with all the output values for each step and the two hidden states.
        """
        batch_size, max_seq_size = padded_seqs.size()
        hidden_state = self._initialize_hidden_state(batch_size)
        padded_seqs = self._embedding(padded_seqs)
        # hidden and cell state start from the same zero tensor (independent copies)
        hs_h, hs_c = (hidden_state, hidden_state.clone().detach())

        # FIXME: this is to guard against non compatible `gpu` input for pack_padded_sequence() method in pytorch 1.7
        seq_lengths = seq_lengths.cpu()

        packed_seqs = tnnur.pack_padded_sequence(padded_seqs, seq_lengths, batch_first=True, enforce_sorted=False)
        packed_seqs, (hs_h, hs_c) = self._rnn(packed_seqs, (hs_h, hs_c))
        padded_seqs, _ = tnnur.pad_packed_sequence(packed_seqs, batch_first=True)

        # sum up bidirectional layers and collapse
        hs_h = hs_h.view(self.num_layers, 2, batch_size, self.num_dimensions).sum(dim=1)  # (layers, batch, dim)
        hs_c = hs_c.view(self.num_layers, 2, batch_size, self.num_dimensions).sum(dim=1)  # (layers, batch, dim)
        padded_seqs = padded_seqs.view(batch_size, max_seq_size, 2, self.num_dimensions).sum(dim=2)  # (batch, seq, dim)

        return padded_seqs, (hs_h, hs_c)

    def _initialize_hidden_state(self, batch_size: int) -> torch.Tensor:
        # 2 * num_layers because the LSTM is bidirectional; NOTE: requires CUDA
        return torch.zeros(self.num_layers*2, batch_size, self.num_dimensions).cuda()

    def get_params(self) -> dict:
        """
        Obtains the params for the network.
        :return : A dict with the params.
        """
        parameter_enums = ModelParametersEnum
        return {
            parameter_enums.NUMBER_OF_LAYERS: self.num_layers,
            parameter_enums.NUMBER_OF_DIMENSIONS: self.num_dimensions,
            parameter_enums.VOCABULARY_SIZE: self.vocabulary_size,
            parameter_enums.DROPOUT: self.dropout
        }
from typing import Tuple
import torch
from torch import nn as tnn
from torch.nn.utils import rnn as tnnur
from reinvent_models.link_invent.networks.attention_layer import AttentionLayer
from reinvent_models.model_factory.enums.model_parameter_enum import ModelParametersEnum
class Decoder(tnn.Module):
    """
    Simple RNN decoder.
    """
    def __init__(self, num_layers: int, num_dimensions: int, vocabulary_size: int, dropout: float):
        super(Decoder, self).__init__()

        self.num_layers = num_layers
        self.num_dimensions = num_dimensions
        self.vocabulary_size = vocabulary_size
        self.dropout = dropout

        # token embedding followed by dropout regularization
        self._embedding = tnn.Sequential(
            tnn.Embedding(self.vocabulary_size, self.num_dimensions),
            tnn.Dropout(dropout)
        )
        self._rnn = tnn.LSTM(self.num_dimensions, self.num_dimensions, self.num_layers,
                             batch_first=True, dropout=self.dropout, bidirectional=False)
        # attends over the encoder outputs at every decoding step
        self._attention = AttentionLayer(self.num_dimensions)
        self._linear = tnn.Linear(self.num_dimensions, self.vocabulary_size)  # just to redimension

    def forward(self, padded_seqs: torch.Tensor, seq_lengths: torch.Tensor,
                encoder_padded_seqs: torch.Tensor, hidden_states: Tuple[torch.Tensor]) \
            -> (torch.Tensor, Tuple[torch.Tensor], torch.Tensor):  # pylint: disable=arguments-differ
        """
        Performs the forward pass.
        :param padded_seqs: A tensor with the output sequences (batch, seq_d, dim).
        :param seq_lengths: A list with the length of each output sequence.
        :param encoder_padded_seqs: A tensor with the encoded input sequences (batch, seq_e, dim).
        :param hidden_states: The hidden states from the encoder.
        :return : Three tensors: The output logits, the hidden states of the decoder and the attention weights.
        """
        # FIXME: this is to guard against non compatible `gpu` input for pack_padded_sequence() method in pytorch 1.7
        seq_lengths = seq_lengths.cpu()

        padded_encoded_seqs = self._embedding(padded_seqs)
        packed_encoded_seqs = tnnur.pack_padded_sequence(
            padded_encoded_seqs, seq_lengths, batch_first=True, enforce_sorted=False)
        packed_encoded_seqs, hidden_states = self._rnn(packed_encoded_seqs, hidden_states)
        padded_encoded_seqs, _ = tnnur.pad_packed_sequence(packed_encoded_seqs, batch_first=True)  # (batch, seq, dim)

        # mask marks real (non-padding) positions so attention and logits ignore padding
        mask = (padded_encoded_seqs[:, :, 0] != 0).unsqueeze(dim=-1).type(torch.float)
        attn_padded_encoded_seqs, attention_weights = self._attention(padded_encoded_seqs, encoder_padded_seqs, mask)
        logits = self._linear(attn_padded_encoded_seqs)*mask  # (batch, seq, voc_size)
        return logits, hidden_states, attention_weights

    def get_params(self) -> dict:
        """
        Obtains the params for the network.
        :return : A dict with the params.
        """
        parameter_enum = ModelParametersEnum
        return {
            parameter_enum.NUMBER_OF_LAYERS: self.num_layers,
            parameter_enum.NUMBER_OF_DIMENSIONS: self.num_dimensions,
            parameter_enum.VOCABULARY_SIZE: self.vocabulary_size,
            parameter_enum.DROPOUT: self.dropout
        }
import re
import numpy as np
class Vocabulary:
    """Two-way token <-> id mapping used to encode token sequences as index vectors."""

    def __init__(self, tokens=None, starting_id=0):
        """
        Instantiates a Vocabulary instance.
        :param tokens: A dict mapping tokens (str) to their ids.
        :param starting_id: The value for the starting id.
        """
        # a single dict holds both directions: token -> id and id -> token
        self._tokens = {}
        self._current_id = starting_id
        if tokens:
            for token, token_id in tokens.items():
                self._add(token, token_id)
                if token_id + 1 > self._current_id:
                    self._current_id = token_id + 1

    def __getitem__(self, token_or_id):
        """Return the id for a token, or the token for an id."""
        return self._tokens[token_or_id]

    def add(self, token):
        """Add a token and return its id (the existing id if already present)."""
        if not isinstance(token, str):
            raise TypeError("Token is not a string")
        if token in self:
            return self[token]
        new_id = self._current_id
        self._add(token, new_id)
        self._current_id = new_id + 1
        return new_id

    def update(self, tokens):
        """Add many tokens at once; return their ids."""
        return [self.add(token) for token in tokens]

    def __delitem__(self, token_or_id):
        """Remove both directions of a (token, id) mapping."""
        counterpart = self._tokens[token_or_id]
        del self._tokens[counterpart]
        del self._tokens[token_or_id]

    def __contains__(self, token_or_id):
        """True when the token (or id) is known."""
        return token_or_id in self._tokens

    def __eq__(self, other_vocabulary):
        """Vocabularies are equal when their mappings match."""
        return self._tokens == other_vocabulary._tokens  # pylint: disable=W0212

    def __len__(self):
        """Number of tokens (the internal dict stores each mapping twice)."""
        return len(self._tokens) // 2

    def encode(self, tokens):
        """Encode a token list as a float32 id vector; None if any token is unknown."""
        encoded = np.zeros(len(tokens), dtype=np.float32)
        for position, token in enumerate(tokens):
            try:
                encoded[position] = self._tokens[token]
            except KeyError:
                return None
        return encoded

    def decode(self, ohe_vect):
        """Decode an id vector back into the list of tokens."""
        return [self[value] for value in ohe_vect]

    def _add(self, token, idx):
        # store both directions; refuse duplicate ids
        if idx in self._tokens:
            raise ValueError("IDX already present in vocabulary")
        self._tokens[token] = idx
        self._tokens[idx] = token

    def tokens(self):
        """All string tokens in the vocabulary."""
        return [t for t in self._tokens if isinstance(t, str)]
class SMILESTokenizer:
    """Deals with the tokenization and untokenization of SMILES."""

    REGEXPS = {
        "brackets": re.compile(r"(\[[^\]]*\])"),
        "2_ring_nums": re.compile(r"(%\d{2})"),
        "brcl": re.compile(r"(Br|Cl)")
    }
    REGEXP_ORDER = ["brackets", "2_ring_nums", "brcl"]

    def tokenize(self, smiles, with_begin_and_end=True):
        """
        Tokenizes a SMILES string.
        :param smiles: A SMILES string.
        :param with_begin_and_end: Appends a begin token and prepends an end token.
        :return : A list with the tokenized version.
        """
        tokens = self._split(smiles, 0)
        if with_begin_and_end:
            return ["^"] + tokens + ["$"]
        return tokens

    def _split(self, fragment, depth):
        # Recursively split on each regexp in order: odd split positions are
        # whole captured tokens, even positions are split by the next regexp;
        # whatever survives all regexps is split into single characters.
        if depth >= len(self.REGEXP_ORDER):
            return list(fragment)
        pattern = self.REGEXPS[self.REGEXP_ORDER[depth]]
        pieces = pattern.split(fragment)
        tokens = []
        for position, piece in enumerate(pieces):
            if position % 2:
                tokens.append(piece)
            else:
                tokens.extend(self._split(piece, depth + 1))
        return tokens

    def untokenize(self, tokens):
        """
        Untokenizes a SMILES string.
        :param tokens: List of tokens.
        :return : A SMILES string.
        """
        kept = []
        for token in tokens:
            if token == "$":
                break
            if token != "^":
                kept.append(token)
        return "".join(kept)
def create_vocabulary(smiles_list, tokenizer):
    """
    Creates a vocabulary for the SMILES syntax.
    :param smiles_list: A list with SMILES.
    :param tokenizer: Tokenizer to use.
    :return: A vocabulary instance with all the tokens in the smiles_list.
    """
    seen_tokens = set()
    for smiles in smiles_list:
        seen_tokens.update(tokenizer.tokenize(smiles, with_begin_and_end=False))
    # special tokens first so they always get the lowest, fixed ids
    vocabulary = Vocabulary()
    vocabulary.update(["<pad>", "$", "^"] + sorted(seen_tokens))
    return vocabulary
import torch
import torch.utils.data as tud
import torch.nn.utils.rnn as tnnur
class Dataset(tud.Dataset):
    """Dataset that takes a list of SMILES only."""

    def __init__(self, smiles_list, vocabulary, tokenizer):
        """
        Instantiates a Dataset.
        :param smiles_list: A list with SMILES strings.
        :param vocabulary: A Vocabulary object.
        :param tokenizer: A Tokenizer object.
        """
        self._vocabulary = vocabulary
        self._tokenizer = tokenizer
        # keep only the SMILES that encode cleanly
        self._encoded_list = []
        for smiles in smiles_list:
            encoded = self._vocabulary.encode(self._tokenizer.tokenize(smiles))
            if encoded is not None:
                self._encoded_list.append(encoded)

    def __getitem__(self, i):
        return torch.tensor(self._encoded_list[i], dtype=torch.long)  # pylint: disable=E1102

    def __len__(self):
        return len(self._encoded_list)

    @classmethod
    def collate_fn(cls, encoded_seqs):
        return pad_batch(encoded_seqs)
class DecoratorDataset(tud.Dataset):
    """Dataset that takes a list of (scaffold, decoration) pairs."""

    def __init__(self, scaffold_decoration_smi_list, vocabulary):
        self.vocabulary = vocabulary
        self._encoded_list = []
        scaffold_voc = self.vocabulary.scaffold_vocabulary
        scaffold_tok = self.vocabulary.scaffold_tokenizer
        decoration_voc = self.vocabulary.decoration_vocabulary
        decoration_tok = self.vocabulary.decoration_tokenizer
        for scaffold_smi, decoration_smi in scaffold_decoration_smi_list:
            encoded_scaffold = scaffold_voc.encode(scaffold_tok.tokenize(scaffold_smi))
            encoded_decoration = decoration_voc.encode(decoration_tok.tokenize(decoration_smi))
            # drop pairs where either side fails to encode
            if encoded_scaffold is None or encoded_decoration is None:
                continue
            self._encoded_list.append((encoded_scaffold, encoded_decoration))

    def __getitem__(self, i):
        scaffold, decoration = self._encoded_list[i]
        return (torch.tensor(scaffold, dtype=torch.long),
                torch.tensor(decoration, dtype=torch.long))  # pylint: disable=E1102

    def __len__(self):
        return len(self._encoded_list)

    @classmethod
    def collate_fn(cls, encoded_pairs):
        """
        Turns a list of encoded pairs (scaffold, decoration) of sequences and turns them into two batches.
        :param: A list of pairs of encoded sequences.
        :return: A tuple with two tensors, one for the scaffolds and one for the decorations in the same order as given.
        """
        scaffolds, decorations = zip(*encoded_pairs)
        return (pad_batch(list(scaffolds)), pad_batch(list(decorations)))
def pad_batch(encoded_seqs):
    """
    Pads a batch.
    :param encoded_seqs: A list of encoded sequences.
    :return: A tensor with the sequences correctly padded, plus their lengths.
    """
    lengths = torch.tensor([len(seq) for seq in encoded_seqs], dtype=torch.int64)  # pylint: disable=not-callable
    # NOTE: moves the padded batch to the GPU; requires CUDA
    padded = tnnur.pad_sequence(encoded_seqs, batch_first=True).cuda()
    return (padded, lengths)
import re
import numpy as np
class Vocabulary:
    """Two-way token <-> id mapping used to encode token sequences as index vectors."""

    def __init__(self, tokens=None, starting_id=0):
        """
        Instantiates a Vocabulary instance.
        :param tokens: A dict mapping tokens (str) to their ids.
        :param starting_id: The value for the starting id.
        """
        # a single dict holds both directions: token -> id and id -> token
        self._tokens = {}
        self._current_id = starting_id
        if tokens:
            for token, token_id in tokens.items():
                self._add(token, token_id)
                if token_id + 1 > self._current_id:
                    self._current_id = token_id + 1

    def __getitem__(self, token_or_id):
        """Return the id for a token, or the token for an id."""
        return self._tokens[token_or_id]

    def add(self, token):
        """Add a token and return its id (the existing id if already present)."""
        if not isinstance(token, str):
            raise TypeError("Token is not a string")
        if token in self:
            return self[token]
        new_id = self._current_id
        self._add(token, new_id)
        self._current_id = new_id + 1
        return new_id

    def update(self, tokens):
        """Add many tokens at once; return their ids."""
        return [self.add(token) for token in tokens]

    def __delitem__(self, token_or_id):
        """Remove both directions of a (token, id) mapping."""
        counterpart = self._tokens[token_or_id]
        del self._tokens[counterpart]
        del self._tokens[token_or_id]

    def __contains__(self, token_or_id):
        """True when the token (or id) is known."""
        return token_or_id in self._tokens

    def __eq__(self, other_vocabulary):
        """Vocabularies are equal when their mappings match."""
        return self._tokens == other_vocabulary._tokens  # pylint: disable=W0212

    def __len__(self):
        """Number of tokens (the internal dict stores each mapping twice)."""
        return len(self._tokens) // 2

    def encode(self, tokens):
        """Encode a token list as a float32 id vector; None if any token is unknown."""
        encoded = np.zeros(len(tokens), dtype=np.float32)
        for position, token in enumerate(tokens):
            try:
                encoded[position] = self._tokens[token]
            except KeyError:
                return None
        return encoded

    def decode(self, ohe_vect):
        """Decode an id vector back into the list of tokens."""
        return [self[value] for value in ohe_vect]

    def _add(self, token, idx):
        # store both directions; refuse duplicate ids
        if idx in self._tokens:
            raise ValueError("IDX already present in vocabulary")
        self._tokens[token] = idx
        self._tokens[idx] = token

    def tokens(self):
        """All string tokens in the vocabulary."""
        return [t for t in self._tokens if isinstance(t, str)]
class SMILESTokenizer:
    """Deals with the tokenization and untokenization of SMILES."""

    REGEXPS = {
        "brackets": re.compile(r"(\[[^\]]*\])"),
        "2_ring_nums": re.compile(r"(%\d{2})"),
        "brcl": re.compile(r"(Br|Cl)")
    }
    REGEXP_ORDER = ["brackets", "2_ring_nums", "brcl"]

    def tokenize(self, smiles, with_begin_and_end=True):
        """
        Tokenizes a SMILES string.
        :param smiles: A SMILES string.
        :param with_begin_and_end: Appends a begin token and prepends an end token.
        :return : A list with the tokenized version.
        """
        tokens = self._split(smiles, 0)
        if with_begin_and_end:
            return ["^"] + tokens + ["$"]
        return tokens

    def _split(self, fragment, depth):
        # Recursively split on each regexp in order: odd split positions are
        # whole captured tokens, even positions are split by the next regexp;
        # whatever survives all regexps is split into single characters.
        if depth >= len(self.REGEXP_ORDER):
            return list(fragment)
        pattern = self.REGEXPS[self.REGEXP_ORDER[depth]]
        pieces = pattern.split(fragment)
        tokens = []
        for position, piece in enumerate(pieces):
            if position % 2:
                tokens.append(piece)
            else:
                tokens.extend(self._split(piece, depth + 1))
        return tokens

    def untokenize(self, tokens):
        """
        Untokenizes a SMILES string.
        :param tokens: List of tokens.
        :return : A SMILES string.
        """
        kept = []
        for token in tokens:
            if token == "$":
                break
            if token != "^":
                kept.append(token)
        return "".join(kept)
def create_vocabulary(smiles_list, tokenizer):
    """
    Creates a vocabulary for the SMILES syntax.
    :param smiles_list: A list with SMILES.
    :param tokenizer: Tokenizer to use.
    :return: A vocabulary instance with all the tokens in the smiles_list.
    """
    token_set = set()
    for smiles in smiles_list:
        token_set |= set(tokenizer.tokenize(smiles, with_begin_and_end=False))

    vocabulary = Vocabulary()
    # Special tokens first so they get stable, low ids; the rest sorted.
    vocabulary.update(["<pad>", "$", "^"] + sorted(token_set))
    return vocabulary
class DecoratorVocabulary:
    """
    Encapsulation of the two vocabularies needed for the decorator.
    """

    def __init__(self, scaffold_vocabulary, scaffold_tokenizer, decoration_vocabulary, decoration_tokenizer):
        """
        :param scaffold_vocabulary: Vocabulary used for the scaffolds.
        :param scaffold_tokenizer: Tokenizer used for the scaffolds.
        :param decoration_vocabulary: Vocabulary used for the decorations.
        :param decoration_tokenizer: Tokenizer used for the decorations.
        """
        self.scaffold_vocabulary = scaffold_vocabulary
        self.scaffold_tokenizer = scaffold_tokenizer
        self.decoration_vocabulary = decoration_vocabulary
        self.decoration_tokenizer = decoration_tokenizer

    def len_scaffold(self):
        """
        Returns the length of the scaffold vocabulary.
        """
        return len(self.scaffold_vocabulary)

    def len_decoration(self):
        """
        Returns the length of the decoration vocabulary.
        """
        return len(self.decoration_vocabulary)

    def len(self):
        """
        Returns the length of both vocabularies in a tuple.
        :return: A tuple with (len(scaff_voc), len(dec_voc)).
        """
        return (len(self.scaffold_vocabulary), len(self.decoration_vocabulary))

    def encode_scaffold(self, smiles):
        """
        Encodes a scaffold SMILES.
        :param smiles: Scaffold SMILES to encode.
        :return: An encoded vector with the scaffold information.
        """
        return self.scaffold_vocabulary.encode(self.scaffold_tokenizer.tokenize(smiles))

    def decode_scaffold(self, encoded_scaffold):
        """
        Decodes the scaffold.
        :param encoded_scaffold: An encoded version of the scaffold.
        :return: A SMILES of the scaffold.
        """
        return self.scaffold_tokenizer.untokenize(self.scaffold_vocabulary.decode(encoded_scaffold))

    def encode_decoration(self, smiles):
        """
        Encodes a decoration SMILES.
        :param smiles: Decoration SMILES to encode.
        :return: An encoded vector with the decoration information.
        """
        return self.decoration_vocabulary.encode(self.decoration_tokenizer.tokenize(smiles))

    def decode_decoration(self, encoded_decoration):
        """
        Decodes a decoration for a scaffold.
        :param encoded_decoration: An encoded version of the decoration.
        :return: A SMILES of the decoration.
        """
        return self.decoration_tokenizer.untokenize(self.decoration_vocabulary.decode(encoded_decoration))

    @classmethod
    def from_lists(cls, scaffold_list, decoration_list):
        """
        Creates the vocabularies from lists.
        :param scaffold_list: A list with scaffolds.
        :param decoration_list: A list with decorations.
        :return: A DecoratorVocabulary (or subclass) instance.
        """
        scaffold_tokenizer = SMILESTokenizer()
        scaffold_vocabulary = create_vocabulary(scaffold_list, scaffold_tokenizer)

        decoration_tokenizer = SMILESTokenizer()
        decoration_vocabulary = create_vocabulary(decoration_list, decoration_tokenizer)

        # Use cls (not the class name) so subclasses get instances of themselves.
        return cls(scaffold_vocabulary, scaffold_tokenizer, decoration_vocabulary, decoration_tokenizer)
import math
import torch
import torch.nn as tnn
import torch.nn.utils.rnn as tnnur
from reinvent_models.lib_invent.enums.generative_model_parameters import GenerativeModelParametersEnum
class Encoder(tnn.Module):
    """
    Simple bidirectional RNN (LSTM) encoder implementation.
    """

    def __init__(self, num_layers, num_dimensions, vocabulary_size, dropout):
        """
        :param num_layers: Number of stacked LSTM layers.
        :param num_dimensions: Size of both the embedding and the hidden state.
        :param vocabulary_size: Number of tokens in the vocabulary.
        :param dropout: Dropout probability (embedding and inter-layer LSTM).
        """
        super(Encoder, self).__init__()

        self.num_layers = num_layers
        self.num_dimensions = num_dimensions
        self.vocabulary_size = vocabulary_size
        self.dropout = dropout

        self._embedding = tnn.Sequential(
            tnn.Embedding(self.vocabulary_size, self.num_dimensions),
            tnn.Dropout(dropout)
        )
        self._rnn = tnn.LSTM(self.num_dimensions, self.num_dimensions, self.num_layers,
                             batch_first=True, dropout=self.dropout, bidirectional=True)

    def forward(self, padded_seqs, seq_lengths):  # pylint: disable=arguments-differ
        """
        Performs the forward pass.
        :param padded_seqs: A tensor with the sequences (batch, seq).
        :param seq_lengths: The lengths of the sequences (for packed sequences).
        :return: A tensor with all the output values for each step and the two hidden states.
        """
        batch_size = padded_seqs.size(0)
        max_seq_size = padded_seqs.size(1)
        hidden_state = self._initialize_hidden_state(batch_size)

        padded_seqs = self._embedding(padded_seqs)
        hs_h, hs_c = (hidden_state, hidden_state.clone().detach())

        # pack_padded_sequence() requires the lengths on the CPU (pytorch >= 1.7).
        seq_lengths = seq_lengths.cpu()
        packed_seqs = tnnur.pack_padded_sequence(padded_seqs, seq_lengths, batch_first=True, enforce_sorted=False)
        packed_seqs, (hs_h, hs_c) = self._rnn(packed_seqs, (hs_h, hs_c))
        padded_seqs, _ = tnnur.pad_packed_sequence(packed_seqs, batch_first=True)

        # Sum the forward and backward directions. NOTE: the previous implementation
        # appended a bare .squeeze(), which also dropped the batch (or layer) axis when
        # it had size 1 (see old FIXME); summing over the direction axis alone keeps
        # the shapes correct for batches of size 1.
        hs_h = hs_h.view(self.num_layers, 2, batch_size, self.num_dimensions).sum(dim=1)  # (layers, batch, dim)
        hs_c = hs_c.view(self.num_layers, 2, batch_size, self.num_dimensions).sum(dim=1)  # (layers, batch, dim)
        padded_seqs = padded_seqs.view(batch_size, max_seq_size, 2, self.num_dimensions).sum(dim=2)  # (batch, seq, dim)

        return padded_seqs, (hs_h, hs_c)

    def _initialize_hidden_state(self, batch_size):
        # Allocate on the same device as the model parameters instead of hard-coding
        # CUDA, so the encoder also runs on CPU-only machines; behavior on GPU setups
        # is unchanged (parameters live on the CUDA device there).
        device = next(self.parameters()).device
        return torch.zeros(self.num_layers * 2, batch_size, self.num_dimensions, device=device)

    def get_params(self):
        """
        Obtains the params for the network.
        :return: A dict with the params.
        """
        # NOTE: this docstring previously sat *after* the assignment below, so it was
        # a plain expression statement rather than a real docstring.
        parameter_enums = GenerativeModelParametersEnum
        return {
            parameter_enums.NUMBER_OF_LAYERS: self.num_layers,
            parameter_enums.NUMBER_OF_DIMENSIONS: self.num_dimensions,
            parameter_enums.VOCABULARY_SIZE: self.vocabulary_size,
            parameter_enums.DROPOUT: self.dropout
        }
class AttentionLayer(tnn.Module):
    """Scaled dot-product attention followed by a linear + tanh combination layer."""

    def __init__(self, num_dimensions):
        super(AttentionLayer, self).__init__()

        self.num_dimensions = num_dimensions

        # Combines the decoder state with the attention context (hence dim*2 in).
        self._attention_linear = tnn.Sequential(
            tnn.Linear(self.num_dimensions*2, self.num_dimensions),
            tnn.Tanh()
        )

    def forward(self, padded_seqs, encoder_padded_seqs, decoder_mask):  # pylint: disable=arguments-differ
        """
        Performs the forward pass.
        :param padded_seqs: A tensor with the output sequences (batch, seq_d, dim).
        :param encoder_padded_seqs: A tensor with the encoded input scaffold sequences (batch, seq_e, dim).
        :param decoder_mask: A tensor that represents the encoded input mask.
        :return: Two tensors: one with the modified logits and another with the attention weights.
        """
        # Scaled dot-product: (batch, seq_d, 1, dim)*(batch, 1, seq_e, dim) -> (batch, seq_d, seq_e).
        scores = (padded_seqs.unsqueeze(dim=2) * encoder_padded_seqs.unsqueeze(dim=1)).sum(dim=3)
        scores = scores.div(math.sqrt(self.num_dimensions))
        attention_weights = scores.softmax(dim=2)

        # (batch, seq_d, seq_e)@(batch, seq_e, dim) -> (batch, seq_d, dim).
        attention_context = attention_weights.bmm(encoder_padded_seqs)

        combined = torch.cat([padded_seqs, attention_context], dim=2)
        attention_logits = self._attention_linear(combined) * decoder_mask
        return attention_logits, attention_weights
class Decoder(tnn.Module):
    """
    Simple unidirectional RNN (LSTM) decoder with attention over the encoder outputs.
    """

    def __init__(self, num_layers, num_dimensions, vocabulary_size, dropout):
        """
        :param num_layers: Number of stacked LSTM layers.
        :param num_dimensions: Size of both the embedding and the hidden state.
        :param vocabulary_size: Number of tokens in the vocabulary.
        :param dropout: Dropout probability (embedding and inter-layer LSTM).
        """
        super(Decoder, self).__init__()

        self.num_layers = num_layers
        self.num_dimensions = num_dimensions
        self.vocabulary_size = vocabulary_size
        self.dropout = dropout

        self._embedding = tnn.Sequential(
            tnn.Embedding(self.vocabulary_size, self.num_dimensions),
            tnn.Dropout(dropout)
        )
        self._rnn = tnn.LSTM(self.num_dimensions, self.num_dimensions, self.num_layers,
                             batch_first=True, dropout=self.dropout, bidirectional=False)

        self._attention = AttentionLayer(self.num_dimensions)
        self._linear = tnn.Linear(self.num_dimensions, self.vocabulary_size)  # just to redimension

    def forward(self, padded_seqs, seq_lengths, encoder_padded_seqs, hidden_states):  # pylint: disable=arguments-differ
        """
        Performs the forward pass.
        :param padded_seqs: A tensor with the output sequences (batch, seq_d, dim).
        :param seq_lengths: A list with the length of each output sequence.
        :param encoder_padded_seqs: A tensor with the encoded input scaffold sequences (batch, seq_e, dim).
        :param hidden_states: The hidden states from the encoder.
        :return: Three tensors: the output logits, the hidden states of the decoder and the attention weights.
        """
        # FIXME: this is to guard against non compatible `gpu` input for pack_padded_sequence() method in pytorch 1.7
        seq_lengths = seq_lengths.cpu()
        padded_encoded_seqs = self._embedding(padded_seqs)
        packed_encoded_seqs = tnnur.pack_padded_sequence(
            padded_encoded_seqs, seq_lengths, batch_first=True, enforce_sorted=False)
        packed_encoded_seqs, hidden_states = self._rnn(packed_encoded_seqs, hidden_states)
        padded_encoded_seqs, _ = tnnur.pad_packed_sequence(packed_encoded_seqs, batch_first=True)  # (batch, seq, dim)

        # Mask derived from the padded output: pad_packed_sequence fills padding with
        # zeros. NOTE(review): this assumes real positions never embed to exactly 0.0
        # in the first dimension - confirm against the embedding initialization.
        mask = (padded_encoded_seqs[:, :, 0] != 0).unsqueeze(dim=-1).type(torch.float)
        attn_padded_encoded_seqs, attention_weights = self._attention(padded_encoded_seqs, encoder_padded_seqs, mask)
        logits = self._linear(attn_padded_encoded_seqs)*mask  # (batch, seq, voc_size)
        return logits, hidden_states, attention_weights

    def get_params(self):
        """
        Obtains the params for the network.
        :return: A dict with the params.
        """
        # NOTE: this docstring previously sat *after* the assignment below, so it was
        # a plain expression statement rather than a real docstring.
        parameter_enum = GenerativeModelParametersEnum
        return {
            parameter_enum.NUMBER_OF_LAYERS: self.num_layers,
            parameter_enum.NUMBER_OF_DIMENSIONS: self.num_dimensions,
            parameter_enum.VOCABULARY_SIZE: self.vocabulary_size,
            parameter_enum.DROPOUT: self.dropout
        }
class Decorator(tnn.Module):
    """
    An encoder-decoder network that decorates scaffolds.
    """

    def __init__(self, encoder_params, decoder_params):
        """
        :param encoder_params: A dict with the constructor arguments for the Encoder.
        :param decoder_params: A dict with the constructor arguments for the Decoder.
        """
        super(Decorator, self).__init__()

        self._encoder = Encoder(**encoder_params)
        self._decoder = Decoder(**decoder_params)

    def forward(self, encoder_seqs, encoder_seq_lengths, decoder_seqs, decoder_seq_lengths):  # pylint: disable=arguments-differ
        """
        Performs the forward pass.
        :param encoder_seqs: A tensor with the output sequences (batch, seq_d, dim).
        :param encoder_seq_lengths: A list with the length of each input sequence.
        :param decoder_seqs: A tensor with the encoded input scaffold sequences (batch, seq_e, dim).
        :param decoder_seq_lengths: The lengths of the decoder sequences.
        :return: The output logits as a tensor (batch, seq_d, dim).
        """
        encoded_seqs, hidden_states = self.forward_encoder(encoder_seqs, encoder_seq_lengths)
        logits, _, _ = self.forward_decoder(decoder_seqs, decoder_seq_lengths, encoded_seqs, hidden_states)
        return logits

    def forward_encoder(self, padded_seqs, seq_lengths):
        """
        Does a forward pass only of the encoder.
        :param padded_seqs: The data to feed the encoder.
        :param seq_lengths: The length of each sequence in the batch.
        :return: Returns a tuple with (encoded_seqs, hidden_states).
        """
        return self._encoder(padded_seqs, seq_lengths)

    def forward_decoder(self, padded_seqs, seq_lengths, encoder_padded_seqs, hidden_states):
        """
        Does a forward pass only of the decoder.
        :param padded_seqs: The data to feed to the decoder.
        :param seq_lengths: The length of each sequence in the batch.
        :param encoder_padded_seqs: The encoded sequences from the encoder.
        :param hidden_states: The hidden states from the encoder.
        :return: Returns the logits and the hidden state for each element of the sequence passed.
        """
        return self._decoder(padded_seqs, seq_lengths, encoder_padded_seqs, hidden_states)

    def get_params(self):
        """
        Obtains the params for the network.
        :return: A dict with the params.
        """
        return {
            "encoder_params": self._encoder.get_params(),
            "decoder_params": self._decoder.get_params()
        }
import torch
import torch.nn as tnn
from reinvent_models.lib_invent.enums.generative_model_regime import GenerativeModelRegimeEnum
from reinvent_models.lib_invent.models.decorator import Decorator
from reinvent_models.model_factory.enums.model_mode_enum import ModelModeEnum
class DecoratorModel:
    """Implements the likelihood and scaffold-decorating functions of the decorator model."""

    def __init__(self, vocabulary, decorator, max_sequence_length=256, no_cuda=False, mode=ModelModeEnum().TRAINING):
        """
        :param vocabulary: A DecoratorVocabulary instance with the vocabularies of both the encoder and decoder.
        :param decorator: A decorator network instance.
        :param max_sequence_length: Maximum number of tokens allowed to sample.
        :param no_cuda: Forces the model not to use CUDA, even if it is available.
        :param mode: Mode in which the model should be initialized.
        """
        self.vocabulary = vocabulary
        self.max_sequence_length = max_sequence_length
        self.network = decorator
        self._model_modes = GenerativeModelRegimeEnum()

        if torch.cuda.is_available() and not no_cuda:
            self.network.cuda()

        # ignore_index=0 so padding positions do not contribute to the loss.
        self._nll_loss = tnn.NLLLoss(reduction="none", ignore_index=0)

        self.set_mode(mode)

    @classmethod
    def load_from_file(cls, path, mode=ModelModeEnum().TRAINING):
        """
        Loads a model from a single file.
        :param path: Path to the saved model.
        :param mode: Mode in which the model should be initialized.
        :return: An instance of the decorator model.
        """
        # NOTE(review): torch.load without map_location fails when a CUDA-saved model
        # is loaded on a CPU-only machine - consider map_location; left unchanged here.
        data = torch.load(path)

        decorator = Decorator(**data["decorator"]["params"])
        decorator.load_state_dict(data["decorator"]["state"])

        # Use cls (not the class name) so subclasses get instances of themselves.
        model = cls(
            decorator=decorator,
            mode=mode,
            **data["model"]
        )
        return model

    def save(self, path):
        """
        Saves the model to a file.
        :param path: Path of the file which the model will be saved to.
        """
        save_dict = {
            'model': {
                'vocabulary': self.vocabulary,
                'max_sequence_length': self.max_sequence_length
            },
            'decorator': {
                'params': self.network.get_params(),
                'state': self.network.state_dict()
            }
        }
        torch.save(save_dict, path)

    def set_mode(self, mode):
        """
        Changes the mode of the network to training or eval.
        :param mode: Mode to change to (training, eval).
        :return: The model instance.
        """
        if mode == self._model_modes.INFERENCE:
            self.network.eval()
        else:
            self.network.train()
        return self

    def likelihood(self, scaffold_seqs, scaffold_seq_lengths, decoration_seqs, decoration_seq_lengths):
        """
        Retrieves the likelihood of a scaffold and its respective decorations.
        :param scaffold_seqs: (batch, seq) A batch of padded scaffold sequences.
        :param scaffold_seq_lengths: The length of the scaffold sequences (for packing purposes).
        :param decoration_seqs: (batch, seq) A batch of decorator sequences.
        :param decoration_seq_lengths: The length of the decorator sequences (for packing purposes).
        :return: (batch) Log likelihood for each item in the batch.
        """
        # NOTE: the decoration_seq_lengths have a - 1 to prevent the end token to be forward-passed.
        logits = self.network(scaffold_seqs, scaffold_seq_lengths, decoration_seqs,
                              decoration_seq_lengths - 1)  # (batch, seq - 1, voc)
        log_probs = logits.log_softmax(dim=2).transpose(1, 2)  # (batch, voc, seq - 1)
        # Targets are the decoration sequences shifted by one position (teacher forcing).
        return self._nll_loss(log_probs, decoration_seqs[:, 1:]).sum(dim=1)  # (batch)

    @torch.no_grad()
    def sample_decorations(self, scaffold_seqs, scaffold_seq_lengths):
        # TODO: fix the return type to be SampledSequencesDTO
        """
        Samples as many decorations as scaffolds in the tensor.
        :param scaffold_seqs: A tensor with the scaffolds to sample already encoded and padded.
        :param scaffold_seq_lengths: A tensor with the length of the scaffolds.
        :return: An iterator with (scaffold_smi, decoration_smi, nll) triplets.
        """
        batch_size = scaffold_seqs.size(0)
        # Follow the device of the input instead of hard-coding CUDA, so sampling
        # also works on CPU-only setups; unchanged on GPU runs.
        device = scaffold_seqs.device

        input_vector = torch.full(
            (batch_size, 1), self.vocabulary.decoration_vocabulary["^"],
            dtype=torch.long, device=device)  # (batch, 1)
        seq_lengths = torch.ones(batch_size)  # (batch); kept on CPU for pack_padded_sequence
        encoder_padded_seqs, hidden_states = self.network.forward_encoder(scaffold_seqs, scaffold_seq_lengths)
        nlls = torch.zeros(batch_size, device=device)
        not_finished = torch.ones(batch_size, 1, dtype=torch.long, device=device)
        sequences = []
        for _ in range(self.max_sequence_length - 1):
            logits, hidden_states, _ = self.network.forward_decoder(
                input_vector, seq_lengths, encoder_padded_seqs, hidden_states)  # (batch, 1, voc)
            # Squeeze only the sequence axis (dim=1): a bare .squeeze() would also
            # drop the batch axis for batches of size 1.
            probs = logits.softmax(dim=2).squeeze(dim=1)  # (batch, voc)
            log_probs = logits.log_softmax(dim=2).squeeze(dim=1)  # (batch, voc)
            input_vector = torch.multinomial(probs, 1)*not_finished  # (batch, 1)
            sequences.append(input_vector)
            nlls += self._nll_loss(log_probs, input_vector.squeeze(dim=1))
            not_finished = (input_vector > 1).type(torch.long)  # 0 is padding, 1 is end token
            if not_finished.sum() == 0:
                break

        decoration_smiles = [self.vocabulary.decode_decoration(seq)
                             for seq in torch.cat(sequences, 1).data.cpu().numpy()]
        scaffold_smiles = [self.vocabulary.decode_scaffold(seq) for seq in scaffold_seqs.data.cpu().numpy()]
        return zip(scaffold_smiles, decoration_smiles, nlls.data.cpu().numpy().tolist())

    def get_network_parameters(self):
        """
        Returns the parameters of the underlying network.
        """
        return self.network.parameters()
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.