diff --git a/evalkit_tf446/lib/python3.10/lib2to3/Grammar.txt b/evalkit_tf446/lib/python3.10/lib2to3/Grammar.txt new file mode 100644 index 0000000000000000000000000000000000000000..fa7b15061d941c4d952c8d6446fe33c40890a264 --- /dev/null +++ b/evalkit_tf446/lib/python3.10/lib2to3/Grammar.txt @@ -0,0 +1,196 @@ +# Grammar for 2to3. This grammar supports Python 2.x and 3.x. + +# NOTE WELL: You should also follow all the steps listed at +# https://devguide.python.org/grammar/ + +# Start symbols for the grammar: +# file_input is a module or sequence of commands read from an input file; +# single_input is a single interactive statement; +# eval_input is the input for the eval() and input() functions. +# NB: compound_stmt in single_input is followed by extra NEWLINE! +file_input: (NEWLINE | stmt)* ENDMARKER +single_input: NEWLINE | simple_stmt | compound_stmt NEWLINE +eval_input: testlist NEWLINE* ENDMARKER + +decorator: '@' dotted_name [ '(' [arglist] ')' ] NEWLINE +decorators: decorator+ +decorated: decorators (classdef | funcdef | async_funcdef) +async_funcdef: ASYNC funcdef +funcdef: 'def' NAME parameters ['->' test] ':' suite +parameters: '(' [typedargslist] ')' + +# The following definition for typedarglist is equivalent to this set of rules: +# +# arguments = argument (',' argument)* +# argument = tfpdef ['=' test] +# kwargs = '**' tname [','] +# args = '*' [tname] +# kwonly_kwargs = (',' argument)* [',' [kwargs]] +# args_kwonly_kwargs = args kwonly_kwargs | kwargs +# poskeyword_args_kwonly_kwargs = arguments [',' [args_kwonly_kwargs]] +# typedargslist_no_posonly = poskeyword_args_kwonly_kwargs | args_kwonly_kwargs +# typedarglist = arguments ',' '/' [',' [typedargslist_no_posonly]])|(typedargslist_no_posonly)" +# +# It needs to be fully expanded to allow our LL(1) parser to work on it. + +typedargslist: tfpdef ['=' test] (',' tfpdef ['=' test])* ',' '/' [ + ',' [((tfpdef ['=' test] ',')* ('*' [tname] (',' tname ['=' test])* + [',' ['**' tname [',']]] | '**' tname [',']) + | tfpdef ['=' test] (',' tfpdef ['=' test])* [','])] + ] | ((tfpdef ['=' test] ',')* ('*' [tname] (',' tname ['=' test])* + [',' ['**' tname [',']]] | '**' tname [',']) + | tfpdef ['=' test] (',' tfpdef ['=' test])* [',']) + +tname: NAME [':' test] +tfpdef: tname | '(' tfplist ')' +tfplist: tfpdef (',' tfpdef)* [','] + +# The following definition for varargslist is equivalent to this set of rules: +# +# arguments = argument (',' argument )* +# argument = vfpdef ['=' test] +# kwargs = '**' vname [','] +# args = '*' [vname] +# kwonly_kwargs = (',' argument )* [',' [kwargs]] +# args_kwonly_kwargs = args kwonly_kwargs | kwargs +# poskeyword_args_kwonly_kwargs = arguments [',' [args_kwonly_kwargs]] +# vararglist_no_posonly = poskeyword_args_kwonly_kwargs | args_kwonly_kwargs +# varargslist = arguments ',' '/' [','[(vararglist_no_posonly)]] | (vararglist_no_posonly) +# +# It needs to be fully expanded to allow our LL(1) parser to work on it. 
+ +varargslist: vfpdef ['=' test ](',' vfpdef ['=' test])* ',' '/' [',' [ + ((vfpdef ['=' test] ',')* ('*' [vname] (',' vname ['=' test])* + [',' ['**' vname [',']]] | '**' vname [',']) + | vfpdef ['=' test] (',' vfpdef ['=' test])* [',']) + ]] | ((vfpdef ['=' test] ',')* + ('*' [vname] (',' vname ['=' test])* [',' ['**' vname [',']]]| '**' vname [',']) + | vfpdef ['=' test] (',' vfpdef ['=' test])* [',']) + +vname: NAME +vfpdef: vname | '(' vfplist ')' +vfplist: vfpdef (',' vfpdef)* [','] + +stmt: simple_stmt | compound_stmt +simple_stmt: small_stmt (';' small_stmt)* [';'] NEWLINE +small_stmt: (expr_stmt | print_stmt | del_stmt | pass_stmt | flow_stmt | + import_stmt | global_stmt | exec_stmt | assert_stmt) +expr_stmt: testlist_star_expr (annassign | augassign (yield_expr|testlist) | + ('=' (yield_expr|testlist_star_expr))*) +annassign: ':' test ['=' test] +testlist_star_expr: (test|star_expr) (',' (test|star_expr))* [','] +augassign: ('+=' | '-=' | '*=' | '@=' | '/=' | '%=' | '&=' | '|=' | '^=' | + '<<=' | '>>=' | '**=' | '//=') +# For normal and annotated assignments, additional restrictions enforced by the interpreter +print_stmt: 'print' ( [ test (',' test)* [','] ] | + '>>' test [ (',' test)+ [','] ] ) +del_stmt: 'del' exprlist +pass_stmt: 'pass' +flow_stmt: break_stmt | continue_stmt | return_stmt | raise_stmt | yield_stmt +break_stmt: 'break' +continue_stmt: 'continue' +return_stmt: 'return' [testlist_star_expr] +yield_stmt: yield_expr +raise_stmt: 'raise' [test ['from' test | ',' test [',' test]]] +import_stmt: import_name | import_from +import_name: 'import' dotted_as_names +import_from: ('from' ('.'* dotted_name | '.'+) + 'import' ('*' | '(' import_as_names ')' | import_as_names)) +import_as_name: NAME ['as' NAME] +dotted_as_name: dotted_name ['as' NAME] +import_as_names: import_as_name (',' import_as_name)* [','] +dotted_as_names: dotted_as_name (',' dotted_as_name)* +dotted_name: NAME ('.' 
NAME)* +global_stmt: ('global' | 'nonlocal') NAME (',' NAME)* +exec_stmt: 'exec' expr ['in' test [',' test]] +assert_stmt: 'assert' test [',' test] + +compound_stmt: if_stmt | while_stmt | for_stmt | try_stmt | with_stmt | funcdef | classdef | decorated | async_stmt +async_stmt: ASYNC (funcdef | with_stmt | for_stmt) +if_stmt: 'if' namedexpr_test ':' suite ('elif' namedexpr_test ':' suite)* ['else' ':' suite] +while_stmt: 'while' namedexpr_test ':' suite ['else' ':' suite] +for_stmt: 'for' exprlist 'in' testlist ':' suite ['else' ':' suite] +try_stmt: ('try' ':' suite + ((except_clause ':' suite)+ + ['else' ':' suite] + ['finally' ':' suite] | + 'finally' ':' suite)) +with_stmt: 'with' with_item (',' with_item)* ':' suite +with_item: test ['as' expr] +with_var: 'as' expr +# NB compile.c makes sure that the default except clause is last +except_clause: 'except' [test [(',' | 'as') test]] +suite: simple_stmt | NEWLINE INDENT stmt+ DEDENT + +# Backward compatibility cruft to support: +# [ x for x in lambda: True, lambda: False if x() ] +# even while also allowing: +# lambda x: 5 if x else 2 +# (But not a mix of the two) +testlist_safe: old_test [(',' old_test)+ [',']] +old_test: or_test | old_lambdef +old_lambdef: 'lambda' [varargslist] ':' old_test + +namedexpr_test: test [':=' test] +test: or_test ['if' or_test 'else' test] | lambdef +or_test: and_test ('or' and_test)* +and_test: not_test ('and' not_test)* +not_test: 'not' not_test | comparison +comparison: expr (comp_op expr)* +comp_op: '<'|'>'|'=='|'>='|'<='|'<>'|'!='|'in'|'not' 'in'|'is'|'is' 'not' +star_expr: '*' expr +expr: xor_expr ('|' xor_expr)* +xor_expr: and_expr ('^' and_expr)* +and_expr: shift_expr ('&' shift_expr)* +shift_expr: arith_expr (('<<'|'>>') arith_expr)* +arith_expr: term (('+'|'-') term)* +term: factor (('*'|'@'|'/'|'%'|'//') factor)* +factor: ('+'|'-'|'~') factor | power +power: [AWAIT] atom trailer* ['**' factor] +atom: ('(' [yield_expr|testlist_gexp] ')' | + '[' [listmaker] ']' | + '{' [dictsetmaker] '}' | + '`' testlist1 '`' | + NAME | NUMBER | STRING+ | '.' '.' '.') +listmaker: (namedexpr_test|star_expr) ( comp_for | (',' (namedexpr_test|star_expr))* [','] ) +testlist_gexp: (namedexpr_test|star_expr) ( comp_for | (',' (namedexpr_test|star_expr))* [','] ) +lambdef: 'lambda' [varargslist] ':' test +trailer: '(' [arglist] ')' | '[' subscriptlist ']' | '.' NAME +subscriptlist: subscript (',' subscript)* [','] +subscript: test | [test] ':' [test] [sliceop] +sliceop: ':' [test] +exprlist: (expr|star_expr) (',' (expr|star_expr))* [','] +testlist: test (',' test)* [','] +dictsetmaker: ( ((test ':' test | '**' expr) + (comp_for | (',' (test ':' test | '**' expr))* [','])) | + ((test | star_expr) + (comp_for | (',' (test | star_expr))* [','])) ) + +classdef: 'class' NAME ['(' [arglist] ')'] ':' suite + +arglist: argument (',' argument)* [','] + +# "test '=' test" is really "keyword '=' test", but we have no such token. +# These need to be in a single rule to avoid grammar that is ambiguous +# to our LL(1) parser. Even though 'test' includes '*expr' in star_expr, +# we explicitly match '*' here, too, to give it proper precedence. +# Illegal combinations and orderings are blocked in ast.c: +# multiple (test comp_for) arguments are blocked; keyword unpackings +# that precede iterable unpackings are blocked; etc. 
+argument: ( test [comp_for] | + test ':=' test | + test '=' test | + '**' test | + '*' test ) + +comp_iter: comp_for | comp_if +comp_for: [ASYNC] 'for' exprlist 'in' testlist_safe [comp_iter] +comp_if: 'if' old_test [comp_iter] + +testlist1: test (',' test)* + +# not used in grammar, but may appear in "node" passed from Parser to Compiler +encoding_decl: NAME + +yield_expr: 'yield' [yield_arg] +yield_arg: 'from' test | testlist_star_expr diff --git a/evalkit_tf446/lib/python3.10/lib2to3/PatternGrammar.txt b/evalkit_tf446/lib/python3.10/lib2to3/PatternGrammar.txt new file mode 100644 index 0000000000000000000000000000000000000000..36bf8148273bd7a27b4a76817a776fe2ab234562 --- /dev/null +++ b/evalkit_tf446/lib/python3.10/lib2to3/PatternGrammar.txt @@ -0,0 +1,28 @@ +# Copyright 2006 Google, Inc. All Rights Reserved. +# Licensed to PSF under a Contributor Agreement. + +# A grammar to describe tree matching patterns. +# Not shown here: +# - 'TOKEN' stands for any token (leaf node) +# - 'any' stands for any node (leaf or interior) +# With 'any' we can still specify the sub-structure. + +# The start symbol is 'Matcher'. + +Matcher: Alternatives ENDMARKER + +Alternatives: Alternative ('|' Alternative)* + +Alternative: (Unit | NegatedUnit)+ + +Unit: [NAME '='] ( STRING [Repeater] + | NAME [Details] [Repeater] + | '(' Alternatives ')' [Repeater] + | '[' Alternatives ']' + ) + +NegatedUnit: 'not' (STRING | NAME [Details] | '(' Alternatives ')') + +Repeater: '*' | '+' | '{' NUMBER [',' NUMBER] '}' + +Details: '<' Alternatives '>' diff --git a/evalkit_tf446/lib/python3.10/lib2to3/PatternGrammar3.10.16.final.0.pickle b/evalkit_tf446/lib/python3.10/lib2to3/PatternGrammar3.10.16.final.0.pickle new file mode 100644 index 0000000000000000000000000000000000000000..e9de5e3aa351535c40c4d95139afac6eb97e675d --- /dev/null +++ b/evalkit_tf446/lib/python3.10/lib2to3/PatternGrammar3.10.16.final.0.pickle @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:36ee934395b9209737b13893ddaff05fad8e239c2fdfac29d401d3fceeb30768 +size 1225 diff --git a/evalkit_tf446/lib/python3.10/lib2to3/fixes/__init__.py b/evalkit_tf446/lib/python3.10/lib2to3/fixes/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..b93054b3ecf3a5af96f4772e7208e7a18b5dd4a4 --- /dev/null +++ b/evalkit_tf446/lib/python3.10/lib2to3/fixes/__init__.py @@ -0,0 +1 @@ +# Dummy file to make this directory a package. 
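For context (an illustration only, not part of the patch): Grammar.txt above drives the pgen2 parser that lib2to3 uses to build syntax trees, while PatternGrammar.txt defines the mini-language in which fixer PATTERN strings are written. A minimal sketch of how the two meet, assuming the source snippet and pattern string below are made up for illustration:

    from lib2to3 import patcomp, pygram, pytree
    from lib2to3.pgen2 import driver

    # Parse a (hypothetical) Python 2 snippet with the 2to3 grammar (Grammar.txt).
    d = driver.Driver(pygram.python_grammar, convert=pytree.convert)
    tree = d.parse_string("d.has_key(k)\n")

    # Compile a tree-matching pattern written in the PatternGrammar mini-language;
    # "method=" names the matched leaf in the results dict.
    pattern = patcomp.compile_pattern(
        "power< any+ trailer< '.' method='has_key' > trailer< '(' any ')' > >")

    # Walk the tree and report matching nodes, much as the fixers below do.
    for node in tree.pre_order():
        results = {}
        if pattern.match(node, results):
            print("matched:", results["method"].value)

The fixer modules added below (fix_apply, fix_dict, fix_has_key, ...) follow the same scheme: a PATTERN string compiled against PatternGrammar.txt plus a transform() method that rewrites the matched subtree.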
diff --git a/evalkit_tf446/lib/python3.10/lib2to3/fixes/__pycache__/fix_apply.cpython-310.pyc b/evalkit_tf446/lib/python3.10/lib2to3/fixes/__pycache__/fix_apply.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..ad404cdafd044d9c90e58252d40c074b6465d5cc Binary files /dev/null and b/evalkit_tf446/lib/python3.10/lib2to3/fixes/__pycache__/fix_apply.cpython-310.pyc differ diff --git a/evalkit_tf446/lib/python3.10/lib2to3/fixes/__pycache__/fix_asserts.cpython-310.pyc b/evalkit_tf446/lib/python3.10/lib2to3/fixes/__pycache__/fix_asserts.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..adb7807b3b2cc3deb11d30e0d3cbcf691b83dd0c Binary files /dev/null and b/evalkit_tf446/lib/python3.10/lib2to3/fixes/__pycache__/fix_asserts.cpython-310.pyc differ diff --git a/evalkit_tf446/lib/python3.10/lib2to3/fixes/__pycache__/fix_except.cpython-310.pyc b/evalkit_tf446/lib/python3.10/lib2to3/fixes/__pycache__/fix_except.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..b17edd2f5482a8905851c18eca7b9de219a8cf28 Binary files /dev/null and b/evalkit_tf446/lib/python3.10/lib2to3/fixes/__pycache__/fix_except.cpython-310.pyc differ diff --git a/evalkit_tf446/lib/python3.10/lib2to3/fixes/__pycache__/fix_execfile.cpython-310.pyc b/evalkit_tf446/lib/python3.10/lib2to3/fixes/__pycache__/fix_execfile.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..d574c28b07d0d9a4845b50650f5e019e22e82e06 Binary files /dev/null and b/evalkit_tf446/lib/python3.10/lib2to3/fixes/__pycache__/fix_execfile.cpython-310.pyc differ diff --git a/evalkit_tf446/lib/python3.10/lib2to3/fixes/__pycache__/fix_filter.cpython-310.pyc b/evalkit_tf446/lib/python3.10/lib2to3/fixes/__pycache__/fix_filter.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..46864ba954e7ba2f49bf79e1bd0dc13a917b4db3 Binary files /dev/null and b/evalkit_tf446/lib/python3.10/lib2to3/fixes/__pycache__/fix_filter.cpython-310.pyc differ diff --git a/evalkit_tf446/lib/python3.10/lib2to3/fixes/__pycache__/fix_funcattrs.cpython-310.pyc b/evalkit_tf446/lib/python3.10/lib2to3/fixes/__pycache__/fix_funcattrs.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..25ae92ea0c43980e3815e2473b5235c5eb28afbf Binary files /dev/null and b/evalkit_tf446/lib/python3.10/lib2to3/fixes/__pycache__/fix_funcattrs.cpython-310.pyc differ diff --git a/evalkit_tf446/lib/python3.10/lib2to3/fixes/__pycache__/fix_idioms.cpython-310.pyc b/evalkit_tf446/lib/python3.10/lib2to3/fixes/__pycache__/fix_idioms.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..f06a1eaa1af650c4b2027e7ed64ac38147e7a6b1 Binary files /dev/null and b/evalkit_tf446/lib/python3.10/lib2to3/fixes/__pycache__/fix_idioms.cpython-310.pyc differ diff --git a/evalkit_tf446/lib/python3.10/lib2to3/fixes/__pycache__/fix_import.cpython-310.pyc b/evalkit_tf446/lib/python3.10/lib2to3/fixes/__pycache__/fix_import.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..3c438b05315c5b2d45668b9e90c584affdb8fbae Binary files /dev/null and b/evalkit_tf446/lib/python3.10/lib2to3/fixes/__pycache__/fix_import.cpython-310.pyc differ diff --git a/evalkit_tf446/lib/python3.10/lib2to3/fixes/__pycache__/fix_map.cpython-310.pyc b/evalkit_tf446/lib/python3.10/lib2to3/fixes/__pycache__/fix_map.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..06e8e0dfd82d23eee38a7e46b1d2b01b1a4843c0 
Binary files /dev/null and b/evalkit_tf446/lib/python3.10/lib2to3/fixes/__pycache__/fix_map.cpython-310.pyc differ diff --git a/evalkit_tf446/lib/python3.10/lib2to3/fixes/__pycache__/fix_methodattrs.cpython-310.pyc b/evalkit_tf446/lib/python3.10/lib2to3/fixes/__pycache__/fix_methodattrs.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..a0c91efa3e577c70934c616dd9eca7e05afbcd96 Binary files /dev/null and b/evalkit_tf446/lib/python3.10/lib2to3/fixes/__pycache__/fix_methodattrs.cpython-310.pyc differ diff --git a/evalkit_tf446/lib/python3.10/lib2to3/fixes/__pycache__/fix_numliterals.cpython-310.pyc b/evalkit_tf446/lib/python3.10/lib2to3/fixes/__pycache__/fix_numliterals.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..1e77abd6742cfbf91b1f17b9c778ba8aa416b08f Binary files /dev/null and b/evalkit_tf446/lib/python3.10/lib2to3/fixes/__pycache__/fix_numliterals.cpython-310.pyc differ diff --git a/evalkit_tf446/lib/python3.10/lib2to3/fixes/__pycache__/fix_paren.cpython-310.pyc b/evalkit_tf446/lib/python3.10/lib2to3/fixes/__pycache__/fix_paren.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..e3449436090f3a915ad67465ee69372db2049282 Binary files /dev/null and b/evalkit_tf446/lib/python3.10/lib2to3/fixes/__pycache__/fix_paren.cpython-310.pyc differ diff --git a/evalkit_tf446/lib/python3.10/lib2to3/fixes/__pycache__/fix_print.cpython-310.pyc b/evalkit_tf446/lib/python3.10/lib2to3/fixes/__pycache__/fix_print.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..83d257998c804a400b9f056276f6f53a03d419e3 Binary files /dev/null and b/evalkit_tf446/lib/python3.10/lib2to3/fixes/__pycache__/fix_print.cpython-310.pyc differ diff --git a/evalkit_tf446/lib/python3.10/lib2to3/fixes/__pycache__/fix_raise.cpython-310.pyc b/evalkit_tf446/lib/python3.10/lib2to3/fixes/__pycache__/fix_raise.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..fd43def42816668908941a2e56d267019b538b67 Binary files /dev/null and b/evalkit_tf446/lib/python3.10/lib2to3/fixes/__pycache__/fix_raise.cpython-310.pyc differ diff --git a/evalkit_tf446/lib/python3.10/lib2to3/fixes/__pycache__/fix_throw.cpython-310.pyc b/evalkit_tf446/lib/python3.10/lib2to3/fixes/__pycache__/fix_throw.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..21a97a8e6b72fa2fd1f502a9745d491bdca2f842 Binary files /dev/null and b/evalkit_tf446/lib/python3.10/lib2to3/fixes/__pycache__/fix_throw.cpython-310.pyc differ diff --git a/evalkit_tf446/lib/python3.10/lib2to3/fixes/__pycache__/fix_tuple_params.cpython-310.pyc b/evalkit_tf446/lib/python3.10/lib2to3/fixes/__pycache__/fix_tuple_params.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..391a84700ec0a958b22696de34d3297891e68e0f Binary files /dev/null and b/evalkit_tf446/lib/python3.10/lib2to3/fixes/__pycache__/fix_tuple_params.cpython-310.pyc differ diff --git a/evalkit_tf446/lib/python3.10/lib2to3/fixes/__pycache__/fix_zip.cpython-310.pyc b/evalkit_tf446/lib/python3.10/lib2to3/fixes/__pycache__/fix_zip.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..b3da60f11c6f8ea2f1e139a5d7a6a73d9ecada1f Binary files /dev/null and b/evalkit_tf446/lib/python3.10/lib2to3/fixes/__pycache__/fix_zip.cpython-310.pyc differ diff --git a/evalkit_tf446/lib/python3.10/lib2to3/fixes/fix_apply.py b/evalkit_tf446/lib/python3.10/lib2to3/fixes/fix_apply.py new file mode 100644 index 
0000000000000000000000000000000000000000..6408582c42647741416968f92c351a5581bc5e3e --- /dev/null +++ b/evalkit_tf446/lib/python3.10/lib2to3/fixes/fix_apply.py @@ -0,0 +1,68 @@ +# Copyright 2006 Google, Inc. All Rights Reserved. +# Licensed to PSF under a Contributor Agreement. + +"""Fixer for apply(). + +This converts apply(func, v, k) into (func)(*v, **k).""" + +# Local imports +from .. import pytree +from ..pgen2 import token +from .. import fixer_base +from ..fixer_util import Call, Comma, parenthesize + +class FixApply(fixer_base.BaseFix): + BM_compatible = True + + PATTERN = """ + power< 'apply' + trailer< + '(' + arglist< + (not argument + ')' + > + > + """ + + def transform(self, node, results): + syms = self.syms + assert results + func = results["func"] + args = results["args"] + kwds = results.get("kwds") + # I feel like we should be able to express this logic in the + # PATTERN above but I don't know how to do it so... + if args: + if (args.type == self.syms.argument and + args.children[0].value in {'**', '*'}): + return # Make no change. + if kwds and (kwds.type == self.syms.argument and + kwds.children[0].value == '**'): + return # Make no change. + prefix = node.prefix + func = func.clone() + if (func.type not in (token.NAME, syms.atom) and + (func.type != syms.power or + func.children[-2].type == token.DOUBLESTAR)): + # Need to parenthesize + func = parenthesize(func) + func.prefix = "" + args = args.clone() + args.prefix = "" + if kwds is not None: + kwds = kwds.clone() + kwds.prefix = "" + l_newargs = [pytree.Leaf(token.STAR, "*"), args] + if kwds is not None: + l_newargs.extend([Comma(), + pytree.Leaf(token.DOUBLESTAR, "**"), + kwds]) + l_newargs[-2].prefix = " " # that's the ** token + # XXX Sometimes we could be cleverer, e.g. apply(f, (x, y) + t) + # can be translated into f(x, y, *t) instead of f(*(x, y) + t) + #new = pytree.Node(syms.power, (func, ArgList(l_newargs))) + return Call(func, l_newargs, prefix=prefix) diff --git a/evalkit_tf446/lib/python3.10/lib2to3/fixes/fix_asserts.py b/evalkit_tf446/lib/python3.10/lib2to3/fixes/fix_asserts.py new file mode 100644 index 0000000000000000000000000000000000000000..5bcec885f52cbf3a8a4a7bcf3331402a54f76967 --- /dev/null +++ b/evalkit_tf446/lib/python3.10/lib2to3/fixes/fix_asserts.py @@ -0,0 +1,34 @@ +"""Fixer that replaces deprecated unittest method names.""" + +# Author: Ezio Melotti + +from ..fixer_base import BaseFix +from ..fixer_util import Name + +NAMES = dict( + assert_="assertTrue", + assertEquals="assertEqual", + assertNotEquals="assertNotEqual", + assertAlmostEquals="assertAlmostEqual", + assertNotAlmostEquals="assertNotAlmostEqual", + assertRegexpMatches="assertRegex", + assertRaisesRegexp="assertRaisesRegex", + failUnlessEqual="assertEqual", + failIfEqual="assertNotEqual", + failUnlessAlmostEqual="assertAlmostEqual", + failIfAlmostEqual="assertNotAlmostEqual", + failUnless="assertTrue", + failUnlessRaises="assertRaises", + failIf="assertFalse", +) + + +class FixAsserts(BaseFix): + + PATTERN = """ + power< any+ trailer< '.' 
meth=(%s)> any* > + """ % '|'.join(map(repr, NAMES)) + + def transform(self, node, results): + name = results["meth"][0] + name.replace(Name(NAMES[str(name)], prefix=name.prefix)) diff --git a/evalkit_tf446/lib/python3.10/lib2to3/fixes/fix_basestring.py b/evalkit_tf446/lib/python3.10/lib2to3/fixes/fix_basestring.py new file mode 100644 index 0000000000000000000000000000000000000000..5fe69a0f03b1b885e28e508a8670406c67a4896a --- /dev/null +++ b/evalkit_tf446/lib/python3.10/lib2to3/fixes/fix_basestring.py @@ -0,0 +1,14 @@ +"""Fixer for basestring -> str.""" +# Author: Christian Heimes + +# Local imports +from .. import fixer_base +from ..fixer_util import Name + +class FixBasestring(fixer_base.BaseFix): + BM_compatible = True + + PATTERN = "'basestring'" + + def transform(self, node, results): + return Name("str", prefix=node.prefix) diff --git a/evalkit_tf446/lib/python3.10/lib2to3/fixes/fix_buffer.py b/evalkit_tf446/lib/python3.10/lib2to3/fixes/fix_buffer.py new file mode 100644 index 0000000000000000000000000000000000000000..f9a1958ad3b93e502126396e02caca773f1f01fa --- /dev/null +++ b/evalkit_tf446/lib/python3.10/lib2to3/fixes/fix_buffer.py @@ -0,0 +1,22 @@ +# Copyright 2007 Google, Inc. All Rights Reserved. +# Licensed to PSF under a Contributor Agreement. + +"""Fixer that changes buffer(...) into memoryview(...).""" + +# Local imports +from .. import fixer_base +from ..fixer_util import Name + + +class FixBuffer(fixer_base.BaseFix): + BM_compatible = True + + explicit = True # The user must ask for this fixer + + PATTERN = """ + power< name='buffer' trailer< '(' [any] ')' > any* > + """ + + def transform(self, node, results): + name = results["name"] + name.replace(Name("memoryview", prefix=name.prefix)) diff --git a/evalkit_tf446/lib/python3.10/lib2to3/fixes/fix_dict.py b/evalkit_tf446/lib/python3.10/lib2to3/fixes/fix_dict.py new file mode 100644 index 0000000000000000000000000000000000000000..d3655c9f1b2d9bf82d8ce3a2140cfeff9ba97323 --- /dev/null +++ b/evalkit_tf446/lib/python3.10/lib2to3/fixes/fix_dict.py @@ -0,0 +1,106 @@ +# Copyright 2007 Google, Inc. All Rights Reserved. +# Licensed to PSF under a Contributor Agreement. + +"""Fixer for dict methods. + +d.keys() -> list(d.keys()) +d.items() -> list(d.items()) +d.values() -> list(d.values()) + +d.iterkeys() -> iter(d.keys()) +d.iteritems() -> iter(d.items()) +d.itervalues() -> iter(d.values()) + +d.viewkeys() -> d.keys() +d.viewitems() -> d.items() +d.viewvalues() -> d.values() + +Except in certain very specific contexts: the iter() can be dropped +when the context is list(), sorted(), iter() or for...in; the list() +can be dropped when the context is list() or sorted() (but not iter() +or for...in!). Special contexts that apply to both: list(), sorted(), tuple() +set(), any(), all(), sum(). + +Note: iter(d.keys()) could be written as iter(d) but since the +original d.iterkeys() was also redundant we don't fix this. And there +are (rare) contexts where it makes a difference (e.g. when passing it +as an argument to a function that introspects the argument). +""" + +# Local imports +from .. import pytree +from .. import patcomp +from .. import fixer_base +from ..fixer_util import Name, Call, Dot +from .. import fixer_util + + +iter_exempt = fixer_util.consuming_calls | {"iter"} + + +class FixDict(fixer_base.BaseFix): + BM_compatible = True + + PATTERN = """ + power< head=any+ + trailer< '.' 
method=('keys'|'items'|'values'| + 'iterkeys'|'iteritems'|'itervalues'| + 'viewkeys'|'viewitems'|'viewvalues') > + parens=trailer< '(' ')' > + tail=any* + > + """ + + def transform(self, node, results): + head = results["head"] + method = results["method"][0] # Extract node for method name + tail = results["tail"] + syms = self.syms + method_name = method.value + isiter = method_name.startswith("iter") + isview = method_name.startswith("view") + if isiter or isview: + method_name = method_name[4:] + assert method_name in ("keys", "items", "values"), repr(method) + head = [n.clone() for n in head] + tail = [n.clone() for n in tail] + special = not tail and self.in_special_context(node, isiter) + args = head + [pytree.Node(syms.trailer, + [Dot(), + Name(method_name, + prefix=method.prefix)]), + results["parens"].clone()] + new = pytree.Node(syms.power, args) + if not (special or isview): + new.prefix = "" + new = Call(Name("iter" if isiter else "list"), [new]) + if tail: + new = pytree.Node(syms.power, [new] + tail) + new.prefix = node.prefix + return new + + P1 = "power< func=NAME trailer< '(' node=any ')' > any* >" + p1 = patcomp.compile_pattern(P1) + + P2 = """for_stmt< 'for' any 'in' node=any ':' any* > + | comp_for< 'for' any 'in' node=any any* > + """ + p2 = patcomp.compile_pattern(P2) + + def in_special_context(self, node, isiter): + if node.parent is None: + return False + results = {} + if (node.parent.parent is not None and + self.p1.match(node.parent.parent, results) and + results["node"] is node): + if isiter: + # iter(d.iterkeys()) -> iter(d.keys()), etc. + return results["func"].value in iter_exempt + else: + # list(d.keys()) -> list(d.keys()), etc. + return results["func"].value in fixer_util.consuming_calls + if not isiter: + return False + # for ... in d.iterkeys() -> for ... in d.keys(), etc. + return self.p2.match(node.parent, results) and results["node"] is node diff --git a/evalkit_tf446/lib/python3.10/lib2to3/fixes/fix_except.py b/evalkit_tf446/lib/python3.10/lib2to3/fixes/fix_except.py new file mode 100644 index 0000000000000000000000000000000000000000..49bd3d5ab7d6ccd8a98016a12b4ed54ff0c800de --- /dev/null +++ b/evalkit_tf446/lib/python3.10/lib2to3/fixes/fix_except.py @@ -0,0 +1,93 @@ +"""Fixer for except statements with named exceptions. + +The following cases will be converted: + +- "except E, T:" where T is a name: + + except E as T: + +- "except E, T:" where T is not a name, tuple or list: + + except E as t: + T = t + + This is done because the target of an "except" clause must be a + name. + +- "except E, T:" where T is a tuple or list literal: + + except E as t: + T = t.args +""" +# Author: Collin Winter + +# Local imports +from .. import pytree +from ..pgen2 import token +from .. 
import fixer_base +from ..fixer_util import Assign, Attr, Name, is_tuple, is_list, syms + +def find_excepts(nodes): + for i, n in enumerate(nodes): + if n.type == syms.except_clause: + if n.children[0].value == 'except': + yield (n, nodes[i+2]) + +class FixExcept(fixer_base.BaseFix): + BM_compatible = True + + PATTERN = """ + try_stmt< 'try' ':' (simple_stmt | suite) + cleanup=(except_clause ':' (simple_stmt | suite))+ + tail=(['except' ':' (simple_stmt | suite)] + ['else' ':' (simple_stmt | suite)] + ['finally' ':' (simple_stmt | suite)]) > + """ + + def transform(self, node, results): + syms = self.syms + + tail = [n.clone() for n in results["tail"]] + + try_cleanup = [ch.clone() for ch in results["cleanup"]] + for except_clause, e_suite in find_excepts(try_cleanup): + if len(except_clause.children) == 4: + (E, comma, N) = except_clause.children[1:4] + comma.replace(Name("as", prefix=" ")) + + if N.type != token.NAME: + # Generate a new N for the except clause + new_N = Name(self.new_name(), prefix=" ") + target = N.clone() + target.prefix = "" + N.replace(new_N) + new_N = new_N.clone() + + # Insert "old_N = new_N" as the first statement in + # the except body. This loop skips leading whitespace + # and indents + #TODO(cwinter) suite-cleanup + suite_stmts = e_suite.children + for i, stmt in enumerate(suite_stmts): + if isinstance(stmt, pytree.Node): + break + + # The assignment is different if old_N is a tuple or list + # In that case, the assignment is old_N = new_N.args + if is_tuple(N) or is_list(N): + assign = Assign(target, Attr(new_N, Name('args'))) + else: + assign = Assign(target, new_N) + + #TODO(cwinter) stopgap until children becomes a smart list + for child in reversed(suite_stmts[:i]): + e_suite.insert_child(0, child) + e_suite.insert_child(i, assign) + elif N.prefix == "": + # No space after a comma is legal; no space after "as", + # not so much. + N.prefix = " " + + #TODO(cwinter) fix this when children becomes a smart list + children = [c.clone() for c in node.children[:3]] + try_cleanup + tail + return pytree.Node(node.type, children) diff --git a/evalkit_tf446/lib/python3.10/lib2to3/fixes/fix_exec.py b/evalkit_tf446/lib/python3.10/lib2to3/fixes/fix_exec.py new file mode 100644 index 0000000000000000000000000000000000000000..ab921ee80cdf366532f027b2549e5bcba55a4fd5 --- /dev/null +++ b/evalkit_tf446/lib/python3.10/lib2to3/fixes/fix_exec.py @@ -0,0 +1,39 @@ +# Copyright 2006 Google, Inc. All Rights Reserved. +# Licensed to PSF under a Contributor Agreement. + +"""Fixer for exec. + +This converts usages of the exec statement into calls to a built-in +exec() function. + +exec code in ns1, ns2 -> exec(code, ns1, ns2) +""" + +# Local imports +from .. 
import fixer_base +from ..fixer_util import Comma, Name, Call + + +class FixExec(fixer_base.BaseFix): + BM_compatible = True + + PATTERN = """ + exec_stmt< 'exec' a=any 'in' b=any [',' c=any] > + | + exec_stmt< 'exec' (not atom<'(' [any] ')'>) a=any > + """ + + def transform(self, node, results): + assert results + syms = self.syms + a = results["a"] + b = results.get("b") + c = results.get("c") + args = [a.clone()] + args[0].prefix = "" + if b is not None: + args.extend([Comma(), b.clone()]) + if c is not None: + args.extend([Comma(), c.clone()]) + + return Call(Name("exec"), args, prefix=node.prefix) diff --git a/evalkit_tf446/lib/python3.10/lib2to3/fixes/fix_execfile.py b/evalkit_tf446/lib/python3.10/lib2to3/fixes/fix_execfile.py new file mode 100644 index 0000000000000000000000000000000000000000..b6c786fd4e8b6a141ab3619ba2b2576db158fcd4 --- /dev/null +++ b/evalkit_tf446/lib/python3.10/lib2to3/fixes/fix_execfile.py @@ -0,0 +1,53 @@ +# Copyright 2006 Google, Inc. All Rights Reserved. +# Licensed to PSF under a Contributor Agreement. + +"""Fixer for execfile. + +This converts usages of the execfile function into calls to the built-in +exec() function. +""" + +from .. import fixer_base +from ..fixer_util import (Comma, Name, Call, LParen, RParen, Dot, Node, + ArgList, String, syms) + + +class FixExecfile(fixer_base.BaseFix): + BM_compatible = True + + PATTERN = """ + power< 'execfile' trailer< '(' arglist< filename=any [',' globals=any [',' locals=any ] ] > ')' > > + | + power< 'execfile' trailer< '(' filename=any ')' > > + """ + + def transform(self, node, results): + assert results + filename = results["filename"] + globals = results.get("globals") + locals = results.get("locals") + + # Copy over the prefix from the right parentheses end of the execfile + # call. + execfile_paren = node.children[-1].children[-1].clone() + # Construct open().read(). + open_args = ArgList([filename.clone(), Comma(), String('"rb"', ' ')], + rparen=execfile_paren) + open_call = Node(syms.power, [Name("open"), open_args]) + read = [Node(syms.trailer, [Dot(), Name('read')]), + Node(syms.trailer, [LParen(), RParen()])] + open_expr = [open_call] + read + # Wrap the open call in a compile call. This is so the filename will be + # preserved in the execed code. + filename_arg = filename.clone() + filename_arg.prefix = " " + exec_str = String("'exec'", " ") + compile_args = open_expr + [Comma(), filename_arg, Comma(), exec_str] + compile_call = Call(Name("compile"), compile_args, "") + # Finally, replace the execfile call with an exec call. + args = [compile_call] + if globals is not None: + args.extend([Comma(), globals.clone()]) + if locals is not None: + args.extend([Comma(), locals.clone()]) + return Call(Name("exec"), args, prefix=node.prefix) diff --git a/evalkit_tf446/lib/python3.10/lib2to3/fixes/fix_exitfunc.py b/evalkit_tf446/lib/python3.10/lib2to3/fixes/fix_exitfunc.py new file mode 100644 index 0000000000000000000000000000000000000000..2e47887afead368518e7930397410335887d60f9 --- /dev/null +++ b/evalkit_tf446/lib/python3.10/lib2to3/fixes/fix_exitfunc.py @@ -0,0 +1,72 @@ +""" +Convert use of sys.exitfunc to use the atexit module. 
+""" + +# Author: Benjamin Peterson + +from lib2to3 import pytree, fixer_base +from lib2to3.fixer_util import Name, Attr, Call, Comma, Newline, syms + + +class FixExitfunc(fixer_base.BaseFix): + keep_line_order = True + BM_compatible = True + + PATTERN = """ + ( + sys_import=import_name<'import' + ('sys' + | + dotted_as_names< (any ',')* 'sys' (',' any)* > + ) + > + | + expr_stmt< + power< 'sys' trailer< '.' 'exitfunc' > > + '=' func=any > + ) + """ + + def __init__(self, *args): + super(FixExitfunc, self).__init__(*args) + + def start_tree(self, tree, filename): + super(FixExitfunc, self).start_tree(tree, filename) + self.sys_import = None + + def transform(self, node, results): + # First, find the sys import. We'll just hope it's global scope. + if "sys_import" in results: + if self.sys_import is None: + self.sys_import = results["sys_import"] + return + + func = results["func"].clone() + func.prefix = "" + register = pytree.Node(syms.power, + Attr(Name("atexit"), Name("register")) + ) + call = Call(register, [func], node.prefix) + node.replace(call) + + if self.sys_import is None: + # That's interesting. + self.warning(node, "Can't find sys import; Please add an atexit " + "import at the top of your file.") + return + + # Now add an atexit import after the sys import. + names = self.sys_import.children[1] + if names.type == syms.dotted_as_names: + names.append_child(Comma()) + names.append_child(Name("atexit", " ")) + else: + containing_stmt = self.sys_import.parent + position = containing_stmt.children.index(self.sys_import) + stmt_container = containing_stmt.parent + new_import = pytree.Node(syms.import_name, + [Name("import"), Name("atexit", " ")] + ) + new = pytree.Node(syms.simple_stmt, [new_import]) + containing_stmt.insert_child(position + 1, Newline()) + containing_stmt.insert_child(position + 2, new) diff --git a/evalkit_tf446/lib/python3.10/lib2to3/fixes/fix_filter.py b/evalkit_tf446/lib/python3.10/lib2to3/fixes/fix_filter.py new file mode 100644 index 0000000000000000000000000000000000000000..38e9078f11ac88f346b7ba2468c69dc5a4f74dd5 --- /dev/null +++ b/evalkit_tf446/lib/python3.10/lib2to3/fixes/fix_filter.py @@ -0,0 +1,94 @@ +# Copyright 2007 Google, Inc. All Rights Reserved. +# Licensed to PSF under a Contributor Agreement. + +"""Fixer that changes filter(F, X) into list(filter(F, X)). + +We avoid the transformation if the filter() call is directly contained +in iter(<>), list(<>), tuple(<>), sorted(<>), ...join(<>), or +for V in <>:. + +NOTE: This is still not correct if the original code was depending on +filter(F, X) to return a string if X is a string and a tuple if X is a +tuple. That would require type inference, which we don't do. Let +Python 2.6 figure it out. +""" + +# Local imports +from .. 
import fixer_base +from ..pytree import Node +from ..pygram import python_symbols as syms +from ..fixer_util import Name, ArgList, ListComp, in_special_context, parenthesize + + +class FixFilter(fixer_base.ConditionalFix): + BM_compatible = True + + PATTERN = """ + filter_lambda=power< + 'filter' + trailer< + '(' + arglist< + lambdef< 'lambda' + (fp=NAME | vfpdef< '(' fp=NAME ')'> ) ':' xp=any + > + ',' + it=any + > + ')' + > + [extra_trailers=trailer*] + > + | + power< + 'filter' + trailer< '(' arglist< none='None' ',' seq=any > ')' > + [extra_trailers=trailer*] + > + | + power< + 'filter' + args=trailer< '(' [any] ')' > + [extra_trailers=trailer*] + > + """ + + skip_on = "future_builtins.filter" + + def transform(self, node, results): + if self.should_skip(node): + return + + trailers = [] + if 'extra_trailers' in results: + for t in results['extra_trailers']: + trailers.append(t.clone()) + + if "filter_lambda" in results: + xp = results.get("xp").clone() + if xp.type == syms.test: + xp.prefix = "" + xp = parenthesize(xp) + + new = ListComp(results.get("fp").clone(), + results.get("fp").clone(), + results.get("it").clone(), xp) + new = Node(syms.power, [new] + trailers, prefix="") + + elif "none" in results: + new = ListComp(Name("_f"), + Name("_f"), + results["seq"].clone(), + Name("_f")) + new = Node(syms.power, [new] + trailers, prefix="") + + else: + if in_special_context(node): + return None + + args = results['args'].clone() + new = Node(syms.power, [Name("filter"), args], prefix="") + new = Node(syms.power, [Name("list"), ArgList([new])] + trailers) + new.prefix = "" + new.prefix = node.prefix + return new diff --git a/evalkit_tf446/lib/python3.10/lib2to3/fixes/fix_funcattrs.py b/evalkit_tf446/lib/python3.10/lib2to3/fixes/fix_funcattrs.py new file mode 100644 index 0000000000000000000000000000000000000000..67f3e18e061bdb10395bf73e82f440ad6842e2bd --- /dev/null +++ b/evalkit_tf446/lib/python3.10/lib2to3/fixes/fix_funcattrs.py @@ -0,0 +1,21 @@ +"""Fix function attribute names (f.func_x -> f.__x__).""" +# Author: Collin Winter + +# Local imports +from .. import fixer_base +from ..fixer_util import Name + + +class FixFuncattrs(fixer_base.BaseFix): + BM_compatible = True + + PATTERN = """ + power< any+ trailer< '.' attr=('func_closure' | 'func_doc' | 'func_globals' + | 'func_name' | 'func_defaults' | 'func_code' + | 'func_dict') > any* > + """ + + def transform(self, node, results): + attr = results["attr"][0] + attr.replace(Name(("__%s__" % attr.value[5:]), + prefix=attr.prefix)) diff --git a/evalkit_tf446/lib/python3.10/lib2to3/fixes/fix_future.py b/evalkit_tf446/lib/python3.10/lib2to3/fixes/fix_future.py new file mode 100644 index 0000000000000000000000000000000000000000..fbcb86af0791338e4edebd2f68bd49cd7a9160c2 --- /dev/null +++ b/evalkit_tf446/lib/python3.10/lib2to3/fixes/fix_future.py @@ -0,0 +1,22 @@ +"""Remove __future__ imports + +from __future__ import foo is replaced with an empty line. +""" +# Author: Christian Heimes + +# Local imports +from .. 
import fixer_base +from ..fixer_util import BlankLine + +class FixFuture(fixer_base.BaseFix): + BM_compatible = True + + PATTERN = """import_from< 'from' module_name="__future__" 'import' any >""" + + # This should be run last -- some things check for the import + run_order = 10 + + def transform(self, node, results): + new = BlankLine() + new.prefix = node.prefix + return new diff --git a/evalkit_tf446/lib/python3.10/lib2to3/fixes/fix_getcwdu.py b/evalkit_tf446/lib/python3.10/lib2to3/fixes/fix_getcwdu.py new file mode 100644 index 0000000000000000000000000000000000000000..087eaedcb26f9cfa139f524a67464154a23d79ac --- /dev/null +++ b/evalkit_tf446/lib/python3.10/lib2to3/fixes/fix_getcwdu.py @@ -0,0 +1,19 @@ +""" +Fixer that changes os.getcwdu() to os.getcwd(). +""" +# Author: Victor Stinner + +# Local imports +from .. import fixer_base +from ..fixer_util import Name + +class FixGetcwdu(fixer_base.BaseFix): + BM_compatible = True + + PATTERN = """ + power< 'os' trailer< dot='.' name='getcwdu' > any* > + """ + + def transform(self, node, results): + name = results["name"] + name.replace(Name("getcwd", prefix=name.prefix)) diff --git a/evalkit_tf446/lib/python3.10/lib2to3/fixes/fix_has_key.py b/evalkit_tf446/lib/python3.10/lib2to3/fixes/fix_has_key.py new file mode 100644 index 0000000000000000000000000000000000000000..439708c9923404312dfabb964615062ea32c0aea --- /dev/null +++ b/evalkit_tf446/lib/python3.10/lib2to3/fixes/fix_has_key.py @@ -0,0 +1,109 @@ +# Copyright 2006 Google, Inc. All Rights Reserved. +# Licensed to PSF under a Contributor Agreement. + +"""Fixer for has_key(). + +Calls to .has_key() methods are expressed in terms of the 'in' +operator: + + d.has_key(k) -> k in d + +CAVEATS: +1) While the primary target of this fixer is dict.has_key(), the + fixer will change any has_key() method call, regardless of its + class. + +2) Cases like this will not be converted: + + m = d.has_key + if m(k): + ... + + Only *calls* to has_key() are converted. While it is possible to + convert the above to something like + + m = d.__contains__ + if m(k): + ... + + this is currently not done. +""" + +# Local imports +from .. import pytree +from .. import fixer_base +from ..fixer_util import Name, parenthesize + + +class FixHasKey(fixer_base.BaseFix): + BM_compatible = True + + PATTERN = """ + anchor=power< + before=any+ + trailer< '.' 'has_key' > + trailer< + '(' + ( not(arglist | argument) arg=any ','> + ) + ')' + > + after=any* + > + | + negation=not_test< + 'not' + anchor=power< + before=any+ + trailer< '.' 
'has_key' > + trailer< + '(' + ( not(arglist | argument) arg=any ','> + ) + ')' + > + > + > + """ + + def transform(self, node, results): + assert results + syms = self.syms + if (node.parent.type == syms.not_test and + self.pattern.match(node.parent)): + # Don't transform a node matching the first alternative of the + # pattern when its parent matches the second alternative + return None + negation = results.get("negation") + anchor = results["anchor"] + prefix = node.prefix + before = [n.clone() for n in results["before"]] + arg = results["arg"].clone() + after = results.get("after") + if after: + after = [n.clone() for n in after] + if arg.type in (syms.comparison, syms.not_test, syms.and_test, + syms.or_test, syms.test, syms.lambdef, syms.argument): + arg = parenthesize(arg) + if len(before) == 1: + before = before[0] + else: + before = pytree.Node(syms.power, before) + before.prefix = " " + n_op = Name("in", prefix=" ") + if negation: + n_not = Name("not", prefix=" ") + n_op = pytree.Node(syms.comp_op, (n_not, n_op)) + new = pytree.Node(syms.comparison, (arg, n_op, before)) + if after: + new = parenthesize(new) + new = pytree.Node(syms.power, (new,) + tuple(after)) + if node.parent.type in (syms.comparison, syms.expr, syms.xor_expr, + syms.and_expr, syms.shift_expr, + syms.arith_expr, syms.term, + syms.factor, syms.power): + new = parenthesize(new) + new.prefix = prefix + return new diff --git a/evalkit_tf446/lib/python3.10/lib2to3/fixes/fix_idioms.py b/evalkit_tf446/lib/python3.10/lib2to3/fixes/fix_idioms.py new file mode 100644 index 0000000000000000000000000000000000000000..6905913d7cb79d2ead79ca7c0c88ea6c9b98d79d --- /dev/null +++ b/evalkit_tf446/lib/python3.10/lib2to3/fixes/fix_idioms.py @@ -0,0 +1,152 @@ +"""Adjust some old Python 2 idioms to their modern counterparts. + +* Change some type comparisons to isinstance() calls: + type(x) == T -> isinstance(x, T) + type(x) is T -> isinstance(x, T) + type(x) != T -> not isinstance(x, T) + type(x) is not T -> not isinstance(x, T) + +* Change "while 1:" into "while True:". + +* Change both + + v = list(EXPR) + v.sort() + foo(v) + +and the more general + + v = EXPR + v.sort() + foo(v) + +into + + v = sorted(EXPR) + foo(v) +""" +# Author: Jacques Frechet, Collin Winter + +# Local imports +from .. import fixer_base +from ..fixer_util import Call, Comma, Name, Node, BlankLine, syms + +CMP = "(n='!=' | '==' | 'is' | n=comp_op< 'is' 'not' >)" +TYPE = "power< 'type' trailer< '(' x=any ')' > >" + +class FixIdioms(fixer_base.BaseFix): + explicit = True # The user must ask for this fixer + + PATTERN = r""" + isinstance=comparison< %s %s T=any > + | + isinstance=comparison< T=any %s %s > + | + while_stmt< 'while' while='1' ':' any+ > + | + sorted=any< + any* + simple_stmt< + expr_stmt< id1=any '=' + power< list='list' trailer< '(' (not arglist) any ')' > > + > + '\n' + > + sort= + simple_stmt< + power< id2=any + trailer< '.' 'sort' > trailer< '(' ')' > + > + '\n' + > + next=any* + > + | + sorted=any< + any* + simple_stmt< expr_stmt< id1=any '=' expr=any > '\n' > + sort= + simple_stmt< + power< id2=any + trailer< '.' 'sort' > trailer< '(' ')' > + > + '\n' + > + next=any* + > + """ % (TYPE, CMP, CMP, TYPE) + + def match(self, node): + r = super(FixIdioms, self).match(node) + # If we've matched one of the sort/sorted subpatterns above, we + # want to reject matches where the initial assignment and the + # subsequent .sort() call involve different identifiers. 
+ if r and "sorted" in r: + if r["id1"] == r["id2"]: + return r + return None + return r + + def transform(self, node, results): + if "isinstance" in results: + return self.transform_isinstance(node, results) + elif "while" in results: + return self.transform_while(node, results) + elif "sorted" in results: + return self.transform_sort(node, results) + else: + raise RuntimeError("Invalid match") + + def transform_isinstance(self, node, results): + x = results["x"].clone() # The thing inside of type() + T = results["T"].clone() # The type being compared against + x.prefix = "" + T.prefix = " " + test = Call(Name("isinstance"), [x, Comma(), T]) + if "n" in results: + test.prefix = " " + test = Node(syms.not_test, [Name("not"), test]) + test.prefix = node.prefix + return test + + def transform_while(self, node, results): + one = results["while"] + one.replace(Name("True", prefix=one.prefix)) + + def transform_sort(self, node, results): + sort_stmt = results["sort"] + next_stmt = results["next"] + list_call = results.get("list") + simple_expr = results.get("expr") + + if list_call: + list_call.replace(Name("sorted", prefix=list_call.prefix)) + elif simple_expr: + new = simple_expr.clone() + new.prefix = "" + simple_expr.replace(Call(Name("sorted"), [new], + prefix=simple_expr.prefix)) + else: + raise RuntimeError("should not have reached here") + sort_stmt.remove() + + btwn = sort_stmt.prefix + # Keep any prefix lines between the sort_stmt and the list_call and + # shove them right after the sorted() call. + if "\n" in btwn: + if next_stmt: + # The new prefix should be everything from the sort_stmt's + # prefix up to the last newline, then the old prefix after a new + # line. + prefix_lines = (btwn.rpartition("\n")[0], next_stmt[0].prefix) + next_stmt[0].prefix = "\n".join(prefix_lines) + else: + assert list_call.parent + assert list_call.next_sibling is None + # Put a blank line after list_call and set its prefix. + end_line = BlankLine() + list_call.parent.append_child(end_line) + assert list_call.next_sibling is end_line + # The new prefix should be everything up to the first new line + # of sort_stmt's prefix. + end_line.prefix = btwn.rpartition("\n")[0] diff --git a/evalkit_tf446/lib/python3.10/lib2to3/fixes/fix_import.py b/evalkit_tf446/lib/python3.10/lib2to3/fixes/fix_import.py new file mode 100644 index 0000000000000000000000000000000000000000..734ca294699c36400b2c1d59c1badfb9c296ec14 --- /dev/null +++ b/evalkit_tf446/lib/python3.10/lib2to3/fixes/fix_import.py @@ -0,0 +1,99 @@ +"""Fixer for import statements. +If spam is being imported from the local directory, this import: + from spam import eggs +Becomes: + from .spam import eggs + +And this import: + import spam +Becomes: + from . import spam +""" + +# Local imports +from .. import fixer_base +from os.path import dirname, join, exists, sep +from ..fixer_util import FromImport, syms, token + + +def traverse_imports(names): + """ + Walks over all the names imported in a dotted_as_names node. 
+ """ + pending = [names] + while pending: + node = pending.pop() + if node.type == token.NAME: + yield node.value + elif node.type == syms.dotted_name: + yield "".join([ch.value for ch in node.children]) + elif node.type == syms.dotted_as_name: + pending.append(node.children[0]) + elif node.type == syms.dotted_as_names: + pending.extend(node.children[::-2]) + else: + raise AssertionError("unknown node type") + + +class FixImport(fixer_base.BaseFix): + BM_compatible = True + + PATTERN = """ + import_from< 'from' imp=any 'import' ['('] any [')'] > + | + import_name< 'import' imp=any > + """ + + def start_tree(self, tree, name): + super(FixImport, self).start_tree(tree, name) + self.skip = "absolute_import" in tree.future_features + + def transform(self, node, results): + if self.skip: + return + imp = results['imp'] + + if node.type == syms.import_from: + # Some imps are top-level (eg: 'import ham') + # some are first level (eg: 'import ham.eggs') + # some are third level (eg: 'import ham.eggs as spam') + # Hence, the loop + while not hasattr(imp, 'value'): + imp = imp.children[0] + if self.probably_a_local_import(imp.value): + imp.value = "." + imp.value + imp.changed() + else: + have_local = False + have_absolute = False + for mod_name in traverse_imports(imp): + if self.probably_a_local_import(mod_name): + have_local = True + else: + have_absolute = True + if have_absolute: + if have_local: + # We won't handle both sibling and absolute imports in the + # same statement at the moment. + self.warning(node, "absolute and local imports together") + return + + new = FromImport(".", [imp]) + new.prefix = node.prefix + return new + + def probably_a_local_import(self, imp_name): + if imp_name.startswith("."): + # Relative imports are certainly not local imports. + return False + imp_name = imp_name.split(".", 1)[0] + base_path = dirname(self.filename) + base_path = join(base_path, imp_name) + # If there is no __init__.py next to the file its not in a package + # so can't be a relative import. + if not exists(join(dirname(base_path), "__init__.py")): + return False + for ext in [".py", sep, ".pyc", ".so", ".sl", ".pyd"]: + if exists(base_path + ext): + return True + return False diff --git a/evalkit_tf446/lib/python3.10/lib2to3/fixes/fix_imports.py b/evalkit_tf446/lib/python3.10/lib2to3/fixes/fix_imports.py new file mode 100644 index 0000000000000000000000000000000000000000..aaf4f2f642efb52d50eabdca67527a3cbf60014e --- /dev/null +++ b/evalkit_tf446/lib/python3.10/lib2to3/fixes/fix_imports.py @@ -0,0 +1,145 @@ +"""Fix incompatible imports and module references.""" +# Authors: Collin Winter, Nick Edds + +# Local imports +from .. 
import fixer_base +from ..fixer_util import Name, attr_chain + +MAPPING = {'StringIO': 'io', + 'cStringIO': 'io', + 'cPickle': 'pickle', + '__builtin__' : 'builtins', + 'copy_reg': 'copyreg', + 'Queue': 'queue', + 'SocketServer': 'socketserver', + 'ConfigParser': 'configparser', + 'repr': 'reprlib', + 'FileDialog': 'tkinter.filedialog', + 'tkFileDialog': 'tkinter.filedialog', + 'SimpleDialog': 'tkinter.simpledialog', + 'tkSimpleDialog': 'tkinter.simpledialog', + 'tkColorChooser': 'tkinter.colorchooser', + 'tkCommonDialog': 'tkinter.commondialog', + 'Dialog': 'tkinter.dialog', + 'Tkdnd': 'tkinter.dnd', + 'tkFont': 'tkinter.font', + 'tkMessageBox': 'tkinter.messagebox', + 'ScrolledText': 'tkinter.scrolledtext', + 'Tkconstants': 'tkinter.constants', + 'Tix': 'tkinter.tix', + 'ttk': 'tkinter.ttk', + 'Tkinter': 'tkinter', + 'markupbase': '_markupbase', + '_winreg': 'winreg', + 'thread': '_thread', + 'dummy_thread': '_dummy_thread', + # anydbm and whichdb are handled by fix_imports2 + 'dbhash': 'dbm.bsd', + 'dumbdbm': 'dbm.dumb', + 'dbm': 'dbm.ndbm', + 'gdbm': 'dbm.gnu', + 'xmlrpclib': 'xmlrpc.client', + 'DocXMLRPCServer': 'xmlrpc.server', + 'SimpleXMLRPCServer': 'xmlrpc.server', + 'httplib': 'http.client', + 'htmlentitydefs' : 'html.entities', + 'HTMLParser' : 'html.parser', + 'Cookie': 'http.cookies', + 'cookielib': 'http.cookiejar', + 'BaseHTTPServer': 'http.server', + 'SimpleHTTPServer': 'http.server', + 'CGIHTTPServer': 'http.server', + #'test.test_support': 'test.support', + 'commands': 'subprocess', + 'UserString' : 'collections', + 'UserList' : 'collections', + 'urlparse' : 'urllib.parse', + 'robotparser' : 'urllib.robotparser', +} + + +def alternates(members): + return "(" + "|".join(map(repr, members)) + ")" + + +def build_pattern(mapping=MAPPING): + mod_list = ' | '.join(["module_name='%s'" % key for key in mapping]) + bare_names = alternates(mapping.keys()) + + yield """name_import=import_name< 'import' ((%s) | + multiple_imports=dotted_as_names< any* (%s) any* >) > + """ % (mod_list, mod_list) + yield """import_from< 'from' (%s) 'import' ['('] + ( any | import_as_name< any 'as' any > | + import_as_names< any* >) [')'] > + """ % mod_list + yield """import_name< 'import' (dotted_as_name< (%s) 'as' any > | + multiple_imports=dotted_as_names< + any* dotted_as_name< (%s) 'as' any > any* >) > + """ % (mod_list, mod_list) + + # Find usages of module members in code e.g. thread.foo(bar) + yield "power< bare_with_attr=(%s) trailer<'.' any > any* >" % bare_names + + +class FixImports(fixer_base.BaseFix): + + BM_compatible = True + keep_line_order = True + # This is overridden in fix_imports2. + mapping = MAPPING + + # We want to run this fixer late, so fix_import doesn't try to make stdlib + # renames into relative imports. + run_order = 6 + + def build_pattern(self): + return "|".join(build_pattern(self.mapping)) + + def compile_pattern(self): + # We override this, so MAPPING can be pragmatically altered and the + # changes will be reflected in PATTERN. + self.PATTERN = self.build_pattern() + super(FixImports, self).compile_pattern() + + # Don't match the node if it's within another match. + def match(self, node): + match = super(FixImports, self).match + results = match(node) + if results: + # Module usage could be in the trailer of an attribute lookup, so we + # might have nested matches when "bare_with_attr" is present. 
+ if "bare_with_attr" not in results and \ + any(match(obj) for obj in attr_chain(node, "parent")): + return False + return results + return False + + def start_tree(self, tree, filename): + super(FixImports, self).start_tree(tree, filename) + self.replace = {} + + def transform(self, node, results): + import_mod = results.get("module_name") + if import_mod: + mod_name = import_mod.value + new_name = self.mapping[mod_name] + import_mod.replace(Name(new_name, prefix=import_mod.prefix)) + if "name_import" in results: + # If it's not a "from x import x, y" or "import x as y" import, + # marked its usage to be replaced. + self.replace[mod_name] = new_name + if "multiple_imports" in results: + # This is a nasty hack to fix multiple imports on a line (e.g., + # "import StringIO, urlparse"). The problem is that I can't + # figure out an easy way to make a pattern recognize the keys of + # MAPPING randomly sprinkled in an import statement. + results = self.match(node) + if results: + self.transform(node, results) + else: + # Replace usage of the module. + bare_name = results["bare_with_attr"][0] + new_name = self.replace.get(bare_name.value) + if new_name: + bare_name.replace(Name(new_name, prefix=bare_name.prefix)) diff --git a/evalkit_tf446/lib/python3.10/lib2to3/fixes/fix_imports2.py b/evalkit_tf446/lib/python3.10/lib2to3/fixes/fix_imports2.py new file mode 100644 index 0000000000000000000000000000000000000000..9a33c67b1dc1940b2e271fad30b73f8e06b24e33 --- /dev/null +++ b/evalkit_tf446/lib/python3.10/lib2to3/fixes/fix_imports2.py @@ -0,0 +1,16 @@ +"""Fix incompatible imports and module references that must be fixed after +fix_imports.""" +from . import fix_imports + + +MAPPING = { + 'whichdb': 'dbm', + 'anydbm': 'dbm', + } + + +class FixImports2(fix_imports.FixImports): + + run_order = 7 + + mapping = MAPPING diff --git a/evalkit_tf446/lib/python3.10/lib2to3/fixes/fix_input.py b/evalkit_tf446/lib/python3.10/lib2to3/fixes/fix_input.py new file mode 100644 index 0000000000000000000000000000000000000000..9cf9a48c471f35e39fa0e99a40a1cc75fae6fe6d --- /dev/null +++ b/evalkit_tf446/lib/python3.10/lib2to3/fixes/fix_input.py @@ -0,0 +1,26 @@ +"""Fixer that changes input(...) into eval(input(...)).""" +# Author: Andre Roberge + +# Local imports +from .. import fixer_base +from ..fixer_util import Call, Name +from .. import patcomp + + +context = patcomp.compile_pattern("power< 'eval' trailer< '(' any ')' > >") + + +class FixInput(fixer_base.BaseFix): + BM_compatible = True + PATTERN = """ + power< 'input' args=trailer< '(' [any] ')' > > + """ + + def transform(self, node, results): + # If we're already wrapped in an eval() call, we're done. + if context.match(node.parent.parent): + return + + new = node.clone() + new.prefix = "" + return Call(Name("eval"), [new], prefix=node.prefix) diff --git a/evalkit_tf446/lib/python3.10/lib2to3/fixes/fix_intern.py b/evalkit_tf446/lib/python3.10/lib2to3/fixes/fix_intern.py new file mode 100644 index 0000000000000000000000000000000000000000..d752843092aacd8bc6e84c80f5bc6116563b9d25 --- /dev/null +++ b/evalkit_tf446/lib/python3.10/lib2to3/fixes/fix_intern.py @@ -0,0 +1,39 @@ +# Copyright 2006 Georg Brandl. +# Licensed to PSF under a Contributor Agreement. + +"""Fixer for intern(). + +intern(s) -> sys.intern(s)""" + +# Local imports +from .. 
import fixer_base
+from ..fixer_util import ImportAndCall, touch_import
+
+
+class FixIntern(fixer_base.BaseFix):
+    BM_compatible = True
+    order = "pre"
+
+    PATTERN = """
+    power< 'intern'
+           trailer< lpar='('
+                    ( not(arglist | argument<any '=' any>) obj=any
+                      | obj=arglist<(not argument<any '=' any>) any ','> )
+                    rpar=')' >
+           after=any*
+    >
+    """
+
+    def transform(self, node, results):
+        if results:
+            # I feel like we should be able to express this logic in the
+            # PATTERN above but I don't know how to do it so...
+            obj = results['obj']
+            if obj:
+                if (obj.type == self.syms.argument and
+                        obj.children[0].value in {'**', '*'}):
+                    return  # Make no change.
+            names = ('sys', 'intern')
+            new = ImportAndCall(node, results, names)
+            touch_import(None, 'sys', node)
+            return new
diff --git a/evalkit_tf446/lib/python3.10/lib2to3/fixes/fix_isinstance.py b/evalkit_tf446/lib/python3.10/lib2to3/fixes/fix_isinstance.py
new file mode 100644
index 0000000000000000000000000000000000000000..bebb1de120424b6b568ae3243eab55ad12305194
--- /dev/null
+++ b/evalkit_tf446/lib/python3.10/lib2to3/fixes/fix_isinstance.py
@@ -0,0 +1,52 @@
+# Copyright 2008 Armin Ronacher.
+# Licensed to PSF under a Contributor Agreement.
+
+"""Fixer that cleans up a tuple argument to isinstance after the tokens
+in it were fixed.  This is mainly used to remove double occurrences of
+tokens as a leftover of the long -> int / unicode -> str conversion.
+
+eg.  isinstance(x, (int, long)) -> isinstance(x, (int, int))
+       -> isinstance(x, int)
+"""
+
+from .. import fixer_base
+from ..fixer_util import token
+
+
+class FixIsinstance(fixer_base.BaseFix):
+    BM_compatible = True
+    PATTERN = """
+    power<
+        'isinstance'
+        trailer< '(' arglist< any ',' atom< '('
+            args=testlist_gexp< any+ >
+        ')' > > ')' >
+    >
+    """
+
+    run_order = 6
+
+    def transform(self, node, results):
+        names_inserted = set()
+        testlist = results["args"]
+        args = testlist.children
+        new_args = []
+        iterator = enumerate(args)
+        for idx, arg in iterator:
+            if arg.type == token.NAME and arg.value in names_inserted:
+                if idx < len(args) - 1 and args[idx + 1].type == token.COMMA:
+                    next(iterator)
+                    continue
+            else:
+                new_args.append(arg)
+                if arg.type == token.NAME:
+                    names_inserted.add(arg.value)
+        if new_args and new_args[-1].type == token.COMMA:
+            del new_args[-1]
+        if len(new_args) == 1:
+            atom = testlist.parent
+            new_args[0].prefix = atom.prefix
+            atom.replace(new_args[0])
+        else:
+            args[:] = new_args
+            node.changed()
diff --git a/evalkit_tf446/lib/python3.10/lib2to3/fixes/fix_itertools.py b/evalkit_tf446/lib/python3.10/lib2to3/fixes/fix_itertools.py
new file mode 100644
index 0000000000000000000000000000000000000000..8e78d6c689f4396421414248f15b39e01a844833
--- /dev/null
+++ b/evalkit_tf446/lib/python3.10/lib2to3/fixes/fix_itertools.py
@@ -0,0 +1,43 @@
+""" Fixer for itertools.(imap|ifilter|izip) --> (map|filter|zip) and
+    itertools.ifilterfalse --> itertools.filterfalse (bugs 2360-2363)
+
+    imports from itertools are fixed in fix_itertools_imports.py
+
+    If itertools is imported as something else (ie: import itertools as it;
+    it.izip(spam, eggs)) method calls will not get fixed.
+    """
+
+# Local imports
+from .. import fixer_base
+from ..fixer_util import Name
+
+class FixItertools(fixer_base.BaseFix):
+    BM_compatible = True
+    it_funcs = "('imap'|'ifilter'|'izip'|'izip_longest'|'ifilterfalse')"
+    PATTERN = """
+    power< it='itertools'
+        trailer<
+           dot='.'
func=%(it_funcs)s > trailer< '(' [any] ')' > > + | + power< func=%(it_funcs)s trailer< '(' [any] ')' > > + """ %(locals()) + + # Needs to be run after fix_(map|zip|filter) + run_order = 6 + + def transform(self, node, results): + prefix = None + func = results['func'][0] + if ('it' in results and + func.value not in ('ifilterfalse', 'izip_longest')): + dot, it = (results['dot'], results['it']) + # Remove the 'itertools' + prefix = it.prefix + it.remove() + # Replace the node which contains ('.', 'function') with the + # function (to be consistent with the second part of the pattern) + dot.remove() + func.parent.replace(func) + + prefix = prefix or func.prefix + func.replace(Name(func.value[1:], prefix=prefix)) diff --git a/evalkit_tf446/lib/python3.10/lib2to3/fixes/fix_itertools_imports.py b/evalkit_tf446/lib/python3.10/lib2to3/fixes/fix_itertools_imports.py new file mode 100644 index 0000000000000000000000000000000000000000..0ddbc7b8422991bacc398d8641091a478ee84f55 --- /dev/null +++ b/evalkit_tf446/lib/python3.10/lib2to3/fixes/fix_itertools_imports.py @@ -0,0 +1,57 @@ +""" Fixer for imports of itertools.(imap|ifilter|izip|ifilterfalse) """ + +# Local imports +from lib2to3 import fixer_base +from lib2to3.fixer_util import BlankLine, syms, token + + +class FixItertoolsImports(fixer_base.BaseFix): + BM_compatible = True + PATTERN = """ + import_from< 'from' 'itertools' 'import' imports=any > + """ %(locals()) + + def transform(self, node, results): + imports = results['imports'] + if imports.type == syms.import_as_name or not imports.children: + children = [imports] + else: + children = imports.children + for child in children[::2]: + if child.type == token.NAME: + member = child.value + name_node = child + elif child.type == token.STAR: + # Just leave the import as is. + return + else: + assert child.type == syms.import_as_name + name_node = child.children[0] + member_name = name_node.value + if member_name in ('imap', 'izip', 'ifilter'): + child.value = None + child.remove() + elif member_name in ('ifilterfalse', 'izip_longest'): + node.changed() + name_node.value = ('filterfalse' if member_name[1] == 'f' + else 'zip_longest') + + # Make sure the import statement is still sane + children = imports.children[:] or [imports] + remove_comma = True + for child in children: + if remove_comma and child.type == token.COMMA: + child.remove() + else: + remove_comma ^= True + + while children and children[-1].type == token.COMMA: + children.pop().remove() + + # If there are no imports left, just get rid of the entire statement + if (not (imports.children or getattr(imports, 'value', None)) or + imports.parent is None): + p = node.prefix + node = BlankLine() + node.prefix = p + return node diff --git a/evalkit_tf446/lib/python3.10/lib2to3/fixes/fix_long.py b/evalkit_tf446/lib/python3.10/lib2to3/fixes/fix_long.py new file mode 100644 index 0000000000000000000000000000000000000000..f227c9f49815388ed58a79e773c8114bced1e58f --- /dev/null +++ b/evalkit_tf446/lib/python3.10/lib2to3/fixes/fix_long.py @@ -0,0 +1,19 @@ +# Copyright 2006 Google, Inc. All Rights Reserved. +# Licensed to PSF under a Contributor Agreement. + +"""Fixer that turns 'long' into 'int' everywhere. 
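+
+For illustration, assuming the name really refers to the builtin:
+
+    data = long(value)        ->  data = int(value)
+    isinstance(n, long)       ->  isinstance(n, int)
+
+Literals such as 1L are handled by fix_numliterals, not by this fixer.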
+""" + +# Local imports +from lib2to3 import fixer_base +from lib2to3.fixer_util import is_probably_builtin + + +class FixLong(fixer_base.BaseFix): + BM_compatible = True + PATTERN = "'long'" + + def transform(self, node, results): + if is_probably_builtin(node): + node.value = "int" + node.changed() diff --git a/evalkit_tf446/lib/python3.10/lib2to3/fixes/fix_map.py b/evalkit_tf446/lib/python3.10/lib2to3/fixes/fix_map.py new file mode 100644 index 0000000000000000000000000000000000000000..78cf81c6f94098aad11edf351ff0f31da9cdbccd --- /dev/null +++ b/evalkit_tf446/lib/python3.10/lib2to3/fixes/fix_map.py @@ -0,0 +1,110 @@ +# Copyright 2007 Google, Inc. All Rights Reserved. +# Licensed to PSF under a Contributor Agreement. + +"""Fixer that changes map(F, ...) into list(map(F, ...)) unless there +exists a 'from future_builtins import map' statement in the top-level +namespace. + +As a special case, map(None, X) is changed into list(X). (This is +necessary because the semantics are changed in this case -- the new +map(None, X) is equivalent to [(x,) for x in X].) + +We avoid the transformation (except for the special case mentioned +above) if the map() call is directly contained in iter(<>), list(<>), +tuple(<>), sorted(<>), ...join(<>), or for V in <>:. + +NOTE: This is still not correct if the original code was depending on +map(F, X, Y, ...) to go on until the longest argument is exhausted, +substituting None for missing values -- like zip(), it now stops as +soon as the shortest argument is exhausted. +""" + +# Local imports +from ..pgen2 import token +from .. import fixer_base +from ..fixer_util import Name, ArgList, Call, ListComp, in_special_context +from ..pygram import python_symbols as syms +from ..pytree import Node + + +class FixMap(fixer_base.ConditionalFix): + BM_compatible = True + + PATTERN = """ + map_none=power< + 'map' + trailer< '(' arglist< 'None' ',' arg=any [','] > ')' > + [extra_trailers=trailer*] + > + | + map_lambda=power< + 'map' + trailer< + '(' + arglist< + lambdef< 'lambda' + (fp=NAME | vfpdef< '(' fp=NAME ')'> ) ':' xp=any + > + ',' + it=any + > + ')' + > + [extra_trailers=trailer*] + > + | + power< + 'map' args=trailer< '(' [any] ')' > + [extra_trailers=trailer*] + > + """ + + skip_on = 'future_builtins.map' + + def transform(self, node, results): + if self.should_skip(node): + return + + trailers = [] + if 'extra_trailers' in results: + for t in results['extra_trailers']: + trailers.append(t.clone()) + + if node.parent.type == syms.simple_stmt: + self.warning(node, "You should use a for loop here") + new = node.clone() + new.prefix = "" + new = Call(Name("list"), [new]) + elif "map_lambda" in results: + new = ListComp(results["xp"].clone(), + results["fp"].clone(), + results["it"].clone()) + new = Node(syms.power, [new] + trailers, prefix="") + + else: + if "map_none" in results: + new = results["arg"].clone() + new.prefix = "" + else: + if "args" in results: + args = results["args"] + if args.type == syms.trailer and \ + args.children[1].type == syms.arglist and \ + args.children[1].children[0].type == token.NAME and \ + args.children[1].children[0].value == "None": + self.warning(node, "cannot convert map(None, ...) 
" + "with multiple arguments because map() " + "now truncates to the shortest sequence") + return + + new = Node(syms.power, [Name("map"), args.clone()]) + new.prefix = "" + + if in_special_context(node): + return None + + new = Node(syms.power, [Name("list"), ArgList([new])] + trailers) + new.prefix = "" + + new.prefix = node.prefix + return new diff --git a/evalkit_tf446/lib/python3.10/lib2to3/fixes/fix_metaclass.py b/evalkit_tf446/lib/python3.10/lib2to3/fixes/fix_metaclass.py new file mode 100644 index 0000000000000000000000000000000000000000..fe547b2228072a3cf436733f40154004e98c215c --- /dev/null +++ b/evalkit_tf446/lib/python3.10/lib2to3/fixes/fix_metaclass.py @@ -0,0 +1,228 @@ +"""Fixer for __metaclass__ = X -> (metaclass=X) methods. + + The various forms of classef (inherits nothing, inherits once, inherits + many) don't parse the same in the CST so we look at ALL classes for + a __metaclass__ and if we find one normalize the inherits to all be + an arglist. + + For one-liner classes ('class X: pass') there is no indent/dedent so + we normalize those into having a suite. + + Moving the __metaclass__ into the classdef can also cause the class + body to be empty so there is some special casing for that as well. + + This fixer also tries very hard to keep original indenting and spacing + in all those corner cases. + +""" +# Author: Jack Diederich + +# Local imports +from .. import fixer_base +from ..pygram import token +from ..fixer_util import syms, Node, Leaf + + +def has_metaclass(parent): + """ we have to check the cls_node without changing it. + There are two possibilities: + 1) clsdef => suite => simple_stmt => expr_stmt => Leaf('__meta') + 2) clsdef => simple_stmt => expr_stmt => Leaf('__meta') + """ + for node in parent.children: + if node.type == syms.suite: + return has_metaclass(node) + elif node.type == syms.simple_stmt and node.children: + expr_node = node.children[0] + if expr_node.type == syms.expr_stmt and expr_node.children: + left_side = expr_node.children[0] + if isinstance(left_side, Leaf) and \ + left_side.value == '__metaclass__': + return True + return False + + +def fixup_parse_tree(cls_node): + """ one-line classes don't get a suite in the parse tree so we add + one to normalize the tree + """ + for node in cls_node.children: + if node.type == syms.suite: + # already in the preferred format, do nothing + return + + # !%@#! one-liners have no suite node, we have to fake one up + for i, node in enumerate(cls_node.children): + if node.type == token.COLON: + break + else: + raise ValueError("No class suite and no ':'!") + + # move everything into a suite node + suite = Node(syms.suite, []) + while cls_node.children[i+1:]: + move_node = cls_node.children[i+1] + suite.append_child(move_node.clone()) + move_node.remove() + cls_node.append_child(suite) + node = suite + + +def fixup_simple_stmt(parent, i, stmt_node): + """ if there is a semi-colon all the parts count as part of the same + simple_stmt. 
We just want the __metaclass__ part so we move + everything after the semi-colon into its own simple_stmt node + """ + for semi_ind, node in enumerate(stmt_node.children): + if node.type == token.SEMI: # *sigh* + break + else: + return + + node.remove() # kill the semicolon + new_expr = Node(syms.expr_stmt, []) + new_stmt = Node(syms.simple_stmt, [new_expr]) + while stmt_node.children[semi_ind:]: + move_node = stmt_node.children[semi_ind] + new_expr.append_child(move_node.clone()) + move_node.remove() + parent.insert_child(i, new_stmt) + new_leaf1 = new_stmt.children[0].children[0] + old_leaf1 = stmt_node.children[0].children[0] + new_leaf1.prefix = old_leaf1.prefix + + +def remove_trailing_newline(node): + if node.children and node.children[-1].type == token.NEWLINE: + node.children[-1].remove() + + +def find_metas(cls_node): + # find the suite node (Mmm, sweet nodes) + for node in cls_node.children: + if node.type == syms.suite: + break + else: + raise ValueError("No class suite!") + + # look for simple_stmt[ expr_stmt[ Leaf('__metaclass__') ] ] + for i, simple_node in list(enumerate(node.children)): + if simple_node.type == syms.simple_stmt and simple_node.children: + expr_node = simple_node.children[0] + if expr_node.type == syms.expr_stmt and expr_node.children: + # Check if the expr_node is a simple assignment. + left_node = expr_node.children[0] + if isinstance(left_node, Leaf) and \ + left_node.value == '__metaclass__': + # We found an assignment to __metaclass__. + fixup_simple_stmt(node, i, simple_node) + remove_trailing_newline(simple_node) + yield (node, i, simple_node) + + +def fixup_indent(suite): + """ If an INDENT is followed by a thing with a prefix then nuke the prefix + Otherwise we get in trouble when removing __metaclass__ at suite start + """ + kids = suite.children[::-1] + # find the first indent + while kids: + node = kids.pop() + if node.type == token.INDENT: + break + + # find the first Leaf + while kids: + node = kids.pop() + if isinstance(node, Leaf) and node.type != token.DEDENT: + if node.prefix: + node.prefix = '' + return + else: + kids.extend(node.children[::-1]) + + +class FixMetaclass(fixer_base.BaseFix): + BM_compatible = True + + PATTERN = """ + classdef + """ + + def transform(self, node, results): + if not has_metaclass(node): + return + + fixup_parse_tree(node) + + # find metaclasses, keep the last one + last_metaclass = None + for suite, i, stmt in find_metas(node): + last_metaclass = stmt + stmt.remove() + + text_type = node.children[0].type # always Leaf(nnn, 'class') + + # figure out what kind of classdef we have + if len(node.children) == 7: + # Node(classdef, ['class', 'name', '(', arglist, ')', ':', suite]) + # 0 1 2 3 4 5 6 + if node.children[3].type == syms.arglist: + arglist = node.children[3] + # Node(classdef, ['class', 'name', '(', 'Parent', ')', ':', suite]) + else: + parent = node.children[3].clone() + arglist = Node(syms.arglist, [parent]) + node.set_child(3, arglist) + elif len(node.children) == 6: + # Node(classdef, ['class', 'name', '(', ')', ':', suite]) + # 0 1 2 3 4 5 + arglist = Node(syms.arglist, []) + node.insert_child(3, arglist) + elif len(node.children) == 4: + # Node(classdef, ['class', 'name', ':', suite]) + # 0 1 2 3 + arglist = Node(syms.arglist, []) + node.insert_child(2, Leaf(token.RPAR, ')')) + node.insert_child(2, arglist) + node.insert_child(2, Leaf(token.LPAR, '(')) + else: + raise ValueError("Unexpected class definition") + + # now stick the metaclass in the arglist + meta_txt = 
last_metaclass.children[0].children[0] + meta_txt.value = 'metaclass' + orig_meta_prefix = meta_txt.prefix + + if arglist.children: + arglist.append_child(Leaf(token.COMMA, ',')) + meta_txt.prefix = ' ' + else: + meta_txt.prefix = '' + + # compact the expression "metaclass = Meta" -> "metaclass=Meta" + expr_stmt = last_metaclass.children[0] + assert expr_stmt.type == syms.expr_stmt + expr_stmt.children[1].prefix = '' + expr_stmt.children[2].prefix = '' + + arglist.append_child(last_metaclass) + + fixup_indent(suite) + + # check for empty suite + if not suite.children: + # one-liner that was just __metaclass_ + suite.remove() + pass_leaf = Leaf(text_type, 'pass') + pass_leaf.prefix = orig_meta_prefix + node.append_child(pass_leaf) + node.append_child(Leaf(token.NEWLINE, '\n')) + + elif len(suite.children) > 1 and \ + (suite.children[-2].type == token.INDENT and + suite.children[-1].type == token.DEDENT): + # there was only one line in the class body and it was __metaclass__ + pass_leaf = Leaf(text_type, 'pass') + suite.insert_child(-1, pass_leaf) + suite.insert_child(-1, Leaf(token.NEWLINE, '\n')) diff --git a/evalkit_tf446/lib/python3.10/lib2to3/fixes/fix_methodattrs.py b/evalkit_tf446/lib/python3.10/lib2to3/fixes/fix_methodattrs.py new file mode 100644 index 0000000000000000000000000000000000000000..7f9004f00e6e8f2a7d4f72d90b6af3a2674d0fa0 --- /dev/null +++ b/evalkit_tf446/lib/python3.10/lib2to3/fixes/fix_methodattrs.py @@ -0,0 +1,24 @@ +"""Fix bound method attributes (method.im_? -> method.__?__). +""" +# Author: Christian Heimes + +# Local imports +from .. import fixer_base +from ..fixer_util import Name + +MAP = { + "im_func" : "__func__", + "im_self" : "__self__", + "im_class" : "__self__.__class__" + } + +class FixMethodattrs(fixer_base.BaseFix): + BM_compatible = True + PATTERN = """ + power< any+ trailer< '.' attr=('im_func' | 'im_self' | 'im_class') > any* > + """ + + def transform(self, node, results): + attr = results["attr"][0] + new = MAP[attr.value] + attr.replace(Name(new, prefix=attr.prefix)) diff --git a/evalkit_tf446/lib/python3.10/lib2to3/fixes/fix_ne.py b/evalkit_tf446/lib/python3.10/lib2to3/fixes/fix_ne.py new file mode 100644 index 0000000000000000000000000000000000000000..e3ee10f4a63e0c211d2fb430095dfd784d68518e --- /dev/null +++ b/evalkit_tf446/lib/python3.10/lib2to3/fixes/fix_ne.py @@ -0,0 +1,23 @@ +# Copyright 2006 Google, Inc. All Rights Reserved. +# Licensed to PSF under a Contributor Agreement. + +"""Fixer that turns <> into !=.""" + +# Local imports +from .. import pytree +from ..pgen2 import token +from .. import fixer_base + + +class FixNe(fixer_base.BaseFix): + # This is so simple that we don't need the pattern compiler. + + _accept_type = token.NOTEQUAL + + def match(self, node): + # Override + return node.value == "<>" + + def transform(self, node, results): + new = pytree.Leaf(token.NOTEQUAL, "!=", prefix=node.prefix) + return new diff --git a/evalkit_tf446/lib/python3.10/lib2to3/fixes/fix_next.py b/evalkit_tf446/lib/python3.10/lib2to3/fixes/fix_next.py new file mode 100644 index 0000000000000000000000000000000000000000..9f6305e1d49dc5327338912f2e6ed0b5794c5062 --- /dev/null +++ b/evalkit_tf446/lib/python3.10/lib2to3/fixes/fix_next.py @@ -0,0 +1,103 @@ +"""Fixer for it.next() -> next(it), per PEP 3114.""" +# Author: Collin Winter + +# Things that currently aren't covered: +# - listcomp "next" names aren't warned +# - "with" statement targets aren't checked + +# Local imports +from ..pgen2 import token +from ..pygram import python_symbols as syms +from .. 
import fixer_base +from ..fixer_util import Name, Call, find_binding + +bind_warning = "Calls to builtin next() possibly shadowed by global binding" + + +class FixNext(fixer_base.BaseFix): + BM_compatible = True + PATTERN = """ + power< base=any+ trailer< '.' attr='next' > trailer< '(' ')' > > + | + power< head=any+ trailer< '.' attr='next' > not trailer< '(' ')' > > + | + classdef< 'class' any+ ':' + suite< any* + funcdef< 'def' + name='next' + parameters< '(' NAME ')' > any+ > + any* > > + | + global=global_stmt< 'global' any* 'next' any* > + """ + + order = "pre" # Pre-order tree traversal + + def start_tree(self, tree, filename): + super(FixNext, self).start_tree(tree, filename) + + n = find_binding('next', tree) + if n: + self.warning(n, bind_warning) + self.shadowed_next = True + else: + self.shadowed_next = False + + def transform(self, node, results): + assert results + + base = results.get("base") + attr = results.get("attr") + name = results.get("name") + + if base: + if self.shadowed_next: + attr.replace(Name("__next__", prefix=attr.prefix)) + else: + base = [n.clone() for n in base] + base[0].prefix = "" + node.replace(Call(Name("next", prefix=node.prefix), base)) + elif name: + n = Name("__next__", prefix=name.prefix) + name.replace(n) + elif attr: + # We don't do this transformation if we're assigning to "x.next". + # Unfortunately, it doesn't seem possible to do this in PATTERN, + # so it's being done here. + if is_assign_target(node): + head = results["head"] + if "".join([str(n) for n in head]).strip() == '__builtin__': + self.warning(node, bind_warning) + return + attr.replace(Name("__next__")) + elif "global" in results: + self.warning(node, bind_warning) + self.shadowed_next = True + + +### The following functions help test if node is part of an assignment +### target. + +def is_assign_target(node): + assign = find_assign(node) + if assign is None: + return False + + for child in assign.children: + if child.type == token.EQUAL: + return False + elif is_subtree(child, node): + return True + return False + +def find_assign(node): + if node.type == syms.expr_stmt: + return node + if node.type == syms.simple_stmt or node.parent is None: + return None + return find_assign(node.parent) + +def is_subtree(root, node): + if root == node: + return True + return any(is_subtree(c, node) for c in root.children) diff --git a/evalkit_tf446/lib/python3.10/lib2to3/fixes/fix_nonzero.py b/evalkit_tf446/lib/python3.10/lib2to3/fixes/fix_nonzero.py new file mode 100644 index 0000000000000000000000000000000000000000..c2295969a7728f78617677b01aac8122fa15539c --- /dev/null +++ b/evalkit_tf446/lib/python3.10/lib2to3/fixes/fix_nonzero.py @@ -0,0 +1,21 @@ +"""Fixer for __nonzero__ -> __bool__ methods.""" +# Author: Collin Winter + +# Local imports +from .. 
import fixer_base +from ..fixer_util import Name + +class FixNonzero(fixer_base.BaseFix): + BM_compatible = True + PATTERN = """ + classdef< 'class' any+ ':' + suite< any* + funcdef< 'def' name='__nonzero__' + parameters< '(' NAME ')' > any+ > + any* > > + """ + + def transform(self, node, results): + name = results["name"] + new = Name("__bool__", prefix=name.prefix) + name.replace(new) diff --git a/evalkit_tf446/lib/python3.10/lib2to3/fixes/fix_numliterals.py b/evalkit_tf446/lib/python3.10/lib2to3/fixes/fix_numliterals.py new file mode 100644 index 0000000000000000000000000000000000000000..79207d4aa368aee20a2ac9ae1e44072aabb2afd2 --- /dev/null +++ b/evalkit_tf446/lib/python3.10/lib2to3/fixes/fix_numliterals.py @@ -0,0 +1,28 @@ +"""Fixer that turns 1L into 1, 0755 into 0o755. +""" +# Copyright 2007 Georg Brandl. +# Licensed to PSF under a Contributor Agreement. + +# Local imports +from ..pgen2 import token +from .. import fixer_base +from ..fixer_util import Number + + +class FixNumliterals(fixer_base.BaseFix): + # This is so simple that we don't need the pattern compiler. + + _accept_type = token.NUMBER + + def match(self, node): + # Override + return (node.value.startswith("0") or node.value[-1] in "Ll") + + def transform(self, node, results): + val = node.value + if val[-1] in 'Ll': + val = val[:-1] + elif val.startswith('0') and val.isdigit() and len(set(val)) > 1: + val = "0o" + val[1:] + + return Number(val, prefix=node.prefix) diff --git a/evalkit_tf446/lib/python3.10/lib2to3/fixes/fix_operator.py b/evalkit_tf446/lib/python3.10/lib2to3/fixes/fix_operator.py new file mode 100644 index 0000000000000000000000000000000000000000..d303cd2018befb0745f2868068becf9f350b9b3d --- /dev/null +++ b/evalkit_tf446/lib/python3.10/lib2to3/fixes/fix_operator.py @@ -0,0 +1,97 @@ +"""Fixer for operator functions. + +operator.isCallable(obj) -> callable(obj) +operator.sequenceIncludes(obj) -> operator.contains(obj) +operator.isSequenceType(obj) -> isinstance(obj, collections.abc.Sequence) +operator.isMappingType(obj) -> isinstance(obj, collections.abc.Mapping) +operator.isNumberType(obj) -> isinstance(obj, numbers.Number) +operator.repeat(obj, n) -> operator.mul(obj, n) +operator.irepeat(obj, n) -> operator.imul(obj, n) +""" + +import collections.abc + +# Local imports +from lib2to3 import fixer_base +from lib2to3.fixer_util import Call, Name, String, touch_import + + +def invocation(s): + def dec(f): + f.invocation = s + return f + return dec + + +class FixOperator(fixer_base.BaseFix): + BM_compatible = True + order = "pre" + + methods = """ + method=('isCallable'|'sequenceIncludes' + |'isSequenceType'|'isMappingType'|'isNumberType' + |'repeat'|'irepeat') + """ + obj = "'(' obj=any ')'" + PATTERN = """ + power< module='operator' + trailer< '.' 
%(methods)s > trailer< %(obj)s > > + | + power< %(methods)s trailer< %(obj)s > > + """ % dict(methods=methods, obj=obj) + + def transform(self, node, results): + method = self._check_method(node, results) + if method is not None: + return method(node, results) + + @invocation("operator.contains(%s)") + def _sequenceIncludes(self, node, results): + return self._handle_rename(node, results, "contains") + + @invocation("callable(%s)") + def _isCallable(self, node, results): + obj = results["obj"] + return Call(Name("callable"), [obj.clone()], prefix=node.prefix) + + @invocation("operator.mul(%s)") + def _repeat(self, node, results): + return self._handle_rename(node, results, "mul") + + @invocation("operator.imul(%s)") + def _irepeat(self, node, results): + return self._handle_rename(node, results, "imul") + + @invocation("isinstance(%s, collections.abc.Sequence)") + def _isSequenceType(self, node, results): + return self._handle_type2abc(node, results, "collections.abc", "Sequence") + + @invocation("isinstance(%s, collections.abc.Mapping)") + def _isMappingType(self, node, results): + return self._handle_type2abc(node, results, "collections.abc", "Mapping") + + @invocation("isinstance(%s, numbers.Number)") + def _isNumberType(self, node, results): + return self._handle_type2abc(node, results, "numbers", "Number") + + def _handle_rename(self, node, results, name): + method = results["method"][0] + method.value = name + method.changed() + + def _handle_type2abc(self, node, results, module, abc): + touch_import(None, module, node) + obj = results["obj"] + args = [obj.clone(), String(", " + ".".join([module, abc]))] + return Call(Name("isinstance"), args, prefix=node.prefix) + + def _check_method(self, node, results): + method = getattr(self, "_" + results["method"][0].value) + if isinstance(method, collections.abc.Callable): + if "module" in results: + return method + else: + sub = (str(results["obj"]),) + invocation_str = method.invocation % sub + self.warning(node, "You should use '%s' here." % invocation_str) + return None diff --git a/evalkit_tf446/lib/python3.10/lib2to3/fixes/fix_paren.py b/evalkit_tf446/lib/python3.10/lib2to3/fixes/fix_paren.py new file mode 100644 index 0000000000000000000000000000000000000000..df3da5f5232c9c42fc53c904f8c0b886b4dd4a51 --- /dev/null +++ b/evalkit_tf446/lib/python3.10/lib2to3/fixes/fix_paren.py @@ -0,0 +1,44 @@ +"""Fixer that adds parentheses where they are required + +This converts ``[x for x in 1, 2]`` to ``[x for x in (1, 2)]``.""" + +# By Taek Joo Kim and Benjamin Peterson + +# Local imports +from .. 
import fixer_base +from ..fixer_util import LParen, RParen + +# XXX This doesn't support nested for loops like [x for x in 1, 2 for x in 1, 2] +class FixParen(fixer_base.BaseFix): + BM_compatible = True + + PATTERN = """ + atom< ('[' | '(') + (listmaker< any + comp_for< + 'for' NAME 'in' + target=testlist_safe< any (',' any)+ [','] + > + [any] + > + > + | + testlist_gexp< any + comp_for< + 'for' NAME 'in' + target=testlist_safe< any (',' any)+ [','] + > + [any] + > + >) + (']' | ')') > + """ + + def transform(self, node, results): + target = results["target"] + + lparen = LParen() + lparen.prefix = target.prefix + target.prefix = "" # Make it hug the parentheses + target.insert_child(0, lparen) + target.append_child(RParen()) diff --git a/evalkit_tf446/lib/python3.10/lib2to3/fixes/fix_print.py b/evalkit_tf446/lib/python3.10/lib2to3/fixes/fix_print.py new file mode 100644 index 0000000000000000000000000000000000000000..8780322265f6fe526cb35e7961f3cfb5aaa8e052 --- /dev/null +++ b/evalkit_tf446/lib/python3.10/lib2to3/fixes/fix_print.py @@ -0,0 +1,87 @@ +# Copyright 2006 Google, Inc. All Rights Reserved. +# Licensed to PSF under a Contributor Agreement. + +"""Fixer for print. + +Change: + 'print' into 'print()' + 'print ...' into 'print(...)' + 'print ... ,' into 'print(..., end=" ")' + 'print >>x, ...' into 'print(..., file=x)' + +No changes are applied if print_function is imported from __future__ + +""" + +# Local imports +from .. import patcomp +from .. import pytree +from ..pgen2 import token +from .. import fixer_base +from ..fixer_util import Name, Call, Comma, String + + +parend_expr = patcomp.compile_pattern( + """atom< '(' [atom|STRING|NAME] ')' >""" + ) + + +class FixPrint(fixer_base.BaseFix): + + BM_compatible = True + + PATTERN = """ + simple_stmt< any* bare='print' any* > | print_stmt + """ + + def transform(self, node, results): + assert results + + bare_print = results.get("bare") + + if bare_print: + # Special-case print all by itself + bare_print.replace(Call(Name("print"), [], + prefix=bare_print.prefix)) + return + assert node.children[0] == Name("print") + args = node.children[1:] + if len(args) == 1 and parend_expr.match(args[0]): + # We don't want to keep sticking parens around an + # already-parenthesised expression. + return + + sep = end = file = None + if args and args[-1] == Comma(): + args = args[:-1] + end = " " + if args and args[0] == pytree.Leaf(token.RIGHTSHIFT, ">>"): + assert len(args) >= 2 + file = args[1].clone() + args = args[3:] # Strip a possible comma after the file expression + # Now synthesize a print(args, sep=..., end=..., file=...) node. 
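+        # For example, 'print >>sys.stderr, "error",' is rebuilt here as
+        # print("error", end=' ', file=sys.stderr).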
+ l_args = [arg.clone() for arg in args] + if l_args: + l_args[0].prefix = "" + if sep is not None or end is not None or file is not None: + if sep is not None: + self.add_kwarg(l_args, "sep", String(repr(sep))) + if end is not None: + self.add_kwarg(l_args, "end", String(repr(end))) + if file is not None: + self.add_kwarg(l_args, "file", file) + n_stmt = Call(Name("print"), l_args) + n_stmt.prefix = node.prefix + return n_stmt + + def add_kwarg(self, l_nodes, s_kwd, n_expr): + # XXX All this prefix-setting may lose comments (though rarely) + n_expr.prefix = "" + n_argument = pytree.Node(self.syms.argument, + (Name(s_kwd), + pytree.Leaf(token.EQUAL, "="), + n_expr)) + if l_nodes: + l_nodes.append(Comma()) + n_argument.prefix = " " + l_nodes.append(n_argument) diff --git a/evalkit_tf446/lib/python3.10/lib2to3/fixes/fix_raise.py b/evalkit_tf446/lib/python3.10/lib2to3/fixes/fix_raise.py new file mode 100644 index 0000000000000000000000000000000000000000..05aa21e74a30ff101c593dc03514efc26f10cecf --- /dev/null +++ b/evalkit_tf446/lib/python3.10/lib2to3/fixes/fix_raise.py @@ -0,0 +1,90 @@ +"""Fixer for 'raise E, V, T' + +raise -> raise +raise E -> raise E +raise E, V -> raise E(V) +raise E, V, T -> raise E(V).with_traceback(T) +raise E, None, T -> raise E.with_traceback(T) + +raise (((E, E'), E''), E'''), V -> raise E(V) +raise "foo", V, T -> warns about string exceptions + + +CAVEATS: +1) "raise E, V" will be incorrectly translated if V is an exception + instance. The correct Python 3 idiom is + + raise E from V + + but since we can't detect instance-hood by syntax alone and since + any client code would have to be changed as well, we don't automate + this. +""" +# Author: Collin Winter + +# Local imports +from .. import pytree +from ..pgen2 import token +from .. import fixer_base +from ..fixer_util import Name, Call, Attr, ArgList, is_tuple + +class FixRaise(fixer_base.BaseFix): + + BM_compatible = True + PATTERN = """ + raise_stmt< 'raise' exc=any [',' val=any [',' tb=any]] > + """ + + def transform(self, node, results): + syms = self.syms + + exc = results["exc"].clone() + if exc.type == token.STRING: + msg = "Python 3 does not support string exceptions" + self.cannot_convert(node, msg) + return + + # Python 2 supports + # raise ((((E1, E2), E3), E4), E5), V + # as a synonym for + # raise E1, V + # Since Python 3 will not support this, we recurse down any tuple + # literals, always taking the first element. + if is_tuple(exc): + while is_tuple(exc): + # exc.children[1:-1] is the unparenthesized tuple + # exc.children[1].children[0] is the first element of the tuple + exc = exc.children[1].children[0].clone() + exc.prefix = " " + + if "val" not in results: + # One-argument raise + new = pytree.Node(syms.raise_stmt, [Name("raise"), exc]) + new.prefix = node.prefix + return new + + val = results["val"].clone() + if is_tuple(val): + args = [c.clone() for c in val.children[1:-1]] + else: + val.prefix = "" + args = [val] + + if "tb" in results: + tb = results["tb"].clone() + tb.prefix = "" + + e = exc + # If there's a traceback and None is passed as the value, then don't + # add a call, since the user probably just wants to add a + # traceback. See issue #9661. 
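+            # e.g. 'raise E, V, T'    becomes 'raise E(V).with_traceback(T)'
+            #      'raise E, None, T' becomes 'raise E.with_traceback(T)'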
+            if val.type != token.NAME or val.value != "None":
+                e = Call(exc, args)
+            with_tb = Attr(e, Name('with_traceback')) + [ArgList([tb])]
+            new = pytree.Node(syms.simple_stmt, [Name("raise")] + with_tb)
+            new.prefix = node.prefix
+            return new
+        else:
+            return pytree.Node(syms.raise_stmt,
+                               [Name("raise"), Call(exc, args)],
+                               prefix=node.prefix)
diff --git a/evalkit_tf446/lib/python3.10/lib2to3/fixes/fix_raw_input.py b/evalkit_tf446/lib/python3.10/lib2to3/fixes/fix_raw_input.py
new file mode 100644
index 0000000000000000000000000000000000000000..a51bb694b9e01e8d6f382f359118df1d74135924
--- /dev/null
+++ b/evalkit_tf446/lib/python3.10/lib2to3/fixes/fix_raw_input.py
@@ -0,0 +1,17 @@
+"""Fixer that changes raw_input(...) into input(...)."""
+# Author: Andre Roberge
+
+# Local imports
+from .. import fixer_base
+from ..fixer_util import Name
+
+class FixRawInput(fixer_base.BaseFix):
+
+    BM_compatible = True
+    PATTERN = """
+              power< name='raw_input' trailer< '(' [any] ')' > any* >
+              """
+
+    def transform(self, node, results):
+        name = results["name"]
+        name.replace(Name("input", prefix=name.prefix))
diff --git a/evalkit_tf446/lib/python3.10/lib2to3/fixes/fix_reduce.py b/evalkit_tf446/lib/python3.10/lib2to3/fixes/fix_reduce.py
new file mode 100644
index 0000000000000000000000000000000000000000..00e5aa1c33d4826e976cccee05b8f7398954459f
--- /dev/null
+++ b/evalkit_tf446/lib/python3.10/lib2to3/fixes/fix_reduce.py
@@ -0,0 +1,35 @@
+# Copyright 2008 Armin Ronacher.
+# Licensed to PSF under a Contributor Agreement.
+
+"""Fixer for reduce().
+
+Makes sure reduce() is imported from the functools module if reduce is
+used in that module.
+"""
+
+from lib2to3 import fixer_base
+from lib2to3.fixer_util import touch_import
+
+
+
+class FixReduce(fixer_base.BaseFix):
+
+    BM_compatible = True
+    order = "pre"
+
+    PATTERN = """
+    power< 'reduce'
+        trailer< '('
+            arglist< (
+                (not(argument<any '=' any>) any ','
+                 not(argument<any '=' any>) any) |
+                (not(argument<any '=' any>) any ','
+                 not(argument<any '=' any>) any ','
+                 not(argument<any '=' any>) any)
+            ) >
+        ')' >
+    >
+    """
+
+    def transform(self, node, results):
+        touch_import('functools', 'reduce', node)
diff --git a/evalkit_tf446/lib/python3.10/lib2to3/fixes/fix_reload.py b/evalkit_tf446/lib/python3.10/lib2to3/fixes/fix_reload.py
new file mode 100644
index 0000000000000000000000000000000000000000..b30841131c51f9b6311d1e10629dd9e5049fd6ab
--- /dev/null
+++ b/evalkit_tf446/lib/python3.10/lib2to3/fixes/fix_reload.py
@@ -0,0 +1,36 @@
+"""Fixer for reload().
+
+reload(s) -> importlib.reload(s)"""
+
+# Local imports
+from .. import fixer_base
+from ..fixer_util import ImportAndCall, touch_import
+
+
+class FixReload(fixer_base.BaseFix):
+    BM_compatible = True
+    order = "pre"
+
+    PATTERN = """
+    power< 'reload'
+           trailer< lpar='('
+                    ( not(arglist | argument<any '=' any>) obj=any
+                      | obj=arglist<(not argument<any '=' any>) any ','> )
+                    rpar=')' >
+           after=any*
+    >
+    """
+
+    def transform(self, node, results):
+        if results:
+            # I feel like we should be able to express this logic in the
+            # PATTERN above but I don't know how to do it so...
+            obj = results['obj']
+            if obj:
+                if (obj.type == self.syms.argument and
+                        obj.children[0].value in {'**', '*'}):
+                    return  # Make no change.
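+            # e.g. 'reload(mod)' becomes 'importlib.reload(mod)', and an
+            # 'import importlib' is added to the module if it is missing.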
+ names = ('importlib', 'reload') + new = ImportAndCall(node, results, names) + touch_import(None, 'importlib', node) + return new diff --git a/evalkit_tf446/lib/python3.10/lib2to3/fixes/fix_renames.py b/evalkit_tf446/lib/python3.10/lib2to3/fixes/fix_renames.py new file mode 100644 index 0000000000000000000000000000000000000000..c0e3705ab7be19c037b833bc43e76eafc21067d8 --- /dev/null +++ b/evalkit_tf446/lib/python3.10/lib2to3/fixes/fix_renames.py @@ -0,0 +1,70 @@ +"""Fix incompatible renames + +Fixes: + * sys.maxint -> sys.maxsize +""" +# Author: Christian Heimes +# based on Collin Winter's fix_import + +# Local imports +from .. import fixer_base +from ..fixer_util import Name, attr_chain + +MAPPING = {"sys": {"maxint" : "maxsize"}, + } +LOOKUP = {} + +def alternates(members): + return "(" + "|".join(map(repr, members)) + ")" + + +def build_pattern(): + #bare = set() + for module, replace in list(MAPPING.items()): + for old_attr, new_attr in list(replace.items()): + LOOKUP[(module, old_attr)] = new_attr + #bare.add(module) + #bare.add(old_attr) + #yield """ + # import_name< 'import' (module=%r + # | dotted_as_names< any* module=%r any* >) > + # """ % (module, module) + yield """ + import_from< 'from' module_name=%r 'import' + ( attr_name=%r | import_as_name< attr_name=%r 'as' any >) > + """ % (module, old_attr, old_attr) + yield """ + power< module_name=%r trailer< '.' attr_name=%r > any* > + """ % (module, old_attr) + #yield """bare_name=%s""" % alternates(bare) + + +class FixRenames(fixer_base.BaseFix): + BM_compatible = True + PATTERN = "|".join(build_pattern()) + + order = "pre" # Pre-order tree traversal + + # Don't match the node if it's within another match + def match(self, node): + match = super(FixRenames, self).match + results = match(node) + if results: + if any(match(obj) for obj in attr_chain(node, "parent")): + return False + return results + return False + + #def start_tree(self, tree, filename): + # super(FixRenames, self).start_tree(tree, filename) + # self.replace = {} + + def transform(self, node, results): + mod_name = results.get("module_name") + attr_name = results.get("attr_name") + #bare_name = results.get("bare_name") + #import_mod = results.get("module") + + if mod_name and attr_name: + new_attr = LOOKUP[(mod_name.value, attr_name.value)] + attr_name.replace(Name(new_attr, prefix=attr_name.prefix)) diff --git a/evalkit_tf446/lib/python3.10/lib2to3/fixes/fix_repr.py b/evalkit_tf446/lib/python3.10/lib2to3/fixes/fix_repr.py new file mode 100644 index 0000000000000000000000000000000000000000..1150bb8b9db2afba11a18f2661fdfd4d9bd4b633 --- /dev/null +++ b/evalkit_tf446/lib/python3.10/lib2to3/fixes/fix_repr.py @@ -0,0 +1,23 @@ +# Copyright 2006 Google, Inc. All Rights Reserved. +# Licensed to PSF under a Contributor Agreement. + +"""Fixer that transforms `xyzzy` into repr(xyzzy).""" + +# Local imports +from .. 
import fixer_base +from ..fixer_util import Call, Name, parenthesize + + +class FixRepr(fixer_base.BaseFix): + + BM_compatible = True + PATTERN = """ + atom < '`' expr=any '`' > + """ + + def transform(self, node, results): + expr = results["expr"].clone() + + if expr.type == self.syms.testlist1: + expr = parenthesize(expr) + return Call(Name("repr"), [expr], prefix=node.prefix) diff --git a/evalkit_tf446/lib/python3.10/lib2to3/fixes/fix_set_literal.py b/evalkit_tf446/lib/python3.10/lib2to3/fixes/fix_set_literal.py new file mode 100644 index 0000000000000000000000000000000000000000..762550cf73dc0b627612465e63f0368d06847ae1 --- /dev/null +++ b/evalkit_tf446/lib/python3.10/lib2to3/fixes/fix_set_literal.py @@ -0,0 +1,53 @@ +""" +Optional fixer to transform set() calls to set literals. +""" + +# Author: Benjamin Peterson + +from lib2to3 import fixer_base, pytree +from lib2to3.fixer_util import token, syms + + + +class FixSetLiteral(fixer_base.BaseFix): + + BM_compatible = True + explicit = True + + PATTERN = """power< 'set' trailer< '(' + (atom=atom< '[' (items=listmaker< any ((',' any)* [',']) > + | + single=any) ']' > + | + atom< '(' items=testlist_gexp< any ((',' any)* [',']) > ')' > + ) + ')' > > + """ + + def transform(self, node, results): + single = results.get("single") + if single: + # Make a fake listmaker + fake = pytree.Node(syms.listmaker, [single.clone()]) + single.replace(fake) + items = fake + else: + items = results["items"] + + # Build the contents of the literal + literal = [pytree.Leaf(token.LBRACE, "{")] + literal.extend(n.clone() for n in items.children) + literal.append(pytree.Leaf(token.RBRACE, "}")) + # Set the prefix of the right brace to that of the ')' or ']' + literal[-1].prefix = items.next_sibling.prefix + maker = pytree.Node(syms.dictsetmaker, literal) + maker.prefix = node.prefix + + # If the original was a one tuple, we need to remove the extra comma. + if len(maker.children) == 4: + n = maker.children[2] + n.remove() + maker.children[-1].prefix = n.prefix + + # Finally, replace the set call with our shiny new literal. + return maker diff --git a/evalkit_tf446/lib/python3.10/lib2to3/fixes/fix_standarderror.py b/evalkit_tf446/lib/python3.10/lib2to3/fixes/fix_standarderror.py new file mode 100644 index 0000000000000000000000000000000000000000..dc742167e6e9d4680afb7afcfc64524f4c14aca3 --- /dev/null +++ b/evalkit_tf446/lib/python3.10/lib2to3/fixes/fix_standarderror.py @@ -0,0 +1,18 @@ +# Copyright 2007 Google, Inc. All Rights Reserved. +# Licensed to PSF under a Contributor Agreement. + +"""Fixer for StandardError -> Exception.""" + +# Local imports +from .. import fixer_base +from ..fixer_util import Name + + +class FixStandarderror(fixer_base.BaseFix): + BM_compatible = True + PATTERN = """ + 'StandardError' + """ + + def transform(self, node, results): + return Name("Exception", prefix=node.prefix) diff --git a/evalkit_tf446/lib/python3.10/lib2to3/fixes/fix_sys_exc.py b/evalkit_tf446/lib/python3.10/lib2to3/fixes/fix_sys_exc.py new file mode 100644 index 0000000000000000000000000000000000000000..f6039690374ab26d383bc8e0309438c051d0d405 --- /dev/null +++ b/evalkit_tf446/lib/python3.10/lib2to3/fixes/fix_sys_exc.py @@ -0,0 +1,30 @@ +"""Fixer for sys.exc_{type, value, traceback} + +sys.exc_type -> sys.exc_info()[0] +sys.exc_value -> sys.exc_info()[1] +sys.exc_traceback -> sys.exc_info()[2] +""" + +# By Jeff Balogh and Benjamin Peterson + +# Local imports +from .. 
import fixer_base +from ..fixer_util import Attr, Call, Name, Number, Subscript, Node, syms + +class FixSysExc(fixer_base.BaseFix): + # This order matches the ordering of sys.exc_info(). + exc_info = ["exc_type", "exc_value", "exc_traceback"] + BM_compatible = True + PATTERN = """ + power< 'sys' trailer< dot='.' attribute=(%s) > > + """ % '|'.join("'%s'" % e for e in exc_info) + + def transform(self, node, results): + sys_attr = results["attribute"][0] + index = Number(self.exc_info.index(sys_attr.value)) + + call = Call(Name("exc_info"), prefix=sys_attr.prefix) + attr = Attr(Name("sys"), call) + attr[1].children[0].prefix = results["dot"].prefix + attr.append(Subscript(index)) + return Node(syms.power, attr, prefix=node.prefix) diff --git a/evalkit_tf446/lib/python3.10/lib2to3/fixes/fix_throw.py b/evalkit_tf446/lib/python3.10/lib2to3/fixes/fix_throw.py new file mode 100644 index 0000000000000000000000000000000000000000..aac29169b4e98e26d0aa78c3fbce641742b04952 --- /dev/null +++ b/evalkit_tf446/lib/python3.10/lib2to3/fixes/fix_throw.py @@ -0,0 +1,56 @@ +"""Fixer for generator.throw(E, V, T). + +g.throw(E) -> g.throw(E) +g.throw(E, V) -> g.throw(E(V)) +g.throw(E, V, T) -> g.throw(E(V).with_traceback(T)) + +g.throw("foo"[, V[, T]]) will warn about string exceptions.""" +# Author: Collin Winter + +# Local imports +from .. import pytree +from ..pgen2 import token +from .. import fixer_base +from ..fixer_util import Name, Call, ArgList, Attr, is_tuple + +class FixThrow(fixer_base.BaseFix): + BM_compatible = True + PATTERN = """ + power< any trailer< '.' 'throw' > + trailer< '(' args=arglist< exc=any ',' val=any [',' tb=any] > ')' > + > + | + power< any trailer< '.' 'throw' > trailer< '(' exc=any ')' > > + """ + + def transform(self, node, results): + syms = self.syms + + exc = results["exc"].clone() + if exc.type is token.STRING: + self.cannot_convert(node, "Python 3 does not support string exceptions") + return + + # Leave "g.throw(E)" alone + val = results.get("val") + if val is None: + return + + val = val.clone() + if is_tuple(val): + args = [c.clone() for c in val.children[1:-1]] + else: + val.prefix = "" + args = [val] + + throw_args = results["args"] + + if "tb" in results: + tb = results["tb"].clone() + tb.prefix = "" + + e = Call(exc, args) + with_tb = Attr(e, Name('with_traceback')) + [ArgList([tb])] + throw_args.replace(pytree.Node(syms.power, with_tb)) + else: + throw_args.replace(Call(exc, args)) diff --git a/evalkit_tf446/lib/python3.10/lib2to3/fixes/fix_tuple_params.py b/evalkit_tf446/lib/python3.10/lib2to3/fixes/fix_tuple_params.py new file mode 100644 index 0000000000000000000000000000000000000000..cad755ffdbefb39431b2ffa4f5c8cf3c7a920c06 --- /dev/null +++ b/evalkit_tf446/lib/python3.10/lib2to3/fixes/fix_tuple_params.py @@ -0,0 +1,175 @@ +"""Fixer for function definitions with tuple parameters. + +def func(((a, b), c), d): + ... + + -> + +def func(x, d): + ((a, b), c) = x + ... + +It will also support lambdas: + + lambda (x, y): x + y -> lambda t: t[0] + t[1] + + # The parens are a syntax error in Python 3 + lambda (x): x + y -> lambda x: x + y +""" +# Author: Collin Winter + +# Local imports +from .. import pytree +from ..pgen2 import token +from .. 
import fixer_base +from ..fixer_util import Assign, Name, Newline, Number, Subscript, syms + +def is_docstring(stmt): + return isinstance(stmt, pytree.Node) and \ + stmt.children[0].type == token.STRING + +class FixTupleParams(fixer_base.BaseFix): + run_order = 4 #use a lower order since lambda is part of other + #patterns + BM_compatible = True + + PATTERN = """ + funcdef< 'def' any parameters< '(' args=any ')' > + ['->' any] ':' suite=any+ > + | + lambda= + lambdef< 'lambda' args=vfpdef< '(' inner=any ')' > + ':' body=any + > + """ + + def transform(self, node, results): + if "lambda" in results: + return self.transform_lambda(node, results) + + new_lines = [] + suite = results["suite"] + args = results["args"] + # This crap is so "def foo(...): x = 5; y = 7" is handled correctly. + # TODO(cwinter): suite-cleanup + if suite[0].children[1].type == token.INDENT: + start = 2 + indent = suite[0].children[1].value + end = Newline() + else: + start = 0 + indent = "; " + end = pytree.Leaf(token.INDENT, "") + + # We need access to self for new_name(), and making this a method + # doesn't feel right. Closing over self and new_lines makes the + # code below cleaner. + def handle_tuple(tuple_arg, add_prefix=False): + n = Name(self.new_name()) + arg = tuple_arg.clone() + arg.prefix = "" + stmt = Assign(arg, n.clone()) + if add_prefix: + n.prefix = " " + tuple_arg.replace(n) + new_lines.append(pytree.Node(syms.simple_stmt, + [stmt, end.clone()])) + + if args.type == syms.tfpdef: + handle_tuple(args) + elif args.type == syms.typedargslist: + for i, arg in enumerate(args.children): + if arg.type == syms.tfpdef: + # Without add_prefix, the emitted code is correct, + # just ugly. + handle_tuple(arg, add_prefix=(i > 0)) + + if not new_lines: + return + + # This isn't strictly necessary, but it plays nicely with other fixers. + # TODO(cwinter) get rid of this when children becomes a smart list + for line in new_lines: + line.parent = suite[0] + + # TODO(cwinter) suite-cleanup + after = start + if start == 0: + new_lines[0].prefix = " " + elif is_docstring(suite[0].children[start]): + new_lines[0].prefix = indent + after = start + 1 + + for line in new_lines: + line.parent = suite[0] + suite[0].children[after:after] = new_lines + for i in range(after+1, after+len(new_lines)+1): + suite[0].children[i].prefix = indent + suite[0].changed() + + def transform_lambda(self, node, results): + args = results["args"] + body = results["body"] + inner = simplify_args(results["inner"]) + + # Replace lambda ((((x)))): x with lambda x: x + if inner.type == token.NAME: + inner = inner.clone() + inner.prefix = " " + args.replace(inner) + return + + params = find_params(args) + to_index = map_to_index(params) + tup_name = self.new_name(tuple_name(params)) + + new_param = Name(tup_name, prefix=" ") + args.replace(new_param.clone()) + for n in body.post_order(): + if n.type == token.NAME and n.value in to_index: + subscripts = [c.clone() for c in to_index[n.value]] + new = pytree.Node(syms.power, + [new_param.clone()] + subscripts) + new.prefix = n.prefix + n.replace(new) + + +### Helper functions for transform_lambda() + +def simplify_args(node): + if node.type in (syms.vfplist, token.NAME): + return node + elif node.type == syms.vfpdef: + # These look like vfpdef< '(' x ')' > where x is NAME + # or another vfpdef instance (leading to recursion). 
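+        # e.g. the parameter in 'lambda (((x))): x' unwraps to the bare NAME
+        # 'x', so the lambda is rewritten as 'lambda x: x'.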
+ while node.type == syms.vfpdef: + node = node.children[1] + return node + raise RuntimeError("Received unexpected node %s" % node) + +def find_params(node): + if node.type == syms.vfpdef: + return find_params(node.children[1]) + elif node.type == token.NAME: + return node.value + return [find_params(c) for c in node.children if c.type != token.COMMA] + +def map_to_index(param_list, prefix=[], d=None): + if d is None: + d = {} + for i, obj in enumerate(param_list): + trailer = [Subscript(Number(str(i)))] + if isinstance(obj, list): + map_to_index(obj, trailer, d=d) + else: + d[obj] = prefix + trailer + return d + +def tuple_name(param_list): + l = [] + for obj in param_list: + if isinstance(obj, list): + l.append(tuple_name(obj)) + else: + l.append(obj) + return "_".join(l) diff --git a/evalkit_tf446/lib/python3.10/lib2to3/fixes/fix_types.py b/evalkit_tf446/lib/python3.10/lib2to3/fixes/fix_types.py new file mode 100644 index 0000000000000000000000000000000000000000..67bf51f2f5b85a6a2f6a4c6427a5d9893f0c4350 --- /dev/null +++ b/evalkit_tf446/lib/python3.10/lib2to3/fixes/fix_types.py @@ -0,0 +1,61 @@ +# Copyright 2007 Google, Inc. All Rights Reserved. +# Licensed to PSF under a Contributor Agreement. + +"""Fixer for removing uses of the types module. + +These work for only the known names in the types module. The forms above +can include types. or not. ie, It is assumed the module is imported either as: + + import types + from types import ... # either * or specific types + +The import statements are not modified. + +There should be another fixer that handles at least the following constants: + + type([]) -> list + type(()) -> tuple + type('') -> str + +""" + +# Local imports +from .. import fixer_base +from ..fixer_util import Name + +_TYPE_MAPPING = { + 'BooleanType' : 'bool', + 'BufferType' : 'memoryview', + 'ClassType' : 'type', + 'ComplexType' : 'complex', + 'DictType': 'dict', + 'DictionaryType' : 'dict', + 'EllipsisType' : 'type(Ellipsis)', + #'FileType' : 'io.IOBase', + 'FloatType': 'float', + 'IntType': 'int', + 'ListType': 'list', + 'LongType': 'int', + 'ObjectType' : 'object', + 'NoneType': 'type(None)', + 'NotImplementedType' : 'type(NotImplemented)', + 'SliceType' : 'slice', + 'StringType': 'bytes', # XXX ? + 'StringTypes' : '(str,)', # XXX ? + 'TupleType': 'tuple', + 'TypeType' : 'type', + 'UnicodeType': 'str', + 'XRangeType' : 'range', + } + +_pats = ["power< 'types' trailer< '.' name='%s' > >" % t for t in _TYPE_MAPPING] + +class FixTypes(fixer_base.BaseFix): + BM_compatible = True + PATTERN = '|'.join(_pats) + + def transform(self, node, results): + new_value = _TYPE_MAPPING.get(results["name"].value) + if new_value: + return Name(new_value, prefix=node.prefix) + return None diff --git a/evalkit_tf446/lib/python3.10/lib2to3/fixes/fix_unicode.py b/evalkit_tf446/lib/python3.10/lib2to3/fixes/fix_unicode.py new file mode 100644 index 0000000000000000000000000000000000000000..c7982c2b97c3e1cacf5812a9ddd9d20fdda66496 --- /dev/null +++ b/evalkit_tf446/lib/python3.10/lib2to3/fixes/fix_unicode.py @@ -0,0 +1,42 @@ +r"""Fixer for unicode. + +* Changes unicode to str and unichr to chr. + +* If "...\u..." is not unicode literal change it into "...\\u...". + +* Change u"..." into "...". + +""" + +from ..pgen2 import token +from .. 
import fixer_base + +_mapping = {"unichr" : "chr", "unicode" : "str"} + +class FixUnicode(fixer_base.BaseFix): + BM_compatible = True + PATTERN = "STRING | 'unicode' | 'unichr'" + + def start_tree(self, tree, filename): + super(FixUnicode, self).start_tree(tree, filename) + self.unicode_literals = 'unicode_literals' in tree.future_features + + def transform(self, node, results): + if node.type == token.NAME: + new = node.clone() + new.value = _mapping[node.value] + return new + elif node.type == token.STRING: + val = node.value + if not self.unicode_literals and val[0] in '\'"' and '\\' in val: + val = r'\\'.join([ + v.replace('\\u', r'\\u').replace('\\U', r'\\U') + for v in val.split(r'\\') + ]) + if val[0] in 'uU': + val = val[1:] + if val == node.value: + return node + new = node.clone() + new.value = val + return new diff --git a/evalkit_tf446/lib/python3.10/lib2to3/fixes/fix_urllib.py b/evalkit_tf446/lib/python3.10/lib2to3/fixes/fix_urllib.py new file mode 100644 index 0000000000000000000000000000000000000000..ab892bc52494c25bf5b46f92ca862965b4d99e5c --- /dev/null +++ b/evalkit_tf446/lib/python3.10/lib2to3/fixes/fix_urllib.py @@ -0,0 +1,196 @@ +"""Fix changes imports of urllib which are now incompatible. + This is rather similar to fix_imports, but because of the more + complex nature of the fixing for urllib, it has its own fixer. +""" +# Author: Nick Edds + +# Local imports +from lib2to3.fixes.fix_imports import alternates, FixImports +from lib2to3.fixer_util import (Name, Comma, FromImport, Newline, + find_indentation, Node, syms) + +MAPPING = {"urllib": [ + ("urllib.request", + ["URLopener", "FancyURLopener", "urlretrieve", + "_urlopener", "urlopen", "urlcleanup", + "pathname2url", "url2pathname", "getproxies"]), + ("urllib.parse", + ["quote", "quote_plus", "unquote", "unquote_plus", + "urlencode", "splitattr", "splithost", "splitnport", + "splitpasswd", "splitport", "splitquery", "splittag", + "splittype", "splituser", "splitvalue", ]), + ("urllib.error", + ["ContentTooShortError"])], + "urllib2" : [ + ("urllib.request", + ["urlopen", "install_opener", "build_opener", + "Request", "OpenerDirector", "BaseHandler", + "HTTPDefaultErrorHandler", "HTTPRedirectHandler", + "HTTPCookieProcessor", "ProxyHandler", + "HTTPPasswordMgr", + "HTTPPasswordMgrWithDefaultRealm", + "AbstractBasicAuthHandler", + "HTTPBasicAuthHandler", "ProxyBasicAuthHandler", + "AbstractDigestAuthHandler", + "HTTPDigestAuthHandler", "ProxyDigestAuthHandler", + "HTTPHandler", "HTTPSHandler", "FileHandler", + "FTPHandler", "CacheFTPHandler", + "UnknownHandler"]), + ("urllib.error", + ["URLError", "HTTPError"]), + ] +} + +# Duplicate the url parsing functions for urllib2. +MAPPING["urllib2"].append(MAPPING["urllib"][1]) + + +def build_pattern(): + bare = set() + for old_module, changes in MAPPING.items(): + for change in changes: + new_module, members = change + members = alternates(members) + yield """import_name< 'import' (module=%r + | dotted_as_names< any* module=%r any* >) > + """ % (old_module, old_module) + yield """import_from< 'from' mod_member=%r 'import' + ( member=%s | import_as_name< member=%s 'as' any > | + import_as_names< members=any* >) > + """ % (old_module, members, members) + yield """import_from< 'from' module_star=%r 'import' star='*' > + """ % old_module + yield """import_name< 'import' + dotted_as_name< module_as=%r 'as' any > > + """ % old_module + # bare_with_attr has a special significance for FixImports.match(). + yield """power< bare_with_attr=%r trailer< '.' 
member=%s > any* > + """ % (old_module, members) + + +class FixUrllib(FixImports): + + def build_pattern(self): + return "|".join(build_pattern()) + + def transform_import(self, node, results): + """Transform for the basic import case. Replaces the old + import name with a comma separated list of its + replacements. + """ + import_mod = results.get("module") + pref = import_mod.prefix + + names = [] + + # create a Node list of the replacement modules + for name in MAPPING[import_mod.value][:-1]: + names.extend([Name(name[0], prefix=pref), Comma()]) + names.append(Name(MAPPING[import_mod.value][-1][0], prefix=pref)) + import_mod.replace(names) + + def transform_member(self, node, results): + """Transform for imports of specific module elements. Replaces + the module to be imported from with the appropriate new + module. + """ + mod_member = results.get("mod_member") + pref = mod_member.prefix + member = results.get("member") + + # Simple case with only a single member being imported + if member: + # this may be a list of length one, or just a node + if isinstance(member, list): + member = member[0] + new_name = None + for change in MAPPING[mod_member.value]: + if member.value in change[1]: + new_name = change[0] + break + if new_name: + mod_member.replace(Name(new_name, prefix=pref)) + else: + self.cannot_convert(node, "This is an invalid module element") + + # Multiple members being imported + else: + # a dictionary for replacements, order matters + modules = [] + mod_dict = {} + members = results["members"] + for member in members: + # we only care about the actual members + if member.type == syms.import_as_name: + as_name = member.children[2].value + member_name = member.children[0].value + else: + member_name = member.value + as_name = None + if member_name != ",": + for change in MAPPING[mod_member.value]: + if member_name in change[1]: + if change[0] not in mod_dict: + modules.append(change[0]) + mod_dict.setdefault(change[0], []).append(member) + + new_nodes = [] + indentation = find_indentation(node) + first = True + def handle_name(name, prefix): + if name.type == syms.import_as_name: + kids = [Name(name.children[0].value, prefix=prefix), + name.children[1].clone(), + name.children[2].clone()] + return [Node(syms.import_as_name, kids)] + return [Name(name.value, prefix=prefix)] + for module in modules: + elts = mod_dict[module] + names = [] + for elt in elts[:-1]: + names.extend(handle_name(elt, pref)) + names.append(Comma()) + names.extend(handle_name(elts[-1], pref)) + new = FromImport(module, names) + if not first or node.parent.prefix.endswith(indentation): + new.prefix = indentation + new_nodes.append(new) + first = False + if new_nodes: + nodes = [] + for new_node in new_nodes[:-1]: + nodes.extend([new_node, Newline()]) + nodes.append(new_nodes[-1]) + node.replace(nodes) + else: + self.cannot_convert(node, "All module elements are invalid") + + def transform_dot(self, node, results): + """Transform for calls to module members in code.""" + module_dot = results.get("bare_with_attr") + member = results.get("member") + new_name = None + if isinstance(member, list): + member = member[0] + for change in MAPPING[module_dot.value]: + if member.value in change[1]: + new_name = change[0] + break + if new_name: + module_dot.replace(Name(new_name, + prefix=module_dot.prefix)) + else: + self.cannot_convert(node, "This is an invalid module element") + + def transform(self, node, results): + if results.get("module"): + self.transform_import(node, results) + elif results.get("mod_member"): 
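+            # e.g. 'from urllib import urlencode' is rewritten to
+            # 'from urllib.parse import urlencode' by transform_member().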
+ self.transform_member(node, results) + elif results.get("bare_with_attr"): + self.transform_dot(node, results) + # Renaming and star imports are not supported for these modules. + elif results.get("module_star"): + self.cannot_convert(node, "Cannot handle star imports.") + elif results.get("module_as"): + self.cannot_convert(node, "This module is now multiple modules") diff --git a/evalkit_tf446/lib/python3.10/lib2to3/fixes/fix_ws_comma.py b/evalkit_tf446/lib/python3.10/lib2to3/fixes/fix_ws_comma.py new file mode 100644 index 0000000000000000000000000000000000000000..a54a376c472afbff3f23f011d329af24813e5760 --- /dev/null +++ b/evalkit_tf446/lib/python3.10/lib2to3/fixes/fix_ws_comma.py @@ -0,0 +1,39 @@ +"""Fixer that changes 'a ,b' into 'a, b'. + +This also changes '{a :b}' into '{a: b}', but does not touch other +uses of colons. It does not touch other uses of whitespace. + +""" + +from .. import pytree +from ..pgen2 import token +from .. import fixer_base + +class FixWsComma(fixer_base.BaseFix): + + explicit = True # The user must ask for this fixers + + PATTERN = """ + any<(not(',') any)+ ',' ((not(',') any)+ ',')* [not(',') any]> + """ + + COMMA = pytree.Leaf(token.COMMA, ",") + COLON = pytree.Leaf(token.COLON, ":") + SEPS = (COMMA, COLON) + + def transform(self, node, results): + new = node.clone() + comma = False + for child in new.children: + if child in self.SEPS: + prefix = child.prefix + if prefix.isspace() and "\n" not in prefix: + child.prefix = "" + comma = True + else: + if comma: + prefix = child.prefix + if not prefix: + child.prefix = " " + comma = False + return new diff --git a/evalkit_tf446/lib/python3.10/lib2to3/fixes/fix_xrange.py b/evalkit_tf446/lib/python3.10/lib2to3/fixes/fix_xrange.py new file mode 100644 index 0000000000000000000000000000000000000000..1e491e166a3f1c4223d306b9ad5817bc31fab2ee --- /dev/null +++ b/evalkit_tf446/lib/python3.10/lib2to3/fixes/fix_xrange.py @@ -0,0 +1,73 @@ +# Copyright 2007 Google, Inc. All Rights Reserved. +# Licensed to PSF under a Contributor Agreement. + +"""Fixer that changes xrange(...) into range(...).""" + +# Local imports +from .. import fixer_base +from ..fixer_util import Name, Call, consuming_calls +from .. import patcomp + + +class FixXrange(fixer_base.BaseFix): + BM_compatible = True + PATTERN = """ + power< + (name='range'|name='xrange') trailer< '(' args=any ')' > + rest=any* > + """ + + def start_tree(self, tree, filename): + super(FixXrange, self).start_tree(tree, filename) + self.transformed_xranges = set() + + def finish_tree(self, tree, filename): + self.transformed_xranges = None + + def transform(self, node, results): + name = results["name"] + if name.value == "xrange": + return self.transform_xrange(node, results) + elif name.value == "range": + return self.transform_range(node, results) + else: + raise ValueError(repr(name)) + + def transform_xrange(self, node, results): + name = results["name"] + name.replace(Name("range", prefix=name.prefix)) + # This prevents the new range call from being wrapped in a list later. + self.transformed_xranges.add(id(node)) + + def transform_range(self, node, results): + if (id(node) not in self.transformed_xranges and + not self.in_special_context(node)): + range_call = Call(Name("range"), [results["args"].clone()]) + # Encase the range call in list(). + list_call = Call(Name("list"), [range_call], + prefix=node.prefix) + # Put things that were after the range() call after the list call. 
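+            # Illustrative (hypothetical input): `range(10)[:2]` is rewritten to
+            # `list(range(10))[:2]`, so any trailing subscripts or calls are kept.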
+ for n in results["rest"]: + list_call.append_child(n) + return list_call + + P1 = "power< func=NAME trailer< '(' node=any ')' > any* >" + p1 = patcomp.compile_pattern(P1) + + P2 = """for_stmt< 'for' any 'in' node=any ':' any* > + | comp_for< 'for' any 'in' node=any any* > + | comparison< any 'in' node=any any*> + """ + p2 = patcomp.compile_pattern(P2) + + def in_special_context(self, node): + if node.parent is None: + return False + results = {} + if (node.parent.parent is not None and + self.p1.match(node.parent.parent, results) and + results["node"] is node): + # list(d.keys()) -> list(d.keys()), etc. + return results["func"].value in consuming_calls + # for ... in d.iterkeys() -> for ... in d.keys(), etc. + return self.p2.match(node.parent, results) and results["node"] is node diff --git a/evalkit_tf446/lib/python3.10/lib2to3/fixes/fix_xreadlines.py b/evalkit_tf446/lib/python3.10/lib2to3/fixes/fix_xreadlines.py new file mode 100644 index 0000000000000000000000000000000000000000..3e3f71ab045573d9438bcbe5da6115b77dc3d7e9 --- /dev/null +++ b/evalkit_tf446/lib/python3.10/lib2to3/fixes/fix_xreadlines.py @@ -0,0 +1,25 @@ +"""Fix "for x in f.xreadlines()" -> "for x in f". + +This fixer will also convert g(f.xreadlines) into g(f.__iter__).""" +# Author: Collin Winter + +# Local imports +from .. import fixer_base +from ..fixer_util import Name + + +class FixXreadlines(fixer_base.BaseFix): + BM_compatible = True + PATTERN = """ + power< call=any+ trailer< '.' 'xreadlines' > trailer< '(' ')' > > + | + power< any+ trailer< '.' no_call='xreadlines' > > + """ + + def transform(self, node, results): + no_call = results.get("no_call") + + if no_call: + no_call.replace(Name("__iter__", prefix=no_call.prefix)) + else: + node.replace([x.clone() for x in results["call"]]) diff --git a/evalkit_tf446/lib/python3.10/lib2to3/fixes/fix_zip.py b/evalkit_tf446/lib/python3.10/lib2to3/fixes/fix_zip.py new file mode 100644 index 0000000000000000000000000000000000000000..52c28df6aab411ad1ea64af74b14edf35e80b5bb --- /dev/null +++ b/evalkit_tf446/lib/python3.10/lib2to3/fixes/fix_zip.py @@ -0,0 +1,46 @@ +""" +Fixer that changes zip(seq0, seq1, ...) into list(zip(seq0, seq1, ...) +unless there exists a 'from future_builtins import zip' statement in the +top-level namespace. + +We avoid the transformation if the zip() call is directly contained in +iter(<>), list(<>), tuple(<>), sorted(<>), ...join(<>), or for V in <>:. +""" + +# Local imports +from .. 
import fixer_base +from ..pytree import Node +from ..pygram import python_symbols as syms +from ..fixer_util import Name, ArgList, in_special_context + + +class FixZip(fixer_base.ConditionalFix): + + BM_compatible = True + PATTERN = """ + power< 'zip' args=trailer< '(' [any] ')' > [trailers=trailer*] + > + """ + + skip_on = "future_builtins.zip" + + def transform(self, node, results): + if self.should_skip(node): + return + + if in_special_context(node): + return None + + args = results['args'].clone() + args.prefix = "" + + trailers = [] + if 'trailers' in results: + trailers = [n.clone() for n in results['trailers']] + for n in trailers: + n.prefix = "" + + new = Node(syms.power, [Name("zip"), args], prefix="") + new = Node(syms.power, [Name("list"), ArgList([new])] + trailers) + new.prefix = node.prefix + return new diff --git a/evalkit_tf446/lib/python3.10/lib2to3/patcomp.py b/evalkit_tf446/lib/python3.10/lib2to3/patcomp.py new file mode 100644 index 0000000000000000000000000000000000000000..f57f4954b26ce7f59c44ce7cd67bf966fd594cd5 --- /dev/null +++ b/evalkit_tf446/lib/python3.10/lib2to3/patcomp.py @@ -0,0 +1,204 @@ +# Copyright 2006 Google, Inc. All Rights Reserved. +# Licensed to PSF under a Contributor Agreement. + +"""Pattern compiler. + +The grammar is taken from PatternGrammar.txt. + +The compiler compiles a pattern to a pytree.*Pattern instance. +""" + +__author__ = "Guido van Rossum " + +# Python imports +import io + +# Fairly local imports +from .pgen2 import driver, literals, token, tokenize, parse, grammar + +# Really local imports +from . import pytree +from . import pygram + + +class PatternSyntaxError(Exception): + pass + + +def tokenize_wrapper(input): + """Tokenizes a string suppressing significant whitespace.""" + skip = {token.NEWLINE, token.INDENT, token.DEDENT} + tokens = tokenize.generate_tokens(io.StringIO(input).readline) + for quintuple in tokens: + type, value, start, end, line_text = quintuple + if type not in skip: + yield quintuple + + +class PatternCompiler(object): + + def __init__(self, grammar_file=None): + """Initializer. + + Takes an optional alternative filename for the pattern grammar. + """ + if grammar_file is None: + self.grammar = pygram.pattern_grammar + self.syms = pygram.pattern_symbols + else: + self.grammar = driver.load_grammar(grammar_file) + self.syms = pygram.Symbols(self.grammar) + self.pygrammar = pygram.python_grammar + self.pysyms = pygram.python_symbols + self.driver = driver.Driver(self.grammar, convert=pattern_convert) + + def compile_pattern(self, input, debug=False, with_tree=False): + """Compiles a pattern string to a nested pytree.*Pattern object.""" + tokens = tokenize_wrapper(input) + try: + root = self.driver.parse_tokens(tokens, debug=debug) + except parse.ParseError as e: + raise PatternSyntaxError(str(e)) from None + if with_tree: + return self.compile_node(root), root + else: + return self.compile_node(root) + + def compile_node(self, node): + """Compiles a node, recursively. + + This is one big switch on the node type. 
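+        (The branches below handle Matcher, Alternatives, Alternative,
+        NegatedUnit and Unit pattern-grammar nodes in turn.)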
+ """ + # XXX Optimize certain Wildcard-containing-Wildcard patterns + # that can be merged + if node.type == self.syms.Matcher: + node = node.children[0] # Avoid unneeded recursion + + if node.type == self.syms.Alternatives: + # Skip the odd children since they are just '|' tokens + alts = [self.compile_node(ch) for ch in node.children[::2]] + if len(alts) == 1: + return alts[0] + p = pytree.WildcardPattern([[a] for a in alts], min=1, max=1) + return p.optimize() + + if node.type == self.syms.Alternative: + units = [self.compile_node(ch) for ch in node.children] + if len(units) == 1: + return units[0] + p = pytree.WildcardPattern([units], min=1, max=1) + return p.optimize() + + if node.type == self.syms.NegatedUnit: + pattern = self.compile_basic(node.children[1:]) + p = pytree.NegatedPattern(pattern) + return p.optimize() + + assert node.type == self.syms.Unit + + name = None + nodes = node.children + if len(nodes) >= 3 and nodes[1].type == token.EQUAL: + name = nodes[0].value + nodes = nodes[2:] + repeat = None + if len(nodes) >= 2 and nodes[-1].type == self.syms.Repeater: + repeat = nodes[-1] + nodes = nodes[:-1] + + # Now we've reduced it to: STRING | NAME [Details] | (...) | [...] + pattern = self.compile_basic(nodes, repeat) + + if repeat is not None: + assert repeat.type == self.syms.Repeater + children = repeat.children + child = children[0] + if child.type == token.STAR: + min = 0 + max = pytree.HUGE + elif child.type == token.PLUS: + min = 1 + max = pytree.HUGE + elif child.type == token.LBRACE: + assert children[-1].type == token.RBRACE + assert len(children) in (3, 5) + min = max = self.get_int(children[1]) + if len(children) == 5: + max = self.get_int(children[3]) + else: + assert False + if min != 1 or max != 1: + pattern = pattern.optimize() + pattern = pytree.WildcardPattern([[pattern]], min=min, max=max) + + if name is not None: + pattern.name = name + return pattern.optimize() + + def compile_basic(self, nodes, repeat=None): + # Compile STRING | NAME [Details] | (...) | [...] 
+ assert len(nodes) >= 1 + node = nodes[0] + if node.type == token.STRING: + value = str(literals.evalString(node.value)) + return pytree.LeafPattern(_type_of_literal(value), value) + elif node.type == token.NAME: + value = node.value + if value.isupper(): + if value not in TOKEN_MAP: + raise PatternSyntaxError("Invalid token: %r" % value) + if nodes[1:]: + raise PatternSyntaxError("Can't have details for token") + return pytree.LeafPattern(TOKEN_MAP[value]) + else: + if value == "any": + type = None + elif not value.startswith("_"): + type = getattr(self.pysyms, value, None) + if type is None: + raise PatternSyntaxError("Invalid symbol: %r" % value) + if nodes[1:]: # Details present + content = [self.compile_node(nodes[1].children[1])] + else: + content = None + return pytree.NodePattern(type, content) + elif node.value == "(": + return self.compile_node(nodes[1]) + elif node.value == "[": + assert repeat is None + subpattern = self.compile_node(nodes[1]) + return pytree.WildcardPattern([[subpattern]], min=0, max=1) + assert False, node + + def get_int(self, node): + assert node.type == token.NUMBER + return int(node.value) + + +# Map named tokens to the type value for a LeafPattern +TOKEN_MAP = {"NAME": token.NAME, + "STRING": token.STRING, + "NUMBER": token.NUMBER, + "TOKEN": None} + + +def _type_of_literal(value): + if value[0].isalpha(): + return token.NAME + elif value in grammar.opmap: + return grammar.opmap[value] + else: + return None + + +def pattern_convert(grammar, raw_node_info): + """Converts raw node information to a Node or Leaf instance.""" + type, value, context, children = raw_node_info + if children or type in grammar.number2symbol: + return pytree.Node(type, children, context=context) + else: + return pytree.Leaf(type, value, context=context) + + +def compile_pattern(pattern): + return PatternCompiler().compile_pattern(pattern) diff --git a/evalkit_tf446/lib/python3.10/lib2to3/pygram.py b/evalkit_tf446/lib/python3.10/lib2to3/pygram.py new file mode 100644 index 0000000000000000000000000000000000000000..24d9db9217f131adf50f6eca7d571ce69cbcbc3e --- /dev/null +++ b/evalkit_tf446/lib/python3.10/lib2to3/pygram.py @@ -0,0 +1,43 @@ +# Copyright 2006 Google, Inc. All Rights Reserved. +# Licensed to PSF under a Contributor Agreement. + +"""Export the Python grammar and symbols.""" + +# Python imports +import os + +# Local imports +from .pgen2 import token +from .pgen2 import driver +from . import pytree + +# The grammar file +_GRAMMAR_FILE = os.path.join(os.path.dirname(__file__), "Grammar.txt") +_PATTERN_GRAMMAR_FILE = os.path.join(os.path.dirname(__file__), + "PatternGrammar.txt") + + +class Symbols(object): + + def __init__(self, grammar): + """Initializer. + + Creates an attribute for each grammar symbol (nonterminal), + whose value is the symbol's type (an int >= 256). 
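+        For example (illustrative), python_symbols.funcdef holds the number
+        generated for the 'funcdef' rule; any grammar rule name works the same way.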
+ """ + for name, symbol in grammar.symbol2number.items(): + setattr(self, name, symbol) + + +python_grammar = driver.load_packaged_grammar("lib2to3", _GRAMMAR_FILE) + +python_symbols = Symbols(python_grammar) + +python_grammar_no_print_statement = python_grammar.copy() +del python_grammar_no_print_statement.keywords["print"] + +python_grammar_no_print_and_exec_statement = python_grammar_no_print_statement.copy() +del python_grammar_no_print_and_exec_statement.keywords["exec"] + +pattern_grammar = driver.load_packaged_grammar("lib2to3", _PATTERN_GRAMMAR_FILE) +pattern_symbols = Symbols(pattern_grammar) diff --git a/evalkit_tf446/lib/python3.10/lib2to3/pytree.py b/evalkit_tf446/lib/python3.10/lib2to3/pytree.py new file mode 100644 index 0000000000000000000000000000000000000000..729023df0284e15ad8d7ac9f30cf7ed8d42db272 --- /dev/null +++ b/evalkit_tf446/lib/python3.10/lib2to3/pytree.py @@ -0,0 +1,853 @@ +# Copyright 2006 Google, Inc. All Rights Reserved. +# Licensed to PSF under a Contributor Agreement. + +""" +Python parse tree definitions. + +This is a very concrete parse tree; we need to keep every token and +even the comments and whitespace between tokens. + +There's also a pattern matching implementation here. +""" + +__author__ = "Guido van Rossum " + +import sys +from io import StringIO + +HUGE = 0x7FFFFFFF # maximum repeat count, default max + +_type_reprs = {} +def type_repr(type_num): + global _type_reprs + if not _type_reprs: + from .pygram import python_symbols + # printing tokens is possible but not as useful + # from .pgen2 import token // token.__dict__.items(): + for name, val in python_symbols.__dict__.items(): + if type(val) == int: _type_reprs[val] = name + return _type_reprs.setdefault(type_num, type_num) + +class Base(object): + + """ + Abstract base class for Node and Leaf. + + This provides some default functionality and boilerplate using the + template pattern. + + A node may be a subnode of at most one parent. + """ + + # Default values for instance variables + type = None # int: token number (< 256) or symbol number (>= 256) + parent = None # Parent node pointer, or None + children = () # Tuple of subnodes + was_changed = False + was_checked = False + + def __new__(cls, *args, **kwds): + """Constructor that prevents Base from being instantiated.""" + assert cls is not Base, "Cannot instantiate Base" + return object.__new__(cls) + + def __eq__(self, other): + """ + Compare two nodes for equality. + + This calls the method _eq(). + """ + if self.__class__ is not other.__class__: + return NotImplemented + return self._eq(other) + + __hash__ = None # For Py3 compatibility. + + def _eq(self, other): + """ + Compare two nodes for equality. + + This is called by __eq__ and __ne__. It is only called if the two nodes + have the same type. This must be implemented by the concrete subclass. + Nodes should be considered equal if they have the same structure, + ignoring the prefix string and other context information. + """ + raise NotImplementedError + + def clone(self): + """ + Return a cloned (deep) copy of self. + + This must be implemented by the concrete subclass. + """ + raise NotImplementedError + + def post_order(self): + """ + Return a post-order iterator for the tree. + + This must be implemented by the concrete subclass. + """ + raise NotImplementedError + + def pre_order(self): + """ + Return a pre-order iterator for the tree. + + This must be implemented by the concrete subclass. 
+ """ + raise NotImplementedError + + def replace(self, new): + """Replace this node with a new one in the parent.""" + assert self.parent is not None, str(self) + assert new is not None + if not isinstance(new, list): + new = [new] + l_children = [] + found = False + for ch in self.parent.children: + if ch is self: + assert not found, (self.parent.children, self, new) + if new is not None: + l_children.extend(new) + found = True + else: + l_children.append(ch) + assert found, (self.children, self, new) + self.parent.changed() + self.parent.children = l_children + for x in new: + x.parent = self.parent + self.parent = None + + def get_lineno(self): + """Return the line number which generated the invocant node.""" + node = self + while not isinstance(node, Leaf): + if not node.children: + return + node = node.children[0] + return node.lineno + + def changed(self): + if self.parent: + self.parent.changed() + self.was_changed = True + + def remove(self): + """ + Remove the node from the tree. Returns the position of the node in its + parent's children before it was removed. + """ + if self.parent: + for i, node in enumerate(self.parent.children): + if node is self: + self.parent.changed() + del self.parent.children[i] + self.parent = None + return i + + @property + def next_sibling(self): + """ + The node immediately following the invocant in their parent's children + list. If the invocant does not have a next sibling, it is None + """ + if self.parent is None: + return None + + # Can't use index(); we need to test by identity + for i, child in enumerate(self.parent.children): + if child is self: + try: + return self.parent.children[i+1] + except IndexError: + return None + + @property + def prev_sibling(self): + """ + The node immediately preceding the invocant in their parent's children + list. If the invocant does not have a previous sibling, it is None. + """ + if self.parent is None: + return None + + # Can't use index(); we need to test by identity + for i, child in enumerate(self.parent.children): + if child is self: + if i == 0: + return None + return self.parent.children[i-1] + + def leaves(self): + for child in self.children: + yield from child.leaves() + + def depth(self): + if self.parent is None: + return 0 + return 1 + self.parent.depth() + + def get_suffix(self): + """ + Return the string immediately following the invocant node. This is + effectively equivalent to node.next_sibling.prefix + """ + next_sib = self.next_sibling + if next_sib is None: + return "" + return next_sib.prefix + + if sys.version_info < (3, 0): + def __str__(self): + return str(self).encode("ascii") + +class Node(Base): + + """Concrete implementation for interior nodes.""" + + def __init__(self,type, children, + context=None, + prefix=None, + fixers_applied=None): + """ + Initializer. + + Takes a type constant (a symbol number >= 256), a sequence of + child nodes, and an optional context keyword argument. + + As a side effect, the parent pointers of the children are updated. 
+ """ + assert type >= 256, type + self.type = type + self.children = list(children) + for ch in self.children: + assert ch.parent is None, repr(ch) + ch.parent = self + if prefix is not None: + self.prefix = prefix + if fixers_applied: + self.fixers_applied = fixers_applied[:] + else: + self.fixers_applied = None + + def __repr__(self): + """Return a canonical string representation.""" + return "%s(%s, %r)" % (self.__class__.__name__, + type_repr(self.type), + self.children) + + def __unicode__(self): + """ + Return a pretty string representation. + + This reproduces the input source exactly. + """ + return "".join(map(str, self.children)) + + if sys.version_info > (3, 0): + __str__ = __unicode__ + + def _eq(self, other): + """Compare two nodes for equality.""" + return (self.type, self.children) == (other.type, other.children) + + def clone(self): + """Return a cloned (deep) copy of self.""" + return Node(self.type, [ch.clone() for ch in self.children], + fixers_applied=self.fixers_applied) + + def post_order(self): + """Return a post-order iterator for the tree.""" + for child in self.children: + yield from child.post_order() + yield self + + def pre_order(self): + """Return a pre-order iterator for the tree.""" + yield self + for child in self.children: + yield from child.pre_order() + + @property + def prefix(self): + """ + The whitespace and comments preceding this node in the input. + """ + if not self.children: + return "" + return self.children[0].prefix + + @prefix.setter + def prefix(self, prefix): + if self.children: + self.children[0].prefix = prefix + + def set_child(self, i, child): + """ + Equivalent to 'node.children[i] = child'. This method also sets the + child's parent attribute appropriately. + """ + child.parent = self + self.children[i].parent = None + self.children[i] = child + self.changed() + + def insert_child(self, i, child): + """ + Equivalent to 'node.children.insert(i, child)'. This method also sets + the child's parent attribute appropriately. + """ + child.parent = self + self.children.insert(i, child) + self.changed() + + def append_child(self, child): + """ + Equivalent to 'node.children.append(child)'. This method also sets the + child's parent attribute appropriately. + """ + child.parent = self + self.children.append(child) + self.changed() + + +class Leaf(Base): + + """Concrete implementation for leaf nodes.""" + + # Default values for instance variables + _prefix = "" # Whitespace and comments preceding this token in the input + lineno = 0 # Line where this token starts in the input + column = 0 # Column where this token tarts in the input + + def __init__(self, type, value, + context=None, + prefix=None, + fixers_applied=[]): + """ + Initializer. + + Takes a type constant (a token number < 256), a string value, and an + optional context keyword argument. + """ + assert 0 <= type < 256, type + if context is not None: + self._prefix, (self.lineno, self.column) = context + self.type = type + self.value = value + if prefix is not None: + self._prefix = prefix + self.fixers_applied = fixers_applied[:] + + def __repr__(self): + """Return a canonical string representation.""" + return "%s(%r, %r)" % (self.__class__.__name__, + self.type, + self.value) + + def __unicode__(self): + """ + Return a pretty string representation. + + This reproduces the input source exactly. 
+ """ + return self.prefix + str(self.value) + + if sys.version_info > (3, 0): + __str__ = __unicode__ + + def _eq(self, other): + """Compare two nodes for equality.""" + return (self.type, self.value) == (other.type, other.value) + + def clone(self): + """Return a cloned (deep) copy of self.""" + return Leaf(self.type, self.value, + (self.prefix, (self.lineno, self.column)), + fixers_applied=self.fixers_applied) + + def leaves(self): + yield self + + def post_order(self): + """Return a post-order iterator for the tree.""" + yield self + + def pre_order(self): + """Return a pre-order iterator for the tree.""" + yield self + + @property + def prefix(self): + """ + The whitespace and comments preceding this token in the input. + """ + return self._prefix + + @prefix.setter + def prefix(self, prefix): + self.changed() + self._prefix = prefix + +def convert(gr, raw_node): + """ + Convert raw node information to a Node or Leaf instance. + + This is passed to the parser driver which calls it whenever a reduction of a + grammar rule produces a new complete node, so that the tree is build + strictly bottom-up. + """ + type, value, context, children = raw_node + if children or type in gr.number2symbol: + # If there's exactly one child, return that child instead of + # creating a new node. + if len(children) == 1: + return children[0] + return Node(type, children, context=context) + else: + return Leaf(type, value, context=context) + + +class BasePattern(object): + + """ + A pattern is a tree matching pattern. + + It looks for a specific node type (token or symbol), and + optionally for a specific content. + + This is an abstract base class. There are three concrete + subclasses: + + - LeafPattern matches a single leaf node; + - NodePattern matches a single node (usually non-leaf); + - WildcardPattern matches a sequence of nodes of variable length. + """ + + # Defaults for instance variables + type = None # Node type (token if < 256, symbol if >= 256) + content = None # Optional content matching pattern + name = None # Optional name used to store match in results dict + + def __new__(cls, *args, **kwds): + """Constructor that prevents BasePattern from being instantiated.""" + assert cls is not BasePattern, "Cannot instantiate BasePattern" + return object.__new__(cls) + + def __repr__(self): + args = [type_repr(self.type), self.content, self.name] + while args and args[-1] is None: + del args[-1] + return "%s(%s)" % (self.__class__.__name__, ", ".join(map(repr, args))) + + def optimize(self): + """ + A subclass can define this as a hook for optimizations. + + Returns either self or another node with the same effect. + """ + return self + + def match(self, node, results=None): + """ + Does this pattern exactly match a node? + + Returns True if it matches, False if not. + + If results is not None, it must be a dict which will be + updated with the nodes matching named subpatterns. + + Default implementation for non-wildcard patterns. + """ + if self.type is not None and node.type != self.type: + return False + if self.content is not None: + r = None + if results is not None: + r = {} + if not self._submatch(node, r): + return False + if r: + results.update(r) + if results is not None and self.name: + results[self.name] = node + return True + + def match_seq(self, nodes, results=None): + """ + Does this pattern exactly match a sequence of nodes? + + Default implementation for non-wildcard patterns. 
+ """ + if len(nodes) != 1: + return False + return self.match(nodes[0], results) + + def generate_matches(self, nodes): + """ + Generator yielding all matches for this pattern. + + Default implementation for non-wildcard patterns. + """ + r = {} + if nodes and self.match(nodes[0], r): + yield 1, r + + +class LeafPattern(BasePattern): + + def __init__(self, type=None, content=None, name=None): + """ + Initializer. Takes optional type, content, and name. + + The type, if given must be a token type (< 256). If not given, + this matches any *leaf* node; the content may still be required. + + The content, if given, must be a string. + + If a name is given, the matching node is stored in the results + dict under that key. + """ + if type is not None: + assert 0 <= type < 256, type + if content is not None: + assert isinstance(content, str), repr(content) + self.type = type + self.content = content + self.name = name + + def match(self, node, results=None): + """Override match() to insist on a leaf node.""" + if not isinstance(node, Leaf): + return False + return BasePattern.match(self, node, results) + + def _submatch(self, node, results=None): + """ + Match the pattern's content to the node's children. + + This assumes the node type matches and self.content is not None. + + Returns True if it matches, False if not. + + If results is not None, it must be a dict which will be + updated with the nodes matching named subpatterns. + + When returning False, the results dict may still be updated. + """ + return self.content == node.value + + +class NodePattern(BasePattern): + + wildcards = False + + def __init__(self, type=None, content=None, name=None): + """ + Initializer. Takes optional type, content, and name. + + The type, if given, must be a symbol type (>= 256). If the + type is None this matches *any* single node (leaf or not), + except if content is not None, in which it only matches + non-leaf nodes that also match the content pattern. + + The content, if not None, must be a sequence of Patterns that + must match the node's children exactly. If the content is + given, the type must not be None. + + If a name is given, the matching node is stored in the results + dict under that key. + """ + if type is not None: + assert type >= 256, type + if content is not None: + assert not isinstance(content, str), repr(content) + content = list(content) + for i, item in enumerate(content): + assert isinstance(item, BasePattern), (i, item) + if isinstance(item, WildcardPattern): + self.wildcards = True + self.type = type + self.content = content + self.name = name + + def _submatch(self, node, results=None): + """ + Match the pattern's content to the node's children. + + This assumes the node type matches and self.content is not None. + + Returns True if it matches, False if not. + + If results is not None, it must be a dict which will be + updated with the nodes matching named subpatterns. + + When returning False, the results dict may still be updated. + """ + if self.wildcards: + for c, r in generate_matches(self.content, node.children): + if c == len(node.children): + if results is not None: + results.update(r) + return True + return False + if len(self.content) != len(node.children): + return False + for subpattern, child in zip(self.content, node.children): + if not subpattern.match(child, results): + return False + return True + + +class WildcardPattern(BasePattern): + + """ + A wildcard pattern can match zero or more nodes. 
+ + This has all the flexibility needed to implement patterns like: + + .* .+ .? .{m,n} + (a b c | d e | f) + (...)* (...)+ (...)? (...){m,n} + + except it always uses non-greedy matching. + """ + + def __init__(self, content=None, min=0, max=HUGE, name=None): + """ + Initializer. + + Args: + content: optional sequence of subsequences of patterns; + if absent, matches one node; + if present, each subsequence is an alternative [*] + min: optional minimum number of times to match, default 0 + max: optional maximum number of times to match, default HUGE + name: optional name assigned to this match + + [*] Thus, if content is [[a, b, c], [d, e], [f, g, h]] this is + equivalent to (a b c | d e | f g h); if content is None, + this is equivalent to '.' in regular expression terms. + The min and max parameters work as follows: + min=0, max=maxint: .* + min=1, max=maxint: .+ + min=0, max=1: .? + min=1, max=1: . + If content is not None, replace the dot with the parenthesized + list of alternatives, e.g. (a b c | d e | f g h)* + """ + assert 0 <= min <= max <= HUGE, (min, max) + if content is not None: + content = tuple(map(tuple, content)) # Protect against alterations + # Check sanity of alternatives + assert len(content), repr(content) # Can't have zero alternatives + for alt in content: + assert len(alt), repr(alt) # Can have empty alternatives + self.content = content + self.min = min + self.max = max + self.name = name + + def optimize(self): + """Optimize certain stacked wildcard patterns.""" + subpattern = None + if (self.content is not None and + len(self.content) == 1 and len(self.content[0]) == 1): + subpattern = self.content[0][0] + if self.min == 1 and self.max == 1: + if self.content is None: + return NodePattern(name=self.name) + if subpattern is not None and self.name == subpattern.name: + return subpattern.optimize() + if (self.min <= 1 and isinstance(subpattern, WildcardPattern) and + subpattern.min <= 1 and self.name == subpattern.name): + return WildcardPattern(subpattern.content, + self.min*subpattern.min, + self.max*subpattern.max, + subpattern.name) + return self + + def match(self, node, results=None): + """Does this pattern exactly match a node?""" + return self.match_seq([node], results) + + def match_seq(self, nodes, results=None): + """Does this pattern exactly match a sequence of nodes?""" + for c, r in self.generate_matches(nodes): + if c == len(nodes): + if results is not None: + results.update(r) + if self.name: + results[self.name] = list(nodes) + return True + return False + + def generate_matches(self, nodes): + """ + Generator yielding matches for a sequence of nodes. + + Args: + nodes: sequence of nodes + + Yields: + (count, results) tuples where: + count: the match comprises nodes[:count]; + results: dict containing named submatches. + """ + if self.content is None: + # Shortcut for special case (see __init__.__doc__) + for count in range(self.min, 1 + min(len(nodes), self.max)): + r = {} + if self.name: + r[self.name] = nodes[:count] + yield count, r + elif self.name == "bare_name": + yield self._bare_name_matches(nodes) + else: + # The reason for this is that hitting the recursion limit usually + # results in some ugly messages about how RuntimeErrors are being + # ignored. We only have to do this on CPython, though, because other + # implementations don't have this nasty bug in the first place. 
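+            # (The presence of sys.getrefcount is used here as a proxy for
+            # "running on CPython"; other implementations generally omit it.)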
+ if hasattr(sys, "getrefcount"): + save_stderr = sys.stderr + sys.stderr = StringIO() + try: + for count, r in self._recursive_matches(nodes, 0): + if self.name: + r[self.name] = nodes[:count] + yield count, r + except RuntimeError: + # Fall back to the iterative pattern matching scheme if the + # recursive scheme hits the recursion limit (RecursionError). + for count, r in self._iterative_matches(nodes): + if self.name: + r[self.name] = nodes[:count] + yield count, r + finally: + if hasattr(sys, "getrefcount"): + sys.stderr = save_stderr + + def _iterative_matches(self, nodes): + """Helper to iteratively yield the matches.""" + nodelen = len(nodes) + if 0 >= self.min: + yield 0, {} + + results = [] + # generate matches that use just one alt from self.content + for alt in self.content: + for c, r in generate_matches(alt, nodes): + yield c, r + results.append((c, r)) + + # for each match, iterate down the nodes + while results: + new_results = [] + for c0, r0 in results: + # stop if the entire set of nodes has been matched + if c0 < nodelen and c0 <= self.max: + for alt in self.content: + for c1, r1 in generate_matches(alt, nodes[c0:]): + if c1 > 0: + r = {} + r.update(r0) + r.update(r1) + yield c0 + c1, r + new_results.append((c0 + c1, r)) + results = new_results + + def _bare_name_matches(self, nodes): + """Special optimized matcher for bare_name.""" + count = 0 + r = {} + done = False + max = len(nodes) + while not done and count < max: + done = True + for leaf in self.content: + if leaf[0].match(nodes[count], r): + count += 1 + done = False + break + r[self.name] = nodes[:count] + return count, r + + def _recursive_matches(self, nodes, count): + """Helper to recursively yield the matches.""" + assert self.content is not None + if count >= self.min: + yield 0, {} + if count < self.max: + for alt in self.content: + for c0, r0 in generate_matches(alt, nodes): + for c1, r1 in self._recursive_matches(nodes[c0:], count+1): + r = {} + r.update(r0) + r.update(r1) + yield c0 + c1, r + + +class NegatedPattern(BasePattern): + + def __init__(self, content=None): + """ + Initializer. + + The argument is either a pattern or None. If it is None, this + only matches an empty sequence (effectively '$' in regex + lingo). If it is not None, this matches whenever the argument + pattern doesn't have any matches. + """ + if content is not None: + assert isinstance(content, BasePattern), repr(content) + self.content = content + + def match(self, node): + # We never match a node in its entirety + return False + + def match_seq(self, nodes): + # We only match an empty sequence of nodes in its entirety + return len(nodes) == 0 + + def generate_matches(self, nodes): + if self.content is None: + # Return a match if there is an empty sequence + if len(nodes) == 0: + yield 0, {} + else: + # Return a match if the argument pattern has no matches + for c, r in self.content.generate_matches(nodes): + return + yield 0, {} + + +def generate_matches(patterns, nodes): + """ + Generator yielding matches for a sequence of patterns and nodes. + + Args: + patterns: a sequence of patterns + nodes: a sequence of nodes + + Yields: + (count, results) tuples where: + count: the entire sequence of patterns matches nodes[:count]; + results: dict containing named submatches. 
+ """ + if not patterns: + yield 0, {} + else: + p, rest = patterns[0], patterns[1:] + for c0, r0 in p.generate_matches(nodes): + if not rest: + yield c0, r0 + else: + for c1, r1 in generate_matches(rest, nodes[c0:]): + r = {} + r.update(r0) + r.update(r1) + yield c0 + c1, r