| id (int64, 0–190k) | prompt (string, lengths 21–13.4M) | docstring (string, lengths 1–12k, nullable) |
|---|---|---|
186,372 | import os
import sys
import textwrap
from argparse import ArgumentParser
from contextlib import contextmanager
from pathlib import Path
import asdl
TABSIZE = 4
def reflow_c_string(s, depth):
return '"%s"' % s.replace('\n', '\\n"\n%s"' % (' ' * depth * TABSIZE)) | null |
186,373 | import os
import sys
import textwrap
from argparse import ArgumentParser
from contextlib import contextmanager
from pathlib import Path
import asdl
def is_simple(sum):
"""Return True if a sum is a simple.
A sum is simple if its types have no fields, e.g.
unaryop = Invert | Not | UAdd | USub
"""
for t in sum.types:
if t.fields:
return False
return True
def asdl_of(name, obj):
if isinstance(obj, asdl.Product) or isinstance(obj, asdl.Constructor):
fields = ", ".join(map(str, obj.fields))
if fields:
fields = "({})".format(fields)
return "{}{}".format(name, fields)
else:
if is_simple(obj):
types = " | ".join(type.name for type in obj.types)
else:
sep = "\n{}| ".format(" " * (len(name) + 1))
types = sep.join(
asdl_of(type.name, type) for type in obj.types
)
return "{} = {}".format(name, types) | null |
186,374 | import os
import sys
import textwrap
from argparse import ArgumentParser
from contextlib import contextmanager
from pathlib import Path
import asdl
def ast_func_name(name):
return f"_PyAST_{name}" | null |
186,375 | import os
import sys
import textwrap
from argparse import ArgumentParser
from contextlib import contextmanager
from pathlib import Path
import asdl
class TypeDefVisitor(EmitVisitor):
def visitModule(self, mod):
for dfn in mod.dfns:
self.visit(dfn)
def visitType(self, type, depth=0):
self.visit(type.value, type.name, depth)
def visitSum(self, sum, name, depth):
if is_simple(sum):
self.simple_sum(sum, name, depth)
else:
self.sum_with_constructors(sum, name, depth)
def simple_sum(self, sum, name, depth):
enum = []
for i in range(len(sum.types)):
type = sum.types[i]
enum.append("%s=%d" % (type.name, i + 1))
enums = ", ".join(enum)
ctype = get_c_type(name)
s = "typedef enum _%s { %s } %s;" % (name, enums, ctype)
self.emit(s, depth)
self.emit("", depth)
def sum_with_constructors(self, sum, name, depth):
ctype = get_c_type(name)
s = "typedef struct _%(name)s *%(ctype)s;" % locals()
self.emit(s, depth)
self.emit("", depth)
def visitProduct(self, product, name, depth):
ctype = get_c_type(name)
s = "typedef struct _%(name)s *%(ctype)s;" % locals()
self.emit(s, depth)
self.emit("", depth)
class SequenceDefVisitor(EmitVisitor):
def visitModule(self, mod):
for dfn in mod.dfns:
self.visit(dfn)
def visitType(self, type, depth=0):
self.visit(type.value, type.name, depth)
def visitSum(self, sum, name, depth):
if is_simple(sum):
return
self.emit_sequence_constructor(name, depth)
def emit_sequence_constructor(self, name,depth):
ctype = get_c_type(name)
self.emit("""\
typedef struct {
_ASDL_SEQ_HEAD
%(ctype)s typed_elements[1];
} asdl_%(name)s_seq;""" % locals(), reflow=False, depth=depth)
self.emit("", depth)
self.emit("CiAPI_FUNC(asdl_%(name)s_seq *) _Py_asdl_%(name)s_seq_new(Py_ssize_t size, PyArena *arena);" % locals(), depth)
self.emit("", depth)
def visitProduct(self, product, name, depth):
self.emit_sequence_constructor(name, depth)
class StructVisitor(EmitVisitor):
"""Visitor to generate typedefs for AST."""
def visitModule(self, mod):
for dfn in mod.dfns:
self.visit(dfn)
def visitType(self, type, depth=0):
self.visit(type.value, type.name, depth)
def visitSum(self, sum, name, depth):
if not is_simple(sum):
self.sum_with_constructors(sum, name, depth)
def sum_with_constructors(self, sum, name, depth):
def emit(s, depth=depth):
self.emit(s % sys._getframe(1).f_locals, depth)
enum = []
for i in range(len(sum.types)):
type = sum.types[i]
enum.append("%s_kind=%d" % (type.name, i + 1))
emit("enum _%(name)s_kind {" + ", ".join(enum) + "};")
emit("struct _%(name)s {")
emit("enum _%(name)s_kind kind;", depth + 1)
emit("union {", depth + 1)
for t in sum.types:
self.visit(t, depth + 2)
emit("} v;", depth + 1)
for field in sum.attributes:
# rudimentary attribute handling
type = str(field.type)
assert type in asdl.builtin_types, type
emit("%s %s;" % (type, field.name), depth + 1);
emit("};")
emit("")
def visitConstructor(self, cons, depth):
if cons.fields:
self.emit("struct {", depth)
for f in cons.fields:
self.visit(f, depth + 1)
self.emit("} %s;" % cons.name, depth)
self.emit("", depth)
def visitField(self, field, depth):
# XXX need to lookup field.type, because it might be something
# like a builtin...
ctype = get_c_type(field.type)
name = field.name
if field.seq:
if field.type == 'cmpop':
self.emit("asdl_int_seq *%(name)s;" % locals(), depth)
else:
_type = field.type
self.emit("asdl_%(_type)s_seq *%(name)s;" % locals(), depth)
else:
self.emit("%(ctype)s %(name)s;" % locals(), depth)
def visitProduct(self, product, name, depth):
self.emit("struct _%(name)s {" % locals(), depth)
for f in product.fields:
self.visit(f, depth + 1)
for field in product.attributes:
# rudimentary attribute handling
type = str(field.type)
assert type in asdl.builtin_types, type
self.emit("%s %s;" % (type, field.name), depth + 1);
self.emit("};", depth)
self.emit("", depth)
class PrototypeVisitor(EmitVisitor):
"""Generate function prototypes for the .h file"""
def visitModule(self, mod):
for dfn in mod.dfns:
self.visit(dfn)
def visitType(self, type):
self.visit(type.value, type.name)
def visitSum(self, sum, name):
if is_simple(sum):
pass # XXX
else:
for t in sum.types:
self.visit(t, name, sum.attributes)
def get_args(self, fields):
"""Return list of C argument into, one for each field.
Argument info is 3-tuple of a C type, variable name, and flag
that is true if type can be NULL.
"""
args = []
unnamed = {}
for f in fields:
if f.name is None:
name = f.type
c = unnamed[name] = unnamed.get(name, 0) + 1
if c > 1:
name = "name%d" % (c - 1)
else:
name = f.name
# XXX should extend get_c_type() to handle this
if f.seq:
if f.type == 'cmpop':
ctype = "asdl_int_seq *"
else:
ctype = f"asdl_{f.type}_seq *"
else:
ctype = get_c_type(f.type)
args.append((ctype, name, f.opt or f.seq))
return args
def visitConstructor(self, cons, type, attrs):
args = self.get_args(cons.fields)
attrs = self.get_args(attrs)
ctype = get_c_type(type)
self.emit_function(cons.name, ctype, args, attrs)
def emit_function(self, name, ctype, args, attrs, union=True):
args = args + attrs
if args:
argstr = ", ".join(["%s %s" % (atype, aname)
for atype, aname, opt in args])
argstr += ", PyArena *arena"
else:
argstr = "PyArena *arena"
self.emit("CiAPI_FUNC(%s) %s(%s);" % (ctype, ast_func_name(name), argstr), False)
def visitProduct(self, prod, name):
self.emit_function(name, get_c_type(name),
self.get_args(prod.fields),
self.get_args(prod.attributes),
union=False)
class ChainOfVisitors:
def __init__(self, *visitors):
self.visitors = visitors
def visit(self, object):
for v in self.visitors:
v.visit(object)
v.emit("", 0)
def write_header(mod, f):
f.write(textwrap.dedent("""
#ifndef Py_INTERNAL_AST_H
#define Py_INTERNAL_AST_H
#ifdef __cplusplus
extern "C" {
#endif
#ifndef Py_BUILD_CORE
# error "this header requires Py_BUILD_CORE define"
#endif
#include "pycore_asdl.h"
""").lstrip())
c = ChainOfVisitors(TypeDefVisitor(f),
SequenceDefVisitor(f),
StructVisitor(f))
c.visit(mod)
f.write("// Note: these macros affect function definitions, not only call sites.\n")
PrototypeVisitor(f).visit(mod)
f.write(textwrap.dedent("""
CiAPI_FUNC(PyObject*) PyAST_mod2obj(mod_ty t);
CiAPI_FUNC(mod_ty) PyAST_obj2mod(PyObject* ast, PyArena* arena, int mode);
int PyAST_Check(PyObject* obj);
extern int _PyAST_Validate(mod_ty);
/* _PyAST_ExprAsUnicode is defined in ast_unparse.c */
CiAPI_FUNC(PyObject*) _PyAST_ExprAsUnicode(expr_ty);
/* Return the borrowed reference to the first literal string in the
sequence of statements or NULL if it doesn't start from a literal string.
Doesn't set exception. */
extern PyObject* _PyAST_GetDocString(asdl_stmt_seq *);
#ifdef __cplusplus
}
#endif
#endif /* !Py_INTERNAL_AST_H */
""")) | null |
186,376 | import os
import sys
import textwrap
from argparse import ArgumentParser
from contextlib import contextmanager
from pathlib import Path
import asdl
def write_internal_h_header(mod, f):
print(textwrap.dedent("""
#ifndef Py_INTERNAL_AST_STATE_H
#define Py_INTERNAL_AST_STATE_H
#ifdef __cplusplus
extern "C" {
#endif
#ifndef Py_BUILD_CORE
# error "this header requires Py_BUILD_CORE define"
#endif
""").lstrip(), file=f) | null |
186,377 | import os
import sys
import textwrap
from argparse import ArgumentParser
from contextlib import contextmanager
from pathlib import Path
import asdl
def write_internal_h_footer(mod, f):
print(textwrap.dedent("""
#ifdef __cplusplus
}
#endif
#endif /* !Py_INTERNAL_AST_STATE_H */
"""), file=f) | null |
186,378 | import os
import sys
import textwrap
from argparse import ArgumentParser
from contextlib import contextmanager
from pathlib import Path
import asdl
class FunctionVisitor(PrototypeVisitor):
"""Visitor to generate constructor functions for AST."""
def emit_function(self, name, ctype, args, attrs, union=True):
def emit(s, depth=0, reflow=True):
self.emit(s, depth, reflow)
argstr = ", ".join(["%s %s" % (atype, aname)
for atype, aname, opt in args + attrs])
if argstr:
argstr += ", PyArena *arena"
else:
argstr = "PyArena *arena"
self.emit("%s" % ctype, 0)
emit("%s(%s)" % (ast_func_name(name), argstr))
emit("{")
emit("%s p;" % ctype, 1)
for argtype, argname, opt in args:
if not opt and argtype != "int":
emit("if (!%s) {" % argname, 1)
emit("PyErr_SetString(PyExc_ValueError,", 2)
msg = "field '%s' is required for %s" % (argname, name)
emit(' "%s");' % msg,
2, reflow=False)
emit('return NULL;', 2)
emit('}', 1)
emit("p = (%s)_PyArena_Malloc(arena, sizeof(*p));" % ctype, 1);
emit("if (!p)", 1)
emit("return NULL;", 2)
if union:
self.emit_body_union(name, args, attrs)
else:
self.emit_body_struct(name, args, attrs)
emit("return p;", 1)
emit("}")
emit("")
def emit_body_union(self, name, args, attrs):
def emit(s, depth=0, reflow=True):
self.emit(s, depth, reflow)
emit("p->kind = %s_kind;" % name, 1)
for argtype, argname, opt in args:
emit("p->v.%s.%s = %s;" % (name, argname, argname), 1)
for argtype, argname, opt in attrs:
emit("p->%s = %s;" % (argname, argname), 1)
def emit_body_struct(self, name, args, attrs):
def emit(s, depth=0, reflow=True):
self.emit(s, depth, reflow)
for argtype, argname, opt in args:
emit("p->%s = %s;" % (argname, argname), 1)
for argtype, argname, opt in attrs:
emit("p->%s = %s;" % (argname, argname), 1)
class Obj2ModPrototypeVisitor(PickleVisitor):
def visitProduct(self, prod, name):
code = "static int obj2ast_%s(struct ast_state *state, PyObject* obj, %s* out, PyArena* arena);"
self.emit(code % (name, get_c_type(name)), 0)
visitSum = visitProduct
class Obj2ModVisitor(PickleVisitor):
@contextmanager
def recursive_call(self, node, level):
self.emit('if (Py_EnterRecursiveCall(" while traversing \'%s\' node")) {' % node, level, reflow=False)
self.emit('goto failed;', level + 1)
self.emit('}', level)
yield
self.emit('Py_LeaveRecursiveCall();', level)
def funcHeader(self, name):
ctype = get_c_type(name)
self.emit("int", 0)
self.emit("obj2ast_%s(struct ast_state *state, PyObject* obj, %s* out, PyArena* arena)" % (name, ctype), 0)
self.emit("{", 0)
self.emit("int isinstance;", 1)
self.emit("", 0)
def sumTrailer(self, name, add_label=False):
self.emit("", 0)
# there's really nothing more we can do if this fails ...
error = "expected some sort of %s, but got %%R" % name
format = "PyErr_Format(PyExc_TypeError, \"%s\", obj);"
self.emit(format % error, 1, reflow=False)
if add_label:
self.emit("failed:", 1)
self.emit("Py_XDECREF(tmp);", 1)
self.emit("return 1;", 1)
self.emit("}", 0)
self.emit("", 0)
def simpleSum(self, sum, name):
self.funcHeader(name)
for t in sum.types:
line = ("isinstance = PyObject_IsInstance(obj, "
"state->%s_type);")
self.emit(line % (t.name,), 1)
self.emit("if (isinstance == -1) {", 1)
self.emit("return 1;", 2)
self.emit("}", 1)
self.emit("if (isinstance) {", 1)
self.emit("*out = %s;" % t.name, 2)
self.emit("return 0;", 2)
self.emit("}", 1)
self.sumTrailer(name)
def buildArgs(self, fields):
return ", ".join(fields + ["arena"])
def complexSum(self, sum, name):
self.funcHeader(name)
self.emit("PyObject *tmp = NULL;", 1)
self.emit("PyObject *tp;", 1)
for a in sum.attributes:
self.visitAttributeDeclaration(a, name, sum=sum)
self.emit("", 0)
# XXX: should we only do this for 'expr'?
self.emit("if (obj == Py_None) {", 1)
self.emit("*out = NULL;", 2)
self.emit("return 0;", 2)
self.emit("}", 1)
for a in sum.attributes:
self.visitField(a, name, sum=sum, depth=1)
for t in sum.types:
self.emit("tp = state->%s_type;" % (t.name,), 1)
self.emit("isinstance = PyObject_IsInstance(obj, tp);", 1)
self.emit("if (isinstance == -1) {", 1)
self.emit("return 1;", 2)
self.emit("}", 1)
self.emit("if (isinstance) {", 1)
for f in t.fields:
self.visitFieldDeclaration(f, t.name, sum=sum, depth=2)
self.emit("", 0)
for f in t.fields:
self.visitField(f, t.name, sum=sum, depth=2)
args = [f.name for f in t.fields] + [a.name for a in sum.attributes]
self.emit("*out = %s(%s);" % (ast_func_name(t.name), self.buildArgs(args)), 2)
self.emit("if (*out == NULL) goto failed;", 2)
self.emit("return 0;", 2)
self.emit("}", 1)
self.sumTrailer(name, True)
def visitAttributeDeclaration(self, a, name, sum=sum):
ctype = get_c_type(a.type)
self.emit("%s %s;" % (ctype, a.name), 1)
def visitSum(self, sum, name):
if is_simple(sum):
self.simpleSum(sum, name)
else:
self.complexSum(sum, name)
def visitProduct(self, prod, name):
ctype = get_c_type(name)
self.emit("int", 0)
self.emit("obj2ast_%s(struct ast_state *state, PyObject* obj, %s* out, PyArena* arena)" % (name, ctype), 0)
self.emit("{", 0)
self.emit("PyObject* tmp = NULL;", 1)
for f in prod.fields:
self.visitFieldDeclaration(f, name, prod=prod, depth=1)
for a in prod.attributes:
self.visitFieldDeclaration(a, name, prod=prod, depth=1)
self.emit("", 0)
for f in prod.fields:
self.visitField(f, name, prod=prod, depth=1)
for a in prod.attributes:
self.visitField(a, name, prod=prod, depth=1)
args = [f.name for f in prod.fields]
args.extend([a.name for a in prod.attributes])
self.emit("*out = %s(%s);" % (ast_func_name(name), self.buildArgs(args)), 1)
self.emit("return 0;", 1)
self.emit("failed:", 0)
self.emit("Py_XDECREF(tmp);", 1)
self.emit("return 1;", 1)
self.emit("}", 0)
self.emit("", 0)
def visitFieldDeclaration(self, field, name, sum=None, prod=None, depth=0):
ctype = get_c_type(field.type)
if field.seq:
if self.isSimpleType(field):
self.emit("asdl_int_seq* %s;" % field.name, depth)
else:
_type = field.type
self.emit(f"asdl_{field.type}_seq* {field.name};", depth)
else:
ctype = get_c_type(field.type)
self.emit("%s %s;" % (ctype, field.name), depth)
def isSimpleSum(self, field):
# XXX can the members of this list be determined automatically?
return field.type in ('expr_context', 'boolop', 'operator',
'unaryop', 'cmpop')
def isNumeric(self, field):
return get_c_type(field.type) in ("int", "bool")
def isSimpleType(self, field):
return self.isSimpleSum(field) or self.isNumeric(field)
def visitField(self, field, name, sum=None, prod=None, depth=0):
ctype = get_c_type(field.type)
line = "if (_PyObject_LookupAttr(obj, state->%s, &tmp) < 0) {"
self.emit(line % field.name, depth)
self.emit("return 1;", depth+1)
self.emit("}", depth)
if not field.opt:
self.emit("if (tmp == NULL) {", depth)
message = "required field \\\"%s\\\" missing from %s" % (field.name, name)
format = "PyErr_SetString(PyExc_TypeError, \"%s\");"
self.emit(format % message, depth+1, reflow=False)
self.emit("return 1;", depth+1)
else:
self.emit("if (tmp == NULL || tmp == Py_None) {", depth)
self.emit("Py_CLEAR(tmp);", depth+1)
if self.isNumeric(field):
self.emit("%s = 0;" % field.name, depth+1)
elif not self.isSimpleType(field):
self.emit("%s = NULL;" % field.name, depth+1)
else:
raise TypeError("could not determine the default value for %s" % field.name)
self.emit("}", depth)
self.emit("else {", depth)
self.emit("int res;", depth+1)
if field.seq:
self.emit("Py_ssize_t len;", depth+1)
self.emit("Py_ssize_t i;", depth+1)
self.emit("if (!PyList_Check(tmp)) {", depth+1)
self.emit("PyErr_Format(PyExc_TypeError, \"%s field \\\"%s\\\" must "
"be a list, not a %%.200s\", _PyType_Name(Py_TYPE(tmp)));" %
(name, field.name),
depth+2, reflow=False)
self.emit("goto failed;", depth+2)
self.emit("}", depth+1)
self.emit("len = PyList_GET_SIZE(tmp);", depth+1)
if self.isSimpleType(field):
self.emit("%s = _Py_asdl_int_seq_new(len, arena);" % field.name, depth+1)
else:
self.emit("%s = _Py_asdl_%s_seq_new(len, arena);" % (field.name, field.type), depth+1)
self.emit("if (%s == NULL) goto failed;" % field.name, depth+1)
self.emit("for (i = 0; i < len; i++) {", depth+1)
self.emit("%s val;" % ctype, depth+2)
self.emit("PyObject *tmp2 = PyList_GET_ITEM(tmp, i);", depth+2)
self.emit("Py_INCREF(tmp2);", depth+2)
with self.recursive_call(name, depth+2):
self.emit("res = obj2ast_%s(state, tmp2, &val, arena);" %
field.type, depth+2, reflow=False)
self.emit("Py_DECREF(tmp2);", depth+2)
self.emit("if (res != 0) goto failed;", depth+2)
self.emit("if (len != PyList_GET_SIZE(tmp)) {", depth+2)
self.emit("PyErr_SetString(PyExc_RuntimeError, \"%s field \\\"%s\\\" "
"changed size during iteration\");" %
(name, field.name),
depth+3, reflow=False)
self.emit("goto failed;", depth+3)
self.emit("}", depth+2)
self.emit("asdl_seq_SET(%s, i, val);" % field.name, depth+2)
self.emit("}", depth+1)
else:
with self.recursive_call(name, depth+1):
self.emit("res = obj2ast_%s(state, tmp, &%s, arena);" %
(field.type, field.name), depth+1)
self.emit("if (res != 0) goto failed;", depth+1)
self.emit("Py_CLEAR(tmp);", depth+1)
self.emit("}", depth)
class SequenceConstructorVisitor(EmitVisitor):
def visitModule(self, mod):
for dfn in mod.dfns:
self.visit(dfn)
def visitType(self, type):
self.visit(type.value, type.name)
def visitProduct(self, prod, name):
self.emit_sequence_constructor(name, get_c_type(name))
def visitSum(self, sum, name):
if not is_simple(sum):
self.emit_sequence_constructor(name, get_c_type(name))
def emit_sequence_constructor(self, name, type):
self.emit(f"GENERATE_ASDL_SEQ_CONSTRUCTOR({name}, {type})", depth=0)
class PyTypesDeclareVisitor(PickleVisitor):
def visitProduct(self, prod, name):
self.emit_type("%s_type" % name)
self.emit("static PyObject* ast2obj_%s(struct ast_state *state, void*);" % name, 0)
if prod.attributes:
for a in prod.attributes:
self.emit_identifier(a.name)
self.emit("static const char * const %s_attributes[] = {" % name, 0)
for a in prod.attributes:
self.emit('"%s",' % a.name, 1)
self.emit("};", 0)
if prod.fields:
for f in prod.fields:
self.emit_identifier(f.name)
self.emit("static const char * const %s_fields[]={" % name,0)
for f in prod.fields:
self.emit('"%s",' % f.name, 1)
self.emit("};", 0)
def visitSum(self, sum, name):
self.emit_type("%s_type" % name)
if sum.attributes:
for a in sum.attributes:
self.emit_identifier(a.name)
self.emit("static const char * const %s_attributes[] = {" % name, 0)
for a in sum.attributes:
self.emit('"%s",' % a.name, 1)
self.emit("};", 0)
ptype = "void*"
if is_simple(sum):
ptype = get_c_type(name)
for t in sum.types:
self.emit_singleton("%s_singleton" % t.name)
self.emit("static PyObject* ast2obj_%s(struct ast_state *state, %s);" % (name, ptype), 0)
for t in sum.types:
self.visitConstructor(t, name)
def visitConstructor(self, cons, name):
if cons.fields:
for t in cons.fields:
self.emit_identifier(t.name)
self.emit("static const char * const %s_fields[]={" % cons.name, 0)
for t in cons.fields:
self.emit('"%s",' % t.name, 1)
self.emit("};",0)
class PyTypesVisitor(PickleVisitor):
def visitModule(self, mod):
self.emit("""
typedef struct {
PyObject_HEAD
PyObject *dict;
} AST_object;
static void
ast_dealloc(AST_object *self)
{
/* bpo-31095: UnTrack is needed before calling any callbacks */
PyTypeObject *tp = Py_TYPE(self);
PyObject_GC_UnTrack(self);
Py_CLEAR(self->dict);
freefunc free_func = PyType_GetSlot(tp, Py_tp_free);
assert(free_func != NULL);
free_func(self);
Py_DECREF(tp);
}
static int
ast_traverse(AST_object *self, visitproc visit, void *arg)
{
Py_VISIT(Py_TYPE(self));
Py_VISIT(self->dict);
return 0;
}
static int
ast_clear(AST_object *self)
{
Py_CLEAR(self->dict);
return 0;
}
static int
ast_type_init(PyObject *self, PyObject *args, PyObject *kw)
{
struct ast_state *state = get_ast_state();
if (state == NULL) {
return -1;
}
Py_ssize_t i, numfields = 0;
int res = -1;
PyObject *key, *value, *fields;
if (_PyObject_LookupAttr((PyObject*)Py_TYPE(self), state->_fields, &fields) < 0) {
goto cleanup;
}
if (fields) {
numfields = PySequence_Size(fields);
if (numfields == -1) {
goto cleanup;
}
}
res = 0; /* if no error occurs, this stays 0 to the end */
if (numfields < PyTuple_GET_SIZE(args)) {
PyErr_Format(PyExc_TypeError, "%.400s constructor takes at most "
"%zd positional argument%s",
_PyType_Name(Py_TYPE(self)),
numfields, numfields == 1 ? "" : "s");
res = -1;
goto cleanup;
}
for (i = 0; i < PyTuple_GET_SIZE(args); i++) {
/* cannot be reached when fields is NULL */
PyObject *name = PySequence_GetItem(fields, i);
if (!name) {
res = -1;
goto cleanup;
}
res = PyObject_SetAttr(self, name, PyTuple_GET_ITEM(args, i));
Py_DECREF(name);
if (res < 0) {
goto cleanup;
}
}
if (kw) {
i = 0; /* needed by PyDict_Next */
while (PyDict_Next(kw, &i, &key, &value)) {
int contains = PySequence_Contains(fields, key);
if (contains == -1) {
res = -1;
goto cleanup;
} else if (contains == 1) {
Py_ssize_t p = PySequence_Index(fields, key);
if (p == -1) {
res = -1;
goto cleanup;
}
if (p < PyTuple_GET_SIZE(args)) {
PyErr_Format(PyExc_TypeError,
"%.400s got multiple values for argument '%U'",
Py_TYPE(self)->tp_name, key);
res = -1;
goto cleanup;
}
}
res = PyObject_SetAttr(self, key, value);
if (res < 0) {
goto cleanup;
}
}
}
cleanup:
Py_XDECREF(fields);
return res;
}
/* Pickling support */
static PyObject *
ast_type_reduce(PyObject *self, PyObject *unused)
{
struct ast_state *state = get_ast_state();
if (state == NULL) {
return NULL;
}
PyObject *dict;
if (_PyObject_LookupAttr(self, state->__dict__, &dict) < 0) {
return NULL;
}
if (dict) {
return Py_BuildValue("O()N", Py_TYPE(self), dict);
}
return Py_BuildValue("O()", Py_TYPE(self));
}
static PyMemberDef ast_type_members[] = {
{"__dictoffset__", T_PYSSIZET, offsetof(AST_object, dict), READONLY},
{NULL} /* Sentinel */
};
static PyMethodDef ast_type_methods[] = {
{"__reduce__", ast_type_reduce, METH_NOARGS, NULL},
{NULL}
};
static PyGetSetDef ast_type_getsets[] = {
{"__dict__", PyObject_GenericGetDict, PyObject_GenericSetDict},
{NULL}
};
static PyType_Slot AST_type_slots[] = {
{Py_tp_dealloc, ast_dealloc},
{Py_tp_getattro, PyObject_GenericGetAttr},
{Py_tp_setattro, PyObject_GenericSetAttr},
{Py_tp_traverse, ast_traverse},
{Py_tp_clear, ast_clear},
{Py_tp_members, ast_type_members},
{Py_tp_methods, ast_type_methods},
{Py_tp_getset, ast_type_getsets},
{Py_tp_init, ast_type_init},
{Py_tp_alloc, PyType_GenericAlloc},
{Py_tp_new, PyType_GenericNew},
{Py_tp_free, PyObject_GC_Del},
{0, 0},
};
static PyType_Spec AST_type_spec = {
"ast.AST",
sizeof(AST_object),
0,
Py_TPFLAGS_DEFAULT | Py_TPFLAGS_BASETYPE | Py_TPFLAGS_HAVE_GC,
AST_type_slots
};
static PyObject *
make_type(struct ast_state *state, const char *type, PyObject* base,
const char* const* fields, int num_fields, const char *doc)
{
PyObject *fnames, *result;
int i;
fnames = PyTuple_New(num_fields);
if (!fnames) return NULL;
for (i = 0; i < num_fields; i++) {
PyObject *field = PyUnicode_InternFromString(fields[i]);
if (!field) {
Py_DECREF(fnames);
return NULL;
}
PyTuple_SET_ITEM(fnames, i, field);
}
result = PyObject_CallFunction((PyObject*)&PyType_Type, "s(O){OOOOOOOs}",
type, base,
state->_fields, fnames,
state->__match_args__, fnames,
state->__module__,
state->ast,
state->__doc__, doc);
Py_DECREF(fnames);
return result;
}
static int
add_attributes(struct ast_state *state, PyObject *type, const char * const *attrs, int num_fields)
{
int i, result;
PyObject *s, *l = PyTuple_New(num_fields);
if (!l)
return 0;
for (i = 0; i < num_fields; i++) {
s = PyUnicode_InternFromString(attrs[i]);
if (!s) {
Py_DECREF(l);
return 0;
}
PyTuple_SET_ITEM(l, i, s);
}
result = PyObject_SetAttr(type, state->_attributes, l) >= 0;
Py_DECREF(l);
return result;
}
/* Conversion AST -> Python */
static PyObject* ast2obj_list(struct ast_state *state, asdl_seq *seq, PyObject* (*func)(struct ast_state *state, void*))
{
Py_ssize_t i, n = asdl_seq_LEN(seq);
PyObject *result = PyList_New(n);
PyObject *value;
if (!result)
return NULL;
for (i = 0; i < n; i++) {
value = func(state, asdl_seq_GET_UNTYPED(seq, i));
if (!value) {
Py_DECREF(result);
return NULL;
}
PyList_SET_ITEM(result, i, value);
}
return result;
}
static PyObject* ast2obj_object(struct ast_state *Py_UNUSED(state), void *o)
{
if (!o)
o = Py_None;
Py_INCREF((PyObject*)o);
return (PyObject*)o;
}
#define ast2obj_constant ast2obj_object
#define ast2obj_identifier ast2obj_object
#define ast2obj_string ast2obj_object
static PyObject* ast2obj_int(struct ast_state *Py_UNUSED(state), long b)
{
return PyLong_FromLong(b);
}
/* Conversion Python -> AST */
static int obj2ast_object(struct ast_state *Py_UNUSED(state), PyObject* obj, PyObject** out, PyArena* arena)
{
if (obj == Py_None)
obj = NULL;
if (obj) {
if (_PyArena_AddPyObject(arena, obj) < 0) {
*out = NULL;
return -1;
}
Py_INCREF(obj);
}
*out = obj;
return 0;
}
static int obj2ast_constant(struct ast_state *Py_UNUSED(state), PyObject* obj, PyObject** out, PyArena* arena)
{
if (_PyArena_AddPyObject(arena, obj) < 0) {
*out = NULL;
return -1;
}
Py_INCREF(obj);
*out = obj;
return 0;
}
static int obj2ast_identifier(struct ast_state *state, PyObject* obj, PyObject** out, PyArena* arena)
{
if (!PyUnicode_CheckExact(obj) && obj != Py_None) {
PyErr_SetString(PyExc_TypeError, "AST identifier must be of type str");
return 1;
}
return obj2ast_object(state, obj, out, arena);
}
static int obj2ast_string(struct ast_state *state, PyObject* obj, PyObject** out, PyArena* arena)
{
if (!PyUnicode_CheckExact(obj) && !PyBytes_CheckExact(obj)) {
PyErr_SetString(PyExc_TypeError, "AST string must be of type str");
return 1;
}
return obj2ast_object(state, obj, out, arena);
}
static int obj2ast_int(struct ast_state* Py_UNUSED(state), PyObject* obj, int* out, PyArena* arena)
{
int i;
if (!PyLong_Check(obj)) {
PyErr_Format(PyExc_ValueError, "invalid integer value: %R", obj);
return 1;
}
i = _PyLong_AsInt(obj);
if (i == -1 && PyErr_Occurred())
return 1;
*out = i;
return 0;
}
static int add_ast_fields(struct ast_state *state)
{
PyObject *empty_tuple;
empty_tuple = PyTuple_New(0);
if (!empty_tuple ||
PyObject_SetAttrString(state->AST_type, "_fields", empty_tuple) < 0 ||
PyObject_SetAttrString(state->AST_type, "__match_args__", empty_tuple) < 0 ||
PyObject_SetAttrString(state->AST_type, "_attributes", empty_tuple) < 0) {
Py_XDECREF(empty_tuple);
return -1;
}
Py_DECREF(empty_tuple);
return 0;
}
""", 0, reflow=False)
self.file.write(textwrap.dedent('''
static int
init_types(struct ast_state *state)
{
// init_types() must not be called after _PyAST_Fini()
// has been called
assert(state->initialized >= 0);
if (state->initialized) {
return 1;
}
if (init_identifiers(state) < 0) {
return 0;
}
state->AST_type = PyType_FromSpec(&AST_type_spec);
if (!state->AST_type) {
return 0;
}
if (add_ast_fields(state) < 0) {
return 0;
}
'''))
for dfn in mod.dfns:
self.visit(dfn)
self.file.write(textwrap.dedent('''
state->initialized = 1;
return 1;
}
'''))
def visitProduct(self, prod, name):
if prod.fields:
fields = name+"_fields"
else:
fields = "NULL"
self.emit('state->%s_type = make_type(state, "%s", state->AST_type, %s, %d,' %
(name, name, fields, len(prod.fields)), 1)
self.emit('%s);' % reflow_c_string(asdl_of(name, prod), 2), 2, reflow=False)
self.emit("if (!state->%s_type) return 0;" % name, 1)
self.emit_type("AST_type")
self.emit_type("%s_type" % name)
if prod.attributes:
self.emit("if (!add_attributes(state, state->%s_type, %s_attributes, %d)) return 0;" %
(name, name, len(prod.attributes)), 1)
else:
self.emit("if (!add_attributes(state, state->%s_type, NULL, 0)) return 0;" % name, 1)
self.emit_defaults(name, prod.fields, 1)
self.emit_defaults(name, prod.attributes, 1)
def visitSum(self, sum, name):
self.emit('state->%s_type = make_type(state, "%s", state->AST_type, NULL, 0,' %
(name, name), 1)
self.emit('%s);' % reflow_c_string(asdl_of(name, sum), 2), 2, reflow=False)
self.emit_type("%s_type" % name)
self.emit("if (!state->%s_type) return 0;" % name, 1)
if sum.attributes:
self.emit("if (!add_attributes(state, state->%s_type, %s_attributes, %d)) return 0;" %
(name, name, len(sum.attributes)), 1)
else:
self.emit("if (!add_attributes(state, state->%s_type, NULL, 0)) return 0;" % name, 1)
self.emit_defaults(name, sum.attributes, 1)
simple = is_simple(sum)
for t in sum.types:
self.visitConstructor(t, name, simple)
def visitConstructor(self, cons, name, simple):
if cons.fields:
fields = cons.name+"_fields"
else:
fields = "NULL"
self.emit('state->%s_type = make_type(state, "%s", state->%s_type, %s, %d,' %
(cons.name, cons.name, name, fields, len(cons.fields)), 1)
self.emit('%s);' % reflow_c_string(asdl_of(cons.name, cons), 2), 2, reflow=False)
self.emit("if (!state->%s_type) return 0;" % cons.name, 1)
self.emit_type("%s_type" % cons.name)
self.emit_defaults(cons.name, cons.fields, 1)
if simple:
self.emit("state->%s_singleton = PyType_GenericNew((PyTypeObject *)"
"state->%s_type, NULL, NULL);" %
(cons.name, cons.name), 1)
self.emit("if (!state->%s_singleton) return 0;" % cons.name, 1)
def emit_defaults(self, name, fields, depth):
for field in fields:
if field.opt:
self.emit('if (PyObject_SetAttr(state->%s_type, state->%s, Py_None) == -1)' %
(name, field.name), depth)
self.emit("return 0;", depth+1)
class ASTModuleVisitor(PickleVisitor):
def visitModule(self, mod):
self.emit("static int", 0)
self.emit("astmodule_exec(PyObject *m)", 0)
self.emit("{", 0)
self.emit('struct ast_state *state = get_ast_state();', 1)
self.emit('if (state == NULL) {', 1)
self.emit('return -1;', 2)
self.emit('}', 1)
self.emit('if (PyModule_AddObjectRef(m, "AST", state->AST_type) < 0) {', 1)
self.emit('return -1;', 2)
self.emit('}', 1)
self.emit('if (PyModule_AddIntMacro(m, PyCF_ALLOW_TOP_LEVEL_AWAIT) < 0) {', 1)
self.emit("return -1;", 2)
self.emit('}', 1)
self.emit('if (PyModule_AddIntMacro(m, PyCF_ONLY_AST) < 0) {', 1)
self.emit("return -1;", 2)
self.emit('}', 1)
self.emit('if (PyModule_AddIntMacro(m, PyCF_TYPE_COMMENTS) < 0) {', 1)
self.emit("return -1;", 2)
self.emit('}', 1)
for dfn in mod.dfns:
self.visit(dfn)
self.emit("return 0;", 1)
self.emit("}", 0)
self.emit("", 0)
self.emit("""
static PyModuleDef_Slot astmodule_slots[] = {
{Py_mod_exec, astmodule_exec},
{0, NULL}
};
static struct PyModuleDef _astmodule = {
PyModuleDef_HEAD_INIT,
.m_name = "_ast",
// The _ast module uses a per-interpreter state (PyInterpreterState.ast)
.m_size = 0,
.m_slots = astmodule_slots,
};
PyMODINIT_FUNC
PyInit__ast(void)
{
return PyModuleDef_Init(&_astmodule);
}
""".strip(), 0, reflow=False)
def visitProduct(self, prod, name):
self.addObj(name)
def visitSum(self, sum, name):
self.addObj(name)
for t in sum.types:
self.visitConstructor(t, name)
def visitConstructor(self, cons, name):
self.addObj(cons.name)
def addObj(self, name):
self.emit("if (PyModule_AddObjectRef(m, \"%s\", "
"state->%s_type) < 0) {" % (name, name), 1)
self.emit("return -1;", 2)
self.emit('}', 1)
class ObjVisitor(PickleVisitor):
def func_begin(self, name):
ctype = get_c_type(name)
self.emit("PyObject*", 0)
self.emit("ast2obj_%s(struct ast_state *state, void* _o)" % (name), 0)
self.emit("{", 0)
self.emit("%s o = (%s)_o;" % (ctype, ctype), 1)
self.emit("PyObject *result = NULL, *value = NULL;", 1)
self.emit("PyTypeObject *tp;", 1)
self.emit('if (!o) {', 1)
self.emit("Py_RETURN_NONE;", 2)
self.emit("}", 1)
def func_end(self):
self.emit("return result;", 1)
self.emit("failed:", 0)
self.emit("Py_XDECREF(value);", 1)
self.emit("Py_XDECREF(result);", 1)
self.emit("return NULL;", 1)
self.emit("}", 0)
self.emit("", 0)
def visitSum(self, sum, name):
if is_simple(sum):
self.simpleSum(sum, name)
return
self.func_begin(name)
self.emit("switch (o->kind) {", 1)
for i in range(len(sum.types)):
t = sum.types[i]
self.visitConstructor(t, i + 1, name)
self.emit("}", 1)
for a in sum.attributes:
self.emit("value = ast2obj_%s(state, o->%s);" % (a.type, a.name), 1)
self.emit("if (!value) goto failed;", 1)
self.emit('if (PyObject_SetAttr(result, state->%s, value) < 0)' % a.name, 1)
self.emit('goto failed;', 2)
self.emit('Py_DECREF(value);', 1)
self.func_end()
def simpleSum(self, sum, name):
self.emit("PyObject* ast2obj_%s(struct ast_state *state, %s_ty o)" % (name, name), 0)
self.emit("{", 0)
self.emit("switch(o) {", 1)
for t in sum.types:
self.emit("case %s:" % t.name, 2)
self.emit("Py_INCREF(state->%s_singleton);" % t.name, 3)
self.emit("return state->%s_singleton;" % t.name, 3)
self.emit("}", 1)
self.emit("Py_UNREACHABLE();", 1);
self.emit("}", 0)
def visitProduct(self, prod, name):
self.func_begin(name)
self.emit("tp = (PyTypeObject *)state->%s_type;" % name, 1)
self.emit("result = PyType_GenericNew(tp, NULL, NULL);", 1);
self.emit("if (!result) return NULL;", 1)
for field in prod.fields:
self.visitField(field, name, 1, True)
for a in prod.attributes:
self.emit("value = ast2obj_%s(state, o->%s);" % (a.type, a.name), 1)
self.emit("if (!value) goto failed;", 1)
self.emit("if (PyObject_SetAttr(result, state->%s, value) < 0)" % a.name, 1)
self.emit('goto failed;', 2)
self.emit('Py_DECREF(value);', 1)
self.func_end()
def visitConstructor(self, cons, enum, name):
self.emit("case %s_kind:" % cons.name, 1)
self.emit("tp = (PyTypeObject *)state->%s_type;" % cons.name, 2)
self.emit("result = PyType_GenericNew(tp, NULL, NULL);", 2);
self.emit("if (!result) goto failed;", 2)
for f in cons.fields:
self.visitField(f, cons.name, 2, False)
self.emit("break;", 2)
def visitField(self, field, name, depth, product):
def emit(s, d):
self.emit(s, depth + d)
if product:
value = "o->%s" % field.name
else:
value = "o->v.%s.%s" % (name, field.name)
self.set(field, value, depth)
emit("if (!value) goto failed;", 0)
emit("if (PyObject_SetAttr(result, state->%s, value) == -1)" % field.name, 0)
emit("goto failed;", 1)
emit("Py_DECREF(value);", 0)
def set(self, field, value, depth):
if field.seq:
# XXX should really check for is_simple, but that requires a symbol table
if field.type == "cmpop":
# While the sequence elements are stored as void*,
# ast2obj_cmpop expects an enum
self.emit("{", depth)
self.emit("Py_ssize_t i, n = asdl_seq_LEN(%s);" % value, depth+1)
self.emit("value = PyList_New(n);", depth+1)
self.emit("if (!value) goto failed;", depth+1)
self.emit("for(i = 0; i < n; i++)", depth+1)
# This cannot fail, so no need for error handling
self.emit("PyList_SET_ITEM(value, i, ast2obj_cmpop(state, (cmpop_ty)asdl_seq_GET(%s, i)));" % value,
depth+2, reflow=False)
self.emit("}", depth)
else:
self.emit("value = ast2obj_list(state, (asdl_seq*)%s, ast2obj_%s);" % (value, field.type), depth)
else:
self.emit("value = ast2obj_%s(state, %s);" % (field.type, value), depth, reflow=False)
class PartingShots(StaticVisitor):
CODE = """
PyObject* PyAST_mod2obj(mod_ty t)
{
struct ast_state *state = get_ast_state();
if (state == NULL) {
return NULL;
}
return ast2obj_mod(state, t);
}
/* mode is 0 for "exec", 1 for "eval" and 2 for "single" input */
mod_ty PyAST_obj2mod(PyObject* ast, PyArena* arena, int mode)
{
const char * const req_name[] = {"Module", "Expression", "Interactive"};
int isinstance;
if (PySys_Audit("compile", "OO", ast, Py_None) < 0) {
return NULL;
}
struct ast_state *state = get_ast_state();
if (state == NULL) {
return NULL;
}
PyObject *req_type[3];
req_type[0] = state->Module_type;
req_type[1] = state->Expression_type;
req_type[2] = state->Interactive_type;
assert(0 <= mode && mode <= 2);
isinstance = PyObject_IsInstance(ast, req_type[mode]);
if (isinstance == -1)
return NULL;
if (!isinstance) {
PyErr_Format(PyExc_TypeError, "expected %s node, got %.400s",
req_name[mode], _PyType_Name(Py_TYPE(ast)));
return NULL;
}
mod_ty res = NULL;
if (obj2ast_mod(state, ast, &res, arena) != 0)
return NULL;
else
return res;
}
int PyAST_Check(PyObject* obj)
{
struct ast_state *state = get_ast_state();
if (state == NULL) {
return -1;
}
return PyObject_IsInstance(obj, state->AST_type);
}
"""
class ChainOfVisitors:
def __init__(self, *visitors):
self.visitors = visitors
def visit(self, object):
for v in self.visitors:
v.visit(object)
v.emit("", 0)
def generate_module_def(mod, f, internal_h):
# Gather all the data needed for ModuleSpec
visitor_list = set()
with open(os.devnull, "w") as devnull:
visitor = PyTypesDeclareVisitor(devnull)
visitor.visit(mod)
visitor_list.add(visitor)
visitor = PyTypesVisitor(devnull)
visitor.visit(mod)
visitor_list.add(visitor)
state_strings = {
"ast",
"_fields",
"__match_args__",
"__doc__",
"__dict__",
"__module__",
"_attributes",
}
module_state = state_strings.copy()
for visitor in visitor_list:
for identifier in visitor.identifiers:
module_state.add(identifier)
state_strings.add(identifier)
for singleton in visitor.singletons:
module_state.add(singleton)
for tp in visitor.types:
module_state.add(tp)
state_strings = sorted(state_strings)
module_state = sorted(module_state)
generate_ast_state(module_state, internal_h)
print(textwrap.dedent("""
#include "Python.h"
#include "pycore_ast.h"
#include "pycore_ast_state.h" // struct ast_state
#include "pycore_interp.h" // _PyInterpreterState.ast
#include "pycore_pystate.h" // _PyInterpreterState_GET()
#include "structmember.h"
#include <stddef.h>
#include "internal/pycore_ast.h"
// Forward declaration
static int init_types(struct ast_state *state);
static struct ast_state*
get_ast_state(void)
{
PyInterpreterState *interp = _PyInterpreterState_GET();
struct ast_state *state = &interp->ast;
if (!init_types(state)) {
return NULL;
}
return state;
}
""").strip(), file=f)
generate_ast_fini(module_state, f)
f.write('static int init_identifiers(struct ast_state *state)\n')
f.write('{\n')
for identifier in state_strings:
f.write(' if ((state->' + identifier)
f.write(' = PyUnicode_InternFromString("')
f.write(identifier + '")) == NULL) return 0;\n')
f.write(' return 1;\n')
f.write('};\n\n')
def write_source(mod, f, internal_h_file):
generate_module_def(mod, f, internal_h_file)
v = ChainOfVisitors(
SequenceConstructorVisitor(f),
PyTypesDeclareVisitor(f),
PyTypesVisitor(f),
Obj2ModPrototypeVisitor(f),
FunctionVisitor(f),
ObjVisitor(f),
Obj2ModVisitor(f),
ASTModuleVisitor(f),
PartingShots(f),
)
v.visit(mod) | null |
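The mode-to-node-type mapping that `PyAST_mod2obj`/`PyAST_obj2mod` enforce (0 = "exec" → Module, 1 = "eval" → Expression, 2 = "single" → Interactive) can be observed from Python with the standard `ast` module; this illustrates the same contract, not the generated C code itself:

```python
import ast

print(type(ast.parse("x = 1", mode="exec")).__name__)    # Module
print(type(ast.parse("x + 1", mode="eval")).__name__)    # Expression
print(type(ast.parse("x = 1", mode="single")).__name__)  # Interactive
```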
186,380 | from collections import namedtuple
import re
class ASDLParser:
"""Parser for ASDL files.
Create, then call the parse method on a buffer containing ASDL.
This is a simple recursive descent parser that uses tokenize_asdl for the
lexing.
"""
def __init__(self):
self._tokenizer = None
self.cur_token = None
def parse(self, buf):
"""Parse the ASDL in the buffer and return an AST with a Module root.
"""
self._tokenizer = tokenize_asdl(buf)
self._advance()
return self._parse_module()
def _parse_module(self):
if self._at_keyword('module'):
self._advance()
else:
raise ASDLSyntaxError(
'Expected "module" (found {})'.format(self.cur_token.value),
self.cur_token.lineno)
name = self._match(self._id_kinds)
self._match(TokenKind.LBrace)
defs = self._parse_definitions()
self._match(TokenKind.RBrace)
return Module(name, defs)
def _parse_definitions(self):
defs = []
while self.cur_token.kind == TokenKind.TypeId:
typename = self._advance()
self._match(TokenKind.Equals)
type = self._parse_type()
defs.append(Type(typename, type))
return defs
def _parse_type(self):
if self.cur_token.kind == TokenKind.LParen:
# If we see a (, it's a product
return self._parse_product()
else:
# Otherwise it's a sum. Look for ConstructorId
sumlist = [Constructor(self._match(TokenKind.ConstructorId),
self._parse_optional_fields())]
while self.cur_token.kind == TokenKind.Pipe:
# More constructors
self._advance()
sumlist.append(Constructor(
self._match(TokenKind.ConstructorId),
self._parse_optional_fields()))
return Sum(sumlist, self._parse_optional_attributes())
def _parse_product(self):
return Product(self._parse_fields(), self._parse_optional_attributes())
def _parse_fields(self):
fields = []
self._match(TokenKind.LParen)
while self.cur_token.kind == TokenKind.TypeId:
typename = self._advance()
is_seq, is_opt = self._parse_optional_field_quantifier()
id = (self._advance() if self.cur_token.kind in self._id_kinds
else None)
fields.append(Field(typename, id, seq=is_seq, opt=is_opt))
if self.cur_token.kind == TokenKind.RParen:
break
elif self.cur_token.kind == TokenKind.Comma:
self._advance()
self._match(TokenKind.RParen)
return fields
def _parse_optional_fields(self):
if self.cur_token.kind == TokenKind.LParen:
return self._parse_fields()
else:
return None
def _parse_optional_attributes(self):
if self._at_keyword('attributes'):
self._advance()
return self._parse_fields()
else:
return None
def _parse_optional_field_quantifier(self):
is_seq, is_opt = False, False
if self.cur_token.kind == TokenKind.Asterisk:
is_seq = True
self._advance()
elif self.cur_token.kind == TokenKind.Question:
is_opt = True
self._advance()
return is_seq, is_opt
def _advance(self):
""" Return the value of the current token and read the next one into
self.cur_token.
"""
cur_val = None if self.cur_token is None else self.cur_token.value
try:
self.cur_token = next(self._tokenizer)
except StopIteration:
self.cur_token = None
return cur_val
_id_kinds = (TokenKind.ConstructorId, TokenKind.TypeId)
def _match(self, kind):
"""The 'match' primitive of RD parsers.
* Verifies that the current token is of the given kind (kind can
be a tuple, in which the kind must match one of its members).
* Returns the value of the current token
* Reads in the next token
"""
if (isinstance(kind, tuple) and self.cur_token.kind in kind or
self.cur_token.kind == kind
):
value = self.cur_token.value
self._advance()
return value
else:
raise ASDLSyntaxError(
'Unmatched {} (found {})'.format(kind, self.cur_token.kind),
self.cur_token.lineno)
def _at_keyword(self, keyword):
return (self.cur_token.kind == TokenKind.TypeId and
self.cur_token.value == keyword)
The provided code snippet includes necessary dependencies for implementing the `parse` function. Write a Python function `def parse(filename)` to solve the following problem:
Parse ASDL from the given file and return a Module node describing it.
Here is the function:
def parse(filename):
"""Parse ASDL from the given file and return a Module node describing it."""
with open(filename, encoding="utf-8") as f:
parser = ASDLParser()
return parser.parse(f.read()) | Parse ASDL from the given file and return a Module node describing it. |
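A minimal usage sketch for `ASDLParser` and `parse`, assuming the supporting `Module`/`Type`/`tokenize_asdl` machinery from CPython's `Parser/asdl.py` is available; the grammar below is made up for illustration:

```python
buf = """
module Demo
{
    expr = Num(int n) | Name(identifier id)
}
"""
mod = ASDLParser().parse(buf)
print(mod.name)                    # Demo
print([d.name for d in mod.dfns])  # ['expr']
```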
186,384 | import os
import stat
from itertools import filterfalse
from types import GenericAlias
class dircmp:
"""A class that manages the comparison of 2 directories.
dircmp(a, b, ignore=None, hide=None)
A and B are directories.
IGNORE is a list of names to ignore,
defaults to DEFAULT_IGNORES.
HIDE is a list of names to hide,
defaults to [os.curdir, os.pardir].
High level usage:
x = dircmp(dir1, dir2)
x.report() -> prints a report on the differences between dir1 and dir2
or
x.report_partial_closure() -> prints report on differences between dir1
and dir2, and reports on common immediate subdirectories.
x.report_full_closure() -> like report_partial_closure,
but fully recursive.
Attributes:
left_list, right_list: The files in dir1 and dir2,
filtered by hide and ignore.
common: a list of names in both dir1 and dir2.
left_only, right_only: names only in dir1, dir2.
common_dirs: subdirectories in both dir1 and dir2.
common_files: files in both dir1 and dir2.
common_funny: names in both dir1 and dir2 where the type differs between
dir1 and dir2, or the name is not stat-able.
same_files: list of identical files.
diff_files: list of filenames which differ.
funny_files: list of files which could not be compared.
subdirs: a dictionary of dircmp instances (or MyDirCmp instances if this
object is of type MyDirCmp, a subclass of dircmp), keyed by names
in common_dirs.
"""
def __init__(self, a, b, ignore=None, hide=None): # Initialize
self.left = a
self.right = b
if hide is None:
self.hide = [os.curdir, os.pardir] # Names never to be shown
else:
self.hide = hide
if ignore is None:
self.ignore = DEFAULT_IGNORES
else:
self.ignore = ignore
def phase0(self): # Compare everything except common subdirectories
self.left_list = _filter(os.listdir(self.left),
self.hide+self.ignore)
self.right_list = _filter(os.listdir(self.right),
self.hide+self.ignore)
self.left_list.sort()
self.right_list.sort()
def phase1(self): # Compute common names
a = dict(zip(map(os.path.normcase, self.left_list), self.left_list))
b = dict(zip(map(os.path.normcase, self.right_list), self.right_list))
self.common = list(map(a.__getitem__, filter(b.__contains__, a)))
self.left_only = list(map(a.__getitem__, filterfalse(b.__contains__, a)))
self.right_only = list(map(b.__getitem__, filterfalse(a.__contains__, b)))
def phase2(self): # Distinguish files, directories, funnies
self.common_dirs = []
self.common_files = []
self.common_funny = []
for x in self.common:
a_path = os.path.join(self.left, x)
b_path = os.path.join(self.right, x)
ok = 1
try:
a_stat = os.stat(a_path)
except OSError:
# print('Can\'t stat', a_path, ':', why.args[1])
ok = 0
try:
b_stat = os.stat(b_path)
except OSError:
# print('Can\'t stat', b_path, ':', why.args[1])
ok = 0
if ok:
a_type = stat.S_IFMT(a_stat.st_mode)
b_type = stat.S_IFMT(b_stat.st_mode)
if a_type != b_type:
self.common_funny.append(x)
elif stat.S_ISDIR(a_type):
self.common_dirs.append(x)
elif stat.S_ISREG(a_type):
self.common_files.append(x)
else:
self.common_funny.append(x)
else:
self.common_funny.append(x)
def phase3(self): # Find out differences between common files
xx = cmpfiles(self.left, self.right, self.common_files)
self.same_files, self.diff_files, self.funny_files = xx
def phase4(self): # Find out differences between common subdirectories
# A new dircmp (or MyDirCmp if dircmp was subclassed) object is created
# for each common subdirectory,
# these are stored in a dictionary indexed by filename.
# The hide and ignore properties are inherited from the parent
self.subdirs = {}
for x in self.common_dirs:
a_x = os.path.join(self.left, x)
b_x = os.path.join(self.right, x)
self.subdirs[x] = self.__class__(a_x, b_x, self.ignore, self.hide)
def phase4_closure(self): # Recursively call phase4() on subdirectories
self.phase4()
for sd in self.subdirs.values():
sd.phase4_closure()
def report(self): # Print a report on the differences between a and b
# Output format is purposely lousy
print('diff', self.left, self.right)
if self.left_only:
self.left_only.sort()
print('Only in', self.left, ':', self.left_only)
if self.right_only:
self.right_only.sort()
print('Only in', self.right, ':', self.right_only)
if self.same_files:
self.same_files.sort()
print('Identical files :', self.same_files)
if self.diff_files:
self.diff_files.sort()
print('Differing files :', self.diff_files)
if self.funny_files:
self.funny_files.sort()
print('Trouble with common files :', self.funny_files)
if self.common_dirs:
self.common_dirs.sort()
print('Common subdirectories :', self.common_dirs)
if self.common_funny:
self.common_funny.sort()
print('Common funny cases :', self.common_funny)
def report_partial_closure(self): # Print reports on self and on subdirs
self.report()
for sd in self.subdirs.values():
print()
sd.report()
def report_full_closure(self): # Report on self and subdirs recursively
self.report()
for sd in self.subdirs.values():
print()
sd.report_full_closure()
methodmap = dict(subdirs=phase4,
same_files=phase3, diff_files=phase3, funny_files=phase3,
common_dirs = phase2, common_files=phase2, common_funny=phase2,
common=phase1, left_only=phase1, right_only=phase1,
left_list=phase0, right_list=phase0)
def __getattr__(self, attr):
if attr not in self.methodmap:
raise AttributeError(attr)
self.methodmap[attr](self)
return getattr(self, attr)
__class_getitem__ = classmethod(GenericAlias)
def getopt(args, shortopts, longopts = []):
"""getopt(args, options[, long_options]) -> opts, args
Parses command line options and parameter list. args is the
argument list to be parsed, without the leading reference to the
running program. Typically, this means "sys.argv[1:]". shortopts
is the string of option letters that the script wants to
recognize, with options that require an argument followed by a
colon (i.e., the same format that Unix getopt() uses). If
specified, longopts is a list of strings with the names of the
long options which should be supported. The leading '--'
characters should not be included in the option name. Options
which require an argument should be followed by an equal sign
('=').
The return value consists of two elements: the first is a list of
(option, value) pairs; the second is the list of program arguments
left after the option list was stripped (this is a trailing slice
of the first argument). Each option-and-value pair returned has
the option as its first element, prefixed with a hyphen (e.g.,
'-x'), and the option argument as its second element, or an empty
string if the option has no argument. The options occur in the
list in the same order in which they were found, thus allowing
multiple occurrences. Long and short options may be mixed.
"""
opts = []
if type(longopts) == type(""):
longopts = [longopts]
else:
longopts = list(longopts)
while args and args[0].startswith('-') and args[0] != '-':
if args[0] == '--':
args = args[1:]
break
if args[0].startswith('--'):
opts, args = do_longs(opts, args[0][2:], longopts, args[1:])
else:
opts, args = do_shorts(opts, args[0][1:], shortopts, args[1:])
return opts, args
def demo():
import sys
import getopt
options, args = getopt.getopt(sys.argv[1:], 'r')
if len(args) != 2:
raise getopt.GetoptError('need exactly two args', None)
dd = dircmp(args[0], args[1])
if ('-r', '') in options:
dd.report_full_closure()
else:
dd.report() | null |
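A small usage sketch of `getopt`, mirroring what `demo()` does with its `-r` flag (the standard library `getopt` module provides the same function shown above):

```python
from getopt import getopt

opts, args = getopt(['-r', 'dir1', 'dir2'], 'r')
print(opts)  # [('-r', '')]
print(args)  # ['dir1', 'dir2']
```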
186,388 | import _lsprof
import profile as _pyprofile
class Profile(_lsprof.Profiler):
def print_stats(self, sort=-1): ...
def dump_stats(self, file): ...
def create_stats(self): ...
def snapshot_stats(self): ...
def run(self, cmd): ...
def runctx(self, cmd, globals, locals): ...
def runcall(self, func, /, *args, **kw): ...
def __enter__(self): ...
def __exit__(self, *exc_info): ...
def runctx(statement, globals, locals, filename=None, sort=-1):
return _pyprofile._Utils(Profile).runctx(statement, globals, locals,
filename, sort) | null |
186,390 | __block_openssl_constructor = {
'blake2b', 'blake2s',
}
def __get_builtin_constructor(name):
cache = __builtin_constructor_cache
constructor = cache.get(name)
if constructor is not None:
return constructor
try:
if name in {'SHA1', 'sha1'}:
import _sha1
cache['SHA1'] = cache['sha1'] = _sha1.sha1
elif name in {'MD5', 'md5'}:
import _md5
cache['MD5'] = cache['md5'] = _md5.md5
elif name in {'SHA256', 'sha256', 'SHA224', 'sha224'}:
import _sha256
cache['SHA224'] = cache['sha224'] = _sha256.sha224
cache['SHA256'] = cache['sha256'] = _sha256.sha256
elif name in {'SHA512', 'sha512', 'SHA384', 'sha384'}:
import _sha512
cache['SHA384'] = cache['sha384'] = _sha512.sha384
cache['SHA512'] = cache['sha512'] = _sha512.sha512
elif name in {'blake2b', 'blake2s'}:
import _blake2
cache['blake2b'] = _blake2.blake2b
cache['blake2s'] = _blake2.blake2s
elif name in {'sha3_224', 'sha3_256', 'sha3_384', 'sha3_512'}:
import _sha3
cache['sha3_224'] = _sha3.sha3_224
cache['sha3_256'] = _sha3.sha3_256
cache['sha3_384'] = _sha3.sha3_384
cache['sha3_512'] = _sha3.sha3_512
elif name in {'shake_128', 'shake_256'}:
import _sha3
cache['shake_128'] = _sha3.shake_128
cache['shake_256'] = _sha3.shake_256
except ImportError:
pass # no extension module, this hash is unsupported.
constructor = cache.get(name)
if constructor is not None:
return constructor
raise ValueError('unsupported hash type ' + name)
try:
import _hashlib
new = __hash_new
__get_hash = __get_openssl_constructor
algorithms_available = algorithms_available.union(
_hashlib.openssl_md_meth_names)
except ImportError:
_hashlib = None
new = __py_new
__get_hash = __get_builtin_constructor
try:
# OpenSSL's PKCS5_PBKDF2_HMAC requires OpenSSL 1.0+ with HMAC and SHA
from _hashlib import pbkdf2_hmac
except ImportError:
from warnings import warn as _warn
_trans_5C = bytes((x ^ 0x5C) for x in range(256))
_trans_36 = bytes((x ^ 0x36) for x in range(256))
try:
# OpenSSL's scrypt requires OpenSSL 1.1+
from _hashlib import scrypt
except ImportError:
pass
def __get_openssl_constructor(name):
if name in __block_openssl_constructor:
# Prefer our builtin blake2 implementation.
return __get_builtin_constructor(name)
try:
# MD5, SHA1, and SHA2 are in all supported OpenSSL versions
# SHA3/shake are available in OpenSSL 1.1.1+
f = getattr(_hashlib, 'openssl_' + name)
# Allow the C module to raise ValueError. The function will be
# defined but the hash not actually available. Don't fall back to
# builtin if the current security policy blocks a digest, bpo#40695.
f(usedforsecurity=False)
# Use the C function directly (very fast)
return f
except (AttributeError, ValueError):
return __get_builtin_constructor(name) | null |
186,392 | __block_openssl_constructor = {
'blake2b', 'blake2s',
}
def __get_builtin_constructor(name):
cache = __builtin_constructor_cache
constructor = cache.get(name)
if constructor is not None:
return constructor
try:
if name in {'SHA1', 'sha1'}:
import _sha1
cache['SHA1'] = cache['sha1'] = _sha1.sha1
elif name in {'MD5', 'md5'}:
import _md5
cache['MD5'] = cache['md5'] = _md5.md5
elif name in {'SHA256', 'sha256', 'SHA224', 'sha224'}:
import _sha256
cache['SHA224'] = cache['sha224'] = _sha256.sha224
cache['SHA256'] = cache['sha256'] = _sha256.sha256
elif name in {'SHA512', 'sha512', 'SHA384', 'sha384'}:
import _sha512
cache['SHA384'] = cache['sha384'] = _sha512.sha384
cache['SHA512'] = cache['sha512'] = _sha512.sha512
elif name in {'blake2b', 'blake2s'}:
import _blake2
cache['blake2b'] = _blake2.blake2b
cache['blake2s'] = _blake2.blake2s
elif name in {'sha3_224', 'sha3_256', 'sha3_384', 'sha3_512'}:
import _sha3
cache['sha3_224'] = _sha3.sha3_224
cache['sha3_256'] = _sha3.sha3_256
cache['sha3_384'] = _sha3.sha3_384
cache['sha3_512'] = _sha3.sha3_512
elif name in {'shake_128', 'shake_256'}:
import _sha3
cache['shake_128'] = _sha3.shake_128
cache['shake_256'] = _sha3.shake_256
except ImportError:
pass # no extension module, this hash is unsupported.
constructor = cache.get(name)
if constructor is not None:
return constructor
raise ValueError('unsupported hash type ' + name)
try:
import _hashlib
new = __hash_new
__get_hash = __get_openssl_constructor
algorithms_available = algorithms_available.union(
_hashlib.openssl_md_meth_names)
except ImportError:
_hashlib = None
new = __py_new
__get_hash = __get_builtin_constructor
try:
# OpenSSL's PKCS5_PBKDF2_HMAC requires OpenSSL 1.0+ with HMAC and SHA
from _hashlib import pbkdf2_hmac
except ImportError:
from warnings import warn as _warn
_trans_5C = bytes((x ^ 0x5C) for x in range(256))
_trans_36 = bytes((x ^ 0x36) for x in range(256))
try:
# OpenSSL's scrypt requires OpenSSL 1.1+
from _hashlib import scrypt
except ImportError:
pass
The provided code snippet includes necessary dependencies for implementing the `__hash_new` function. Write a Python function `def __hash_new(name, data=b'', **kwargs)` to solve the following problem:
new(name, data=b'') - Return a new hashing object using the named algorithm; optionally initialized with data (which must be a bytes-like object).
Here is the function:
def __hash_new(name, data=b'', **kwargs):
"""new(name, data=b'') - Return a new hashing object using the named algorithm;
optionally initialized with data (which must be a bytes-like object).
"""
if name in __block_openssl_constructor:
# Prefer our builtin blake2 implementation.
return __get_builtin_constructor(name)(data, **kwargs)
try:
return _hashlib.new(name, data, **kwargs)
except ValueError:
# If the _hashlib module (OpenSSL) doesn't support the named
# hash, try using our builtin implementations.
# This allows for SHA224/256 and SHA384/512 support even though
# the OpenSSL library prior to 0.9.8 doesn't provide them.
return __get_builtin_constructor(name)(data) | new(name, data=b'') - Return a new hashing object using the named algorithm; optionally initialized with data (which must be a bytes-like object). |
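A usage sketch of the dispatch implemented by `__hash_new` (exposed publicly as `hashlib.new`); the blake2 call is routed to the builtin constructor because of `__block_openssl_constructor`:

```python
import hashlib

h = hashlib.new('sha256', b'abc')
print(h.hexdigest())
# ba7816bf8f01cfea414140de5dae2223b00361a396177a9cb410ff61f20015ad

h2 = hashlib.new('blake2b', b'abc', digest_size=16)  # handled by _blake2.blake2b
print(h2.digest_size)  # 16
```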
186,393 |
The provided code snippet includes necessary dependencies for implementing the `pbkdf2_hmac` function. Write a Python function `def pbkdf2_hmac(hash_name, password, salt, iterations, dklen=None)` to solve the following problem:
Password based key derivation function 2 (PKCS #5 v2.0). This Python implementation, based on the hmac module, is about as fast as OpenSSL's PKCS5_PBKDF2_HMAC for short passwords and much faster for long passwords.
Here is the function:
def pbkdf2_hmac(hash_name, password, salt, iterations, dklen=None):
"""Password based key derivation function 2 (PKCS #5 v2.0)
    This Python implementation, based on the hmac module, is about as fast
as OpenSSL's PKCS5_PBKDF2_HMAC for short passwords and much faster
for long passwords.
"""
_warn(
"Python implementation of pbkdf2_hmac() is deprecated.",
category=DeprecationWarning,
stacklevel=2
)
if not isinstance(hash_name, str):
raise TypeError(hash_name)
if not isinstance(password, (bytes, bytearray)):
password = bytes(memoryview(password))
if not isinstance(salt, (bytes, bytearray)):
salt = bytes(memoryview(salt))
# Fast inline HMAC implementation
inner = new(hash_name)
outer = new(hash_name)
blocksize = getattr(inner, 'block_size', 64)
if len(password) > blocksize:
password = new(hash_name, password).digest()
password = password + b'\x00' * (blocksize - len(password))
inner.update(password.translate(_trans_36))
outer.update(password.translate(_trans_5C))
def prf(msg, inner=inner, outer=outer):
# PBKDF2_HMAC uses the password as key. We can re-use the same
# digest objects and just update copies to skip initialization.
icpy = inner.copy()
ocpy = outer.copy()
icpy.update(msg)
ocpy.update(icpy.digest())
return ocpy.digest()
if iterations < 1:
raise ValueError(iterations)
if dklen is None:
dklen = outer.digest_size
if dklen < 1:
raise ValueError(dklen)
dkey = b''
loop = 1
from_bytes = int.from_bytes
while len(dkey) < dklen:
prev = prf(salt + loop.to_bytes(4, 'big'))
        # endianness doesn't matter here as long as to_bytes / from_bytes agree
rkey = int.from_bytes(prev, 'big')
for i in range(iterations - 1):
prev = prf(prev)
# rkey = rkey ^ prev
rkey ^= from_bytes(prev, 'big')
loop += 1
dkey += rkey.to_bytes(inner.digest_size, 'big')
    return dkey[:dklen] | Password based key derivation function 2 (PKCS #5 v2.0). This Python implementation, based on the hmac module, is about as fast as OpenSSL's PKCS5_PBKDF2_HMAC for short passwords and much faster for long passwords. |
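A short usage sketch through the standard hashlib.pbkdf2_hmac (the OpenSSL-backed version when available, otherwise the pure-Python fallback above); the parameter values here are illustrative only:

import hashlib

# Derive a 32-byte key from a password and salt with 100k iterations.
key = hashlib.pbkdf2_hmac('sha256', b'secret password', b'random salt', 100_000, dklen=32)
print(key.hex())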
186,404 | import __future__
import warnings
def _is_syntax_error(err1, err2):
rep1 = repr(err1)
rep2 = repr(err2)
if "was never closed" in rep1 and "was never closed" in rep2:
return False
if rep1 == rep2:
return True
return False | null |
186,405 | import __future__
import warnings
def _maybe_compile(compiler, source, filename, symbol):
# Check for source consisting of only blank lines and comments.
for line in source.split("\n"):
line = line.strip()
if line and line[0] != '#':
break # Leave it alone.
else:
if symbol != "eval":
source = "pass" # Replace it with a 'pass' statement
try:
return compiler(source, filename, symbol)
except SyntaxError: # Let other compile() errors propagate.
pass
# Catch syntax warnings after the first compile
# to emit warnings (SyntaxWarning, DeprecationWarning) at most once.
with warnings.catch_warnings():
warnings.simplefilter("error")
try:
compiler(source + "\n", filename, symbol)
except SyntaxError as e:
if "incomplete input" in str(e):
return None
raise
def _compile(source, filename, symbol):
return compile(source, filename, symbol, PyCF_DONT_IMPLY_DEDENT | PyCF_ALLOW_INCOMPLETE_INPUT)
The provided code snippet includes necessary dependencies for implementing the `compile_command` function. Write a Python function `def compile_command(source, filename="<input>", symbol="single")` to solve the following problem:
r"""Compile a command and determine whether it is incomplete. Arguments: source -- the source string; may contain \n characters filename -- optional filename from which source was read; default "<input>" symbol -- optional grammar start symbol; "single" (default), "exec" or "eval" Return value / exceptions raised: - Return a code object if the command is complete and valid - Return None if the command is incomplete - Raise SyntaxError, ValueError or OverflowError if the command is a syntax error (OverflowError and ValueError can be produced by malformed literals).
Here is the function:
def compile_command(source, filename="<input>", symbol="single"):
r"""Compile a command and determine whether it is incomplete.
Arguments:
source -- the source string; may contain \n characters
filename -- optional filename from which source was read; default
"<input>"
symbol -- optional grammar start symbol; "single" (default), "exec"
or "eval"
Return value / exceptions raised:
- Return a code object if the command is complete and valid
- Return None if the command is incomplete
- Raise SyntaxError, ValueError or OverflowError if the command is a
syntax error (OverflowError and ValueError can be produced by
malformed literals).
"""
return _maybe_compile(_compile, source, filename, symbol) | r"""Compile a command and determine whether it is incomplete. Arguments: source -- the source string; may contain \n characters filename -- optional filename from which source was read; default "<input>" symbol -- optional grammar start symbol; "single" (default), "exec" or "eval" Return value / exceptions raised: - Return a code object if the command is complete and valid - Return None if the command is incomplete - Raise SyntaxError, ValueError or OverflowError if the command is a syntax error (OverflowError and ValueError can be produced by malformed literals). |
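A quick sketch of the intended behaviour, using the public codeop.compile_command wrapper rather than the private helpers above:

from codeop import compile_command

print(compile_command("x = 1"))       # code object: complete statement
print(compile_command("if True:"))    # None: incomplete, keep reading input
# compile_command("return") raises SyntaxError: complete but invalid input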
186,406 | hasname = []
def def_op(name, op):
opname[op] = name
opmap[name] = op
def_op('POP_TOP', 1)
def_op('ROT_TWO', 2)
def_op('ROT_THREE', 3)
def_op('DUP_TOP', 4)
def_op('DUP_TOP_TWO', 5)
def_op('ROT_FOUR', 6)
def_op('NOP', 9)
def_op('UNARY_POSITIVE', 10)
def_op('UNARY_NEGATIVE', 11)
def_op('UNARY_NOT', 12)
def_op('UNARY_INVERT', 15)
def_op('BINARY_MATRIX_MULTIPLY', 16)
def_op('INPLACE_MATRIX_MULTIPLY', 17)
def_op('BINARY_POWER', 19)
def_op('BINARY_MULTIPLY', 20)
def_op('BINARY_MODULO', 22)
def_op('BINARY_ADD', 23)
def_op('BINARY_SUBTRACT', 24)
def_op('BINARY_SUBSCR', 25)
def_op('BINARY_FLOOR_DIVIDE', 26)
def_op('BINARY_TRUE_DIVIDE', 27)
def_op('INPLACE_FLOOR_DIVIDE', 28)
def_op('INPLACE_TRUE_DIVIDE', 29)
def_op('GET_LEN', 30)
def_op('MATCH_MAPPING', 31)
def_op('MATCH_SEQUENCE', 32)
def_op('MATCH_KEYS', 33)
def_op('COPY_DICT_WITHOUT_KEYS', 34)
def_op('WITH_EXCEPT_START', 49)
def_op('GET_AITER', 50)
def_op('GET_ANEXT', 51)
def_op('BEFORE_ASYNC_WITH', 52)
def_op('END_ASYNC_FOR', 54)
def_op('INPLACE_ADD', 55)
def_op('INPLACE_SUBTRACT', 56)
def_op('INPLACE_MULTIPLY', 57)
def_op('INPLACE_MODULO', 59)
def_op('STORE_SUBSCR', 60)
def_op('DELETE_SUBSCR', 61)
def_op('BINARY_LSHIFT', 62)
def_op('BINARY_RSHIFT', 63)
def_op('BINARY_AND', 64)
def_op('BINARY_XOR', 65)
def_op('BINARY_OR', 66)
def_op('INPLACE_POWER', 67)
def_op('GET_ITER', 68)
def_op('GET_YIELD_FROM_ITER', 69)
def_op('PRINT_EXPR', 70)
def_op('LOAD_BUILD_CLASS', 71)
def_op('YIELD_FROM', 72)
def_op('GET_AWAITABLE', 73)
def_op('LOAD_ASSERTION_ERROR', 74)
def_op('INPLACE_LSHIFT', 75)
def_op('INPLACE_RSHIFT', 76)
def_op('INPLACE_AND', 77)
def_op('INPLACE_XOR', 78)
def_op('INPLACE_OR', 79)
def_op('LIST_TO_TUPLE', 82)
def_op('RETURN_VALUE', 83)
def_op('IMPORT_STAR', 84)
def_op('SETUP_ANNOTATIONS', 85)
def_op('YIELD_VALUE', 86)
def_op('POP_BLOCK', 87)
def_op('POP_EXCEPT', 89)
def_op('UNPACK_SEQUENCE', 92)
def_op('UNPACK_EX', 94)
def_op('ROT_N', 99)
def_op('LOAD_CONST', 100)
def_op('BUILD_TUPLE', 102)
def_op('BUILD_LIST', 103)
def_op('BUILD_SET', 104)
def_op('BUILD_MAP', 105)
def_op('COMPARE_OP', 107)
def_op('IS_OP', 117)
def_op('CONTAINS_OP', 118)
def_op('RERAISE', 119)
def_op('LOAD_FAST', 124)
def_op('STORE_FAST', 125)
def_op('DELETE_FAST', 126)
def_op('GEN_START', 129)
def_op('RAISE_VARARGS', 130)
def_op('CALL_FUNCTION', 131)
def_op('MAKE_FUNCTION', 132)
def_op('BUILD_SLICE', 133)
def_op('LOAD_CLOSURE', 135)
def_op('LOAD_DEREF', 136)
def_op('STORE_DEREF', 137)
def_op('DELETE_DEREF', 138)
def_op('CALL_FUNCTION_KW', 141)
def_op('CALL_FUNCTION_EX', 142)
def_op('EXTENDED_ARG', 144)
def_op('LIST_APPEND', 145)
def_op('SET_ADD', 146)
def_op('MAP_ADD', 147)
def_op('LOAD_CLASSDEREF', 148)
def_op('MATCH_CLASS', 152)
def_op('FORMAT_VALUE', 155)
def_op('BUILD_CONST_KEY_MAP', 156)
def_op('BUILD_STRING', 157)
def_op('CALL_METHOD', 161)
def_op('LIST_EXTEND', 162)
def_op('SET_UPDATE', 163)
def_op('DICT_MERGE', 164)
def_op('DICT_UPDATE', 165)
def_op("LOAD_METHOD_SUPER", 198)
def_op("LOAD_ATTR_SUPER", 199)
del def_op, name_op, jrel_op, jabs_op
def name_op(name, op):
def_op(name, op)
hasname.append(op) | null |
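The tables built this way are the ones exposed through the dis module; a small lookup sketch (exact opcode numbers vary between interpreter versions, and the table above also contains non-upstream opcodes such as LOAD_METHOD_SUPER and LOAD_ATTR_SUPER that the stock dis module will not know about):

import dis

print(dis.opmap['LOAD_CONST'])                      # mnemonic -> opcode number
print(dis.opname[dis.opmap['LOAD_CONST']])          # opcode number -> mnemonic
print([dis.opname[op] for op in dis.hasname[:3]])   # ops whose argument indexes a name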
186,407 | hasjrel = []
def def_op(name, op):
opname[op] = name
opmap[name] = op
def_op('POP_TOP', 1)
def_op('ROT_TWO', 2)
def_op('ROT_THREE', 3)
def_op('DUP_TOP', 4)
def_op('DUP_TOP_TWO', 5)
def_op('ROT_FOUR', 6)
def_op('NOP', 9)
def_op('UNARY_POSITIVE', 10)
def_op('UNARY_NEGATIVE', 11)
def_op('UNARY_NOT', 12)
def_op('UNARY_INVERT', 15)
def_op('BINARY_MATRIX_MULTIPLY', 16)
def_op('INPLACE_MATRIX_MULTIPLY', 17)
def_op('BINARY_POWER', 19)
def_op('BINARY_MULTIPLY', 20)
def_op('BINARY_MODULO', 22)
def_op('BINARY_ADD', 23)
def_op('BINARY_SUBTRACT', 24)
def_op('BINARY_SUBSCR', 25)
def_op('BINARY_FLOOR_DIVIDE', 26)
def_op('BINARY_TRUE_DIVIDE', 27)
def_op('INPLACE_FLOOR_DIVIDE', 28)
def_op('INPLACE_TRUE_DIVIDE', 29)
def_op('GET_LEN', 30)
def_op('MATCH_MAPPING', 31)
def_op('MATCH_SEQUENCE', 32)
def_op('MATCH_KEYS', 33)
def_op('COPY_DICT_WITHOUT_KEYS', 34)
def_op('WITH_EXCEPT_START', 49)
def_op('GET_AITER', 50)
def_op('GET_ANEXT', 51)
def_op('BEFORE_ASYNC_WITH', 52)
def_op('END_ASYNC_FOR', 54)
def_op('INPLACE_ADD', 55)
def_op('INPLACE_SUBTRACT', 56)
def_op('INPLACE_MULTIPLY', 57)
def_op('INPLACE_MODULO', 59)
def_op('STORE_SUBSCR', 60)
def_op('DELETE_SUBSCR', 61)
def_op('BINARY_LSHIFT', 62)
def_op('BINARY_RSHIFT', 63)
def_op('BINARY_AND', 64)
def_op('BINARY_XOR', 65)
def_op('BINARY_OR', 66)
def_op('INPLACE_POWER', 67)
def_op('GET_ITER', 68)
def_op('GET_YIELD_FROM_ITER', 69)
def_op('PRINT_EXPR', 70)
def_op('LOAD_BUILD_CLASS', 71)
def_op('YIELD_FROM', 72)
def_op('GET_AWAITABLE', 73)
def_op('LOAD_ASSERTION_ERROR', 74)
def_op('INPLACE_LSHIFT', 75)
def_op('INPLACE_RSHIFT', 76)
def_op('INPLACE_AND', 77)
def_op('INPLACE_XOR', 78)
def_op('INPLACE_OR', 79)
def_op('LIST_TO_TUPLE', 82)
def_op('RETURN_VALUE', 83)
def_op('IMPORT_STAR', 84)
def_op('SETUP_ANNOTATIONS', 85)
def_op('YIELD_VALUE', 86)
def_op('POP_BLOCK', 87)
def_op('POP_EXCEPT', 89)
def_op('UNPACK_SEQUENCE', 92)
def_op('UNPACK_EX', 94)
def_op('ROT_N', 99)
def_op('LOAD_CONST', 100)
def_op('BUILD_TUPLE', 102)
def_op('BUILD_LIST', 103)
def_op('BUILD_SET', 104)
def_op('BUILD_MAP', 105)
def_op('COMPARE_OP', 107)
def_op('IS_OP', 117)
def_op('CONTAINS_OP', 118)
def_op('RERAISE', 119)
def_op('LOAD_FAST', 124)
def_op('STORE_FAST', 125)
def_op('DELETE_FAST', 126)
def_op('GEN_START', 129)
def_op('RAISE_VARARGS', 130)
def_op('CALL_FUNCTION', 131)
def_op('MAKE_FUNCTION', 132)
def_op('BUILD_SLICE', 133)
def_op('LOAD_CLOSURE', 135)
def_op('LOAD_DEREF', 136)
def_op('STORE_DEREF', 137)
def_op('DELETE_DEREF', 138)
def_op('CALL_FUNCTION_KW', 141)
def_op('CALL_FUNCTION_EX', 142)
def_op('EXTENDED_ARG', 144)
def_op('LIST_APPEND', 145)
def_op('SET_ADD', 146)
def_op('MAP_ADD', 147)
def_op('LOAD_CLASSDEREF', 148)
def_op('MATCH_CLASS', 152)
def_op('FORMAT_VALUE', 155)
def_op('BUILD_CONST_KEY_MAP', 156)
def_op('BUILD_STRING', 157)
def_op('CALL_METHOD', 161)
def_op('LIST_EXTEND', 162)
def_op('SET_UPDATE', 163)
def_op('DICT_MERGE', 164)
def_op('DICT_UPDATE', 165)
def_op("LOAD_METHOD_SUPER", 198)
def_op("LOAD_ATTR_SUPER", 199)
del def_op, name_op, jrel_op, jabs_op
def jrel_op(name, op):
def_op(name, op)
hasjrel.append(op) | null |
186,408 | hasjabs = []
def def_op(name, op):
opname[op] = name
opmap[name] = op
def_op('POP_TOP', 1)
def_op('ROT_TWO', 2)
def_op('ROT_THREE', 3)
def_op('DUP_TOP', 4)
def_op('DUP_TOP_TWO', 5)
def_op('ROT_FOUR', 6)
def_op('NOP', 9)
def_op('UNARY_POSITIVE', 10)
def_op('UNARY_NEGATIVE', 11)
def_op('UNARY_NOT', 12)
def_op('UNARY_INVERT', 15)
def_op('BINARY_MATRIX_MULTIPLY', 16)
def_op('INPLACE_MATRIX_MULTIPLY', 17)
def_op('BINARY_POWER', 19)
def_op('BINARY_MULTIPLY', 20)
def_op('BINARY_MODULO', 22)
def_op('BINARY_ADD', 23)
def_op('BINARY_SUBTRACT', 24)
def_op('BINARY_SUBSCR', 25)
def_op('BINARY_FLOOR_DIVIDE', 26)
def_op('BINARY_TRUE_DIVIDE', 27)
def_op('INPLACE_FLOOR_DIVIDE', 28)
def_op('INPLACE_TRUE_DIVIDE', 29)
def_op('GET_LEN', 30)
def_op('MATCH_MAPPING', 31)
def_op('MATCH_SEQUENCE', 32)
def_op('MATCH_KEYS', 33)
def_op('COPY_DICT_WITHOUT_KEYS', 34)
def_op('WITH_EXCEPT_START', 49)
def_op('GET_AITER', 50)
def_op('GET_ANEXT', 51)
def_op('BEFORE_ASYNC_WITH', 52)
def_op('END_ASYNC_FOR', 54)
def_op('INPLACE_ADD', 55)
def_op('INPLACE_SUBTRACT', 56)
def_op('INPLACE_MULTIPLY', 57)
def_op('INPLACE_MODULO', 59)
def_op('STORE_SUBSCR', 60)
def_op('DELETE_SUBSCR', 61)
def_op('BINARY_LSHIFT', 62)
def_op('BINARY_RSHIFT', 63)
def_op('BINARY_AND', 64)
def_op('BINARY_XOR', 65)
def_op('BINARY_OR', 66)
def_op('INPLACE_POWER', 67)
def_op('GET_ITER', 68)
def_op('GET_YIELD_FROM_ITER', 69)
def_op('PRINT_EXPR', 70)
def_op('LOAD_BUILD_CLASS', 71)
def_op('YIELD_FROM', 72)
def_op('GET_AWAITABLE', 73)
def_op('LOAD_ASSERTION_ERROR', 74)
def_op('INPLACE_LSHIFT', 75)
def_op('INPLACE_RSHIFT', 76)
def_op('INPLACE_AND', 77)
def_op('INPLACE_XOR', 78)
def_op('INPLACE_OR', 79)
def_op('LIST_TO_TUPLE', 82)
def_op('RETURN_VALUE', 83)
def_op('IMPORT_STAR', 84)
def_op('SETUP_ANNOTATIONS', 85)
def_op('YIELD_VALUE', 86)
def_op('POP_BLOCK', 87)
def_op('POP_EXCEPT', 89)
def_op('UNPACK_SEQUENCE', 92)
def_op('UNPACK_EX', 94)
def_op('ROT_N', 99)
def_op('LOAD_CONST', 100)
def_op('BUILD_TUPLE', 102)
def_op('BUILD_LIST', 103)
def_op('BUILD_SET', 104)
def_op('BUILD_MAP', 105)
def_op('COMPARE_OP', 107)
def_op('IS_OP', 117)
def_op('CONTAINS_OP', 118)
def_op('RERAISE', 119)
def_op('LOAD_FAST', 124)
def_op('STORE_FAST', 125)
def_op('DELETE_FAST', 126)
def_op('GEN_START', 129)
def_op('RAISE_VARARGS', 130)
def_op('CALL_FUNCTION', 131)
def_op('MAKE_FUNCTION', 132)
def_op('BUILD_SLICE', 133)
def_op('LOAD_CLOSURE', 135)
def_op('LOAD_DEREF', 136)
def_op('STORE_DEREF', 137)
def_op('DELETE_DEREF', 138)
def_op('CALL_FUNCTION_KW', 141)
def_op('CALL_FUNCTION_EX', 142)
def_op('EXTENDED_ARG', 144)
def_op('LIST_APPEND', 145)
def_op('SET_ADD', 146)
def_op('MAP_ADD', 147)
def_op('LOAD_CLASSDEREF', 148)
def_op('MATCH_CLASS', 152)
def_op('FORMAT_VALUE', 155)
def_op('BUILD_CONST_KEY_MAP', 156)
def_op('BUILD_STRING', 157)
def_op('CALL_METHOD', 161)
def_op('LIST_EXTEND', 162)
def_op('SET_UPDATE', 163)
def_op('DICT_MERGE', 164)
def_op('DICT_UPDATE', 165)
def_op("LOAD_METHOD_SUPER", 198)
def_op("LOAD_ATTR_SUPER", 199)
del def_op, name_op, jrel_op, jabs_op
def jabs_op(name, op):
def_op(name, op)
hasjabs.append(op) | null |
186,409 | import sys
import sysconfig
def _aix_tag(vrtl, bd):
# type: (List[int], int) -> str
# Infer the ABI bitwidth from maxsize (assuming 64 bit as the default)
_sz = 32 if sys.maxsize == (2**31-1) else 64
# vrtl[version, release, technology_level]
return "aix-{:1x}{:1d}{:02d}-{:04d}-{}".format(vrtl[0], vrtl[1], vrtl[2], bd, _sz)
def _aix_bgt():
# type: () -> List[int]
gnu_type = sysconfig.get_config_var("BUILD_GNU_TYPE")
if not gnu_type:
raise ValueError("BUILD_GNU_TYPE is not defined")
return _aix_vrtl(vrmf=gnu_type)
The provided code snippet includes necessary dependencies for implementing the `aix_buildtag` function. Write a Python function `def aix_buildtag()` to solve the following problem:
Return the platform_tag of the system Python was built on.
Here is the function:
def aix_buildtag():
# type: () -> str
"""
Return the platform_tag of the system Python was built on.
"""
# AIX_BUILDDATE is defined by configure with:
# lslpp -Lcq bos.mp64 | awk -F: '{ print $NF }'
build_date = sysconfig.get_config_var("AIX_BUILDDATE")
try:
build_date = int(build_date)
except (ValueError, TypeError):
raise ValueError(f"AIX_BUILDDATE is not defined or invalid: "
f"{build_date!r}")
return _aix_tag(_aix_bgt(), build_date) | Return the platform_tag of the system Python was built on. |
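A small illustration of the tag format _aix_tag produces, assuming the helper above is in scope and a 64-bit interpreter; the version/release/TL and build-date values are made up for the example:

# Hypothetical AIX 7.2 TL3, build date 1845, on a 64-bit build.
print(_aix_tag([7, 2, 3], 1845))   # -> 'aix-7203-1845-64'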
186,420 | from .. import fixer_base
from ..pygram import token
from ..fixer_util import syms, Node, Leaf
def fixup_simple_stmt(parent, i, stmt_node):
""" if there is a semi-colon all the parts count as part of the same
simple_stmt. We just want the __metaclass__ part so we move
everything after the semi-colon into its own simple_stmt node
"""
for semi_ind, node in enumerate(stmt_node.children):
if node.type == token.SEMI: # *sigh*
break
else:
return
node.remove() # kill the semicolon
new_expr = Node(syms.expr_stmt, [])
new_stmt = Node(syms.simple_stmt, [new_expr])
while stmt_node.children[semi_ind:]:
move_node = stmt_node.children[semi_ind]
new_expr.append_child(move_node.clone())
move_node.remove()
parent.insert_child(i, new_stmt)
new_leaf1 = new_stmt.children[0].children[0]
old_leaf1 = stmt_node.children[0].children[0]
new_leaf1.prefix = old_leaf1.prefix
def remove_trailing_newline(node):
if node.children and node.children[-1].type == token.NEWLINE:
node.children[-1].remove()
def find_metas(cls_node):
# find the suite node (Mmm, sweet nodes)
for node in cls_node.children:
if node.type == syms.suite:
break
else:
raise ValueError("No class suite!")
# look for simple_stmt[ expr_stmt[ Leaf('__metaclass__') ] ]
for i, simple_node in list(enumerate(node.children)):
if simple_node.type == syms.simple_stmt and simple_node.children:
expr_node = simple_node.children[0]
if expr_node.type == syms.expr_stmt and expr_node.children:
# Check if the expr_node is a simple assignment.
left_node = expr_node.children[0]
if isinstance(left_node, Leaf) and \
left_node.value == '__metaclass__':
# We found an assignment to __metaclass__.
fixup_simple_stmt(node, i, simple_node)
remove_trailing_newline(simple_node)
yield (node, i, simple_node) | null |
186,437 | import string, re
from codecs import BOM_UTF8, lookup
from lib2to3.pgen2.token import *
from . import token
def group(*choices): return '(' + '|'.join(choices) + ')'
def maybe(*choices): return group(*choices) + '?' | null |
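These helpers just build regular-expression fragments; a two-line sketch of what they return, assuming the definitions above:

print(group('0[xX]', '0[bB]'))   # '(0[xX]|0[bB])'
print(maybe('[lL]'))             # '([lL])?'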
186,444 | from .pgen2 import token
from .pytree import Leaf, Node
from .pygram import python_symbols as syms
from . import patcomp
class Node(Base):
def __init__(self,type, children,
context=None,
prefix=None,
fixers_applied=None):
def __repr__(self):
def __unicode__(self):
def _eq(self, other):
def clone(self):
def post_order(self):
def pre_order(self):
def prefix(self):
def prefix(self, prefix):
def set_child(self, i, child):
def insert_child(self, i, child):
def append_child(self, child):
class Leaf(Base):
def __init__(self, type, value,
context=None,
prefix=None,
fixers_applied=[]):
def __repr__(self):
def __unicode__(self):
def _eq(self, other):
def clone(self):
def leaves(self):
def post_order(self):
def pre_order(self):
def prefix(self):
def prefix(self, prefix):
def KeywordArg(keyword, value):
return Node(syms.argument,
[keyword, Leaf(token.EQUAL, "="), value]) | null |
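A minimal usage sketch via the public lib2to3.fixer_util module (lib2to3 is deprecated but still importable on the interpreter versions this code targets; Name and Number are its standard leaf constructors):

from lib2to3.fixer_util import KeywordArg, Name, Number

# Build the "timeout=30" argument node and render it back to source text.
node = KeywordArg(Name('timeout'), Number('30'))
print(str(node))   # 'timeout=30'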
186,454 | from .pgen2 import token
from .pytree import Leaf, Node
from .pygram import python_symbols as syms
from . import patcomp
def LParen():
def RParen():
class Node(Base):
def __init__(self,type, children,
context=None,
prefix=None,
fixers_applied=None):
def __repr__(self):
def __unicode__(self):
def _eq(self, other):
def clone(self):
def post_order(self):
def pre_order(self):
def prefix(self):
def prefix(self, prefix):
def set_child(self, i, child):
def insert_child(self, i, child):
def append_child(self, child):
def parenthesize(node):
return Node(syms.atom, [LParen(), node, RParen()]) | null |
186,467 | import collections
import os
import re
import sys
import subprocess
import functools
import itertools
def _sys_version(sys_version=None):
""" Returns a parsed version of Python's sys.version as tuple
(name, version, branch, revision, buildno, builddate, compiler)
referring to the Python implementation name, version, branch,
revision, build number, build date/time as string and the compiler
identification string.
Note that unlike the Python sys.version, the returned value
for the Python version will always include the patchlevel (it
defaults to '.0').
The function returns empty strings for tuple entries that
cannot be determined.
sys_version may be given to parse an alternative version
string, e.g. if the version was read from a different Python
interpreter.
"""
# Get the Python version
if sys_version is None:
sys_version = sys.version
# Try the cache first
result = _sys_version_cache.get(sys_version, None)
if result is not None:
return result
# Parse it
if 'IronPython' in sys_version:
# IronPython
name = 'IronPython'
if sys_version.startswith('IronPython'):
match = _ironpython_sys_version_parser.match(sys_version)
else:
match = _ironpython26_sys_version_parser.match(sys_version)
if match is None:
raise ValueError(
'failed to parse IronPython sys.version: %s' %
repr(sys_version))
version, alt_version, compiler = match.groups()
buildno = ''
builddate = ''
elif sys.platform.startswith('java'):
# Jython
name = 'Jython'
match = _sys_version_parser.match(sys_version)
if match is None:
raise ValueError(
'failed to parse Jython sys.version: %s' %
repr(sys_version))
version, buildno, builddate, buildtime, _ = match.groups()
if builddate is None:
builddate = ''
compiler = sys.platform
elif "PyPy" in sys_version:
# PyPy
name = "PyPy"
match = _pypy_sys_version_parser.match(sys_version)
if match is None:
raise ValueError("failed to parse PyPy sys.version: %s" %
repr(sys_version))
version, buildno, builddate, buildtime = match.groups()
compiler = ""
else:
# CPython
match = _sys_version_parser.match(sys_version)
if match is None:
raise ValueError(
'failed to parse CPython sys.version: %s' %
repr(sys_version))
version, buildno, builddate, buildtime, compiler = \
match.groups()
name = 'CPython'
if builddate is None:
builddate = ''
elif buildtime:
builddate = builddate + ' ' + buildtime
if hasattr(sys, '_git'):
_, branch, revision = sys._git
elif hasattr(sys, '_mercurial'):
_, branch, revision = sys._mercurial
else:
branch = ''
revision = ''
# Add the patchlevel version if missing
l = version.split('.')
if len(l) == 2:
l.append('0')
version = '.'.join(l)
# Build and cache the result
result = (name, version, branch, revision, buildno, builddate, compiler)
_sys_version_cache[sys_version] = result
return result
The provided code snippet includes necessary dependencies for implementing the `python_implementation` function. Write a Python function `def python_implementation()` to solve the following problem:
Returns a string identifying the Python implementation. Currently, the following implementations are identified: 'CPython' (C implementation of Python), 'IronPython' (.NET implementation of Python), 'Jython' (Java implementation of Python), 'PyPy' (Python implementation of Python).
Here is the function:
def python_implementation():
""" Returns a string identifying the Python implementation.
Currently, the following implementations are identified:
'CPython' (C implementation of Python),
'IronPython' (.NET implementation of Python),
'Jython' (Java implementation of Python),
'PyPy' (Python implementation of Python).
"""
return _sys_version()[0] | Returns a string identifying the Python implementation. Currently, the following implementations are identified: 'CPython' (C implementation of Python), 'IronPython' (.NET implementation of Python), 'Jython' (Java implementation of Python), 'PyPy' (Python implementation of Python). |
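A usage sketch through the public platform module; it also covers the sibling accessors in the next few rows, which all read other slots of the same _sys_version() tuple:

import platform

print(platform.python_implementation())   # e.g. 'CPython'
print(platform.python_version_tuple())    # e.g. ('3', '10', '5')
print(platform.python_branch() or '<unknown>')
print(platform.python_revision() or '<unknown>')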
186,468 | import collections
import os
import re
import sys
import subprocess
import functools
import itertools
def _sys_version(sys_version=None):
""" Returns a parsed version of Python's sys.version as tuple
(name, version, branch, revision, buildno, builddate, compiler)
referring to the Python implementation name, version, branch,
revision, build number, build date/time as string and the compiler
identification string.
Note that unlike the Python sys.version, the returned value
for the Python version will always include the patchlevel (it
defaults to '.0').
The function returns empty strings for tuple entries that
cannot be determined.
sys_version may be given to parse an alternative version
string, e.g. if the version was read from a different Python
interpreter.
"""
# Get the Python version
if sys_version is None:
sys_version = sys.version
# Try the cache first
result = _sys_version_cache.get(sys_version, None)
if result is not None:
return result
# Parse it
if 'IronPython' in sys_version:
# IronPython
name = 'IronPython'
if sys_version.startswith('IronPython'):
match = _ironpython_sys_version_parser.match(sys_version)
else:
match = _ironpython26_sys_version_parser.match(sys_version)
if match is None:
raise ValueError(
'failed to parse IronPython sys.version: %s' %
repr(sys_version))
version, alt_version, compiler = match.groups()
buildno = ''
builddate = ''
elif sys.platform.startswith('java'):
# Jython
name = 'Jython'
match = _sys_version_parser.match(sys_version)
if match is None:
raise ValueError(
'failed to parse Jython sys.version: %s' %
repr(sys_version))
version, buildno, builddate, buildtime, _ = match.groups()
if builddate is None:
builddate = ''
compiler = sys.platform
elif "PyPy" in sys_version:
# PyPy
name = "PyPy"
match = _pypy_sys_version_parser.match(sys_version)
if match is None:
raise ValueError("failed to parse PyPy sys.version: %s" %
repr(sys_version))
version, buildno, builddate, buildtime = match.groups()
compiler = ""
else:
# CPython
match = _sys_version_parser.match(sys_version)
if match is None:
raise ValueError(
'failed to parse CPython sys.version: %s' %
repr(sys_version))
version, buildno, builddate, buildtime, compiler = \
match.groups()
name = 'CPython'
if builddate is None:
builddate = ''
elif buildtime:
builddate = builddate + ' ' + buildtime
if hasattr(sys, '_git'):
_, branch, revision = sys._git
elif hasattr(sys, '_mercurial'):
_, branch, revision = sys._mercurial
else:
branch = ''
revision = ''
# Add the patchlevel version if missing
l = version.split('.')
if len(l) == 2:
l.append('0')
version = '.'.join(l)
# Build and cache the result
result = (name, version, branch, revision, buildno, builddate, compiler)
_sys_version_cache[sys_version] = result
return result
The provided code snippet includes necessary dependencies for implementing the `python_version_tuple` function. Write a Python function `def python_version_tuple()` to solve the following problem:
Returns the Python version as tuple (major, minor, patchlevel) of strings. Note that unlike the Python sys.version, the returned value will always include the patchlevel (it defaults to 0).
Here is the function:
def python_version_tuple():
""" Returns the Python version as tuple (major, minor, patchlevel)
of strings.
Note that unlike the Python sys.version, the returned value
will always include the patchlevel (it defaults to 0).
"""
return tuple(_sys_version()[1].split('.')) | Returns the Python version as tuple (major, minor, patchlevel) of strings. Note that unlike the Python sys.version, the returned value will always include the patchlevel (it defaults to 0). |
186,469 | import collections
import os
import re
import sys
import subprocess
import functools
import itertools
def _sys_version(sys_version=None):
""" Returns a parsed version of Python's sys.version as tuple
(name, version, branch, revision, buildno, builddate, compiler)
referring to the Python implementation name, version, branch,
revision, build number, build date/time as string and the compiler
identification string.
Note that unlike the Python sys.version, the returned value
for the Python version will always include the patchlevel (it
defaults to '.0').
The function returns empty strings for tuple entries that
cannot be determined.
sys_version may be given to parse an alternative version
string, e.g. if the version was read from a different Python
interpreter.
"""
# Get the Python version
if sys_version is None:
sys_version = sys.version
# Try the cache first
result = _sys_version_cache.get(sys_version, None)
if result is not None:
return result
# Parse it
if 'IronPython' in sys_version:
# IronPython
name = 'IronPython'
if sys_version.startswith('IronPython'):
match = _ironpython_sys_version_parser.match(sys_version)
else:
match = _ironpython26_sys_version_parser.match(sys_version)
if match is None:
raise ValueError(
'failed to parse IronPython sys.version: %s' %
repr(sys_version))
version, alt_version, compiler = match.groups()
buildno = ''
builddate = ''
elif sys.platform.startswith('java'):
# Jython
name = 'Jython'
match = _sys_version_parser.match(sys_version)
if match is None:
raise ValueError(
'failed to parse Jython sys.version: %s' %
repr(sys_version))
version, buildno, builddate, buildtime, _ = match.groups()
if builddate is None:
builddate = ''
compiler = sys.platform
elif "PyPy" in sys_version:
# PyPy
name = "PyPy"
match = _pypy_sys_version_parser.match(sys_version)
if match is None:
raise ValueError("failed to parse PyPy sys.version: %s" %
repr(sys_version))
version, buildno, builddate, buildtime = match.groups()
compiler = ""
else:
# CPython
match = _sys_version_parser.match(sys_version)
if match is None:
raise ValueError(
'failed to parse CPython sys.version: %s' %
repr(sys_version))
version, buildno, builddate, buildtime, compiler = \
match.groups()
name = 'CPython'
if builddate is None:
builddate = ''
elif buildtime:
builddate = builddate + ' ' + buildtime
if hasattr(sys, '_git'):
_, branch, revision = sys._git
elif hasattr(sys, '_mercurial'):
_, branch, revision = sys._mercurial
else:
branch = ''
revision = ''
# Add the patchlevel version if missing
l = version.split('.')
if len(l) == 2:
l.append('0')
version = '.'.join(l)
# Build and cache the result
result = (name, version, branch, revision, buildno, builddate, compiler)
_sys_version_cache[sys_version] = result
return result
The provided code snippet includes necessary dependencies for implementing the `python_branch` function. Write a Python function `def python_branch()` to solve the following problem:
Returns a string identifying the Python implementation branch. For CPython this is the SCM branch from which the Python binary was built. If not available, an empty string is returned.
Here is the function:
def python_branch():
""" Returns a string identifying the Python implementation
branch.
For CPython this is the SCM branch from which the
Python binary was built.
If not available, an empty string is returned.
"""
return _sys_version()[2] | Returns a string identifying the Python implementation branch. For CPython this is the SCM branch from which the Python binary was built. If not available, an empty string is returned. |
186,470 | import collections
import os
import re
import sys
import subprocess
import functools
import itertools
def _sys_version(sys_version=None):
""" Returns a parsed version of Python's sys.version as tuple
(name, version, branch, revision, buildno, builddate, compiler)
referring to the Python implementation name, version, branch,
revision, build number, build date/time as string and the compiler
identification string.
Note that unlike the Python sys.version, the returned value
for the Python version will always include the patchlevel (it
defaults to '.0').
The function returns empty strings for tuple entries that
cannot be determined.
sys_version may be given to parse an alternative version
string, e.g. if the version was read from a different Python
interpreter.
"""
# Get the Python version
if sys_version is None:
sys_version = sys.version
# Try the cache first
result = _sys_version_cache.get(sys_version, None)
if result is not None:
return result
# Parse it
if 'IronPython' in sys_version:
# IronPython
name = 'IronPython'
if sys_version.startswith('IronPython'):
match = _ironpython_sys_version_parser.match(sys_version)
else:
match = _ironpython26_sys_version_parser.match(sys_version)
if match is None:
raise ValueError(
'failed to parse IronPython sys.version: %s' %
repr(sys_version))
version, alt_version, compiler = match.groups()
buildno = ''
builddate = ''
elif sys.platform.startswith('java'):
# Jython
name = 'Jython'
match = _sys_version_parser.match(sys_version)
if match is None:
raise ValueError(
'failed to parse Jython sys.version: %s' %
repr(sys_version))
version, buildno, builddate, buildtime, _ = match.groups()
if builddate is None:
builddate = ''
compiler = sys.platform
elif "PyPy" in sys_version:
# PyPy
name = "PyPy"
match = _pypy_sys_version_parser.match(sys_version)
if match is None:
raise ValueError("failed to parse PyPy sys.version: %s" %
repr(sys_version))
version, buildno, builddate, buildtime = match.groups()
compiler = ""
else:
# CPython
match = _sys_version_parser.match(sys_version)
if match is None:
raise ValueError(
'failed to parse CPython sys.version: %s' %
repr(sys_version))
version, buildno, builddate, buildtime, compiler = \
match.groups()
name = 'CPython'
if builddate is None:
builddate = ''
elif buildtime:
builddate = builddate + ' ' + buildtime
if hasattr(sys, '_git'):
_, branch, revision = sys._git
elif hasattr(sys, '_mercurial'):
_, branch, revision = sys._mercurial
else:
branch = ''
revision = ''
# Add the patchlevel version if missing
l = version.split('.')
if len(l) == 2:
l.append('0')
version = '.'.join(l)
# Build and cache the result
result = (name, version, branch, revision, buildno, builddate, compiler)
_sys_version_cache[sys_version] = result
return result
The provided code snippet includes necessary dependencies for implementing the `python_revision` function. Write a Python function `def python_revision()` to solve the following problem:
Returns a string identifying the Python implementation revision. For CPython this is the SCM revision from which the Python binary was built. If not available, an empty string is returned.
Here is the function:
def python_revision():
""" Returns a string identifying the Python implementation
revision.
For CPython this is the SCM revision from which the
Python binary was built.
If not available, an empty string is returned.
"""
return _sys_version()[3] | Returns a string identifying the Python implementation revision. For CPython this is the SCM revision from which the Python binary was built. If not available, an empty string is returned. |
186,471 | import collections
import os
import re
import sys
import subprocess
import functools
import itertools
_os_release_candidates = ("/etc/os-release", "/usr/lib/os-release")
_os_release_cache = None
def _parse_os_release(lines):
# These fields are mandatory fields with well-known defaults
# in practice all Linux distributions override NAME, ID, and PRETTY_NAME.
info = {
"NAME": "Linux",
"ID": "linux",
"PRETTY_NAME": "Linux",
}
for line in lines:
mo = _os_release_line.match(line)
if mo is not None:
info[mo.group('name')] = _os_release_unescape.sub(
r"\1", mo.group('value')
)
return info
The provided code snippet includes necessary dependencies for implementing the `freedesktop_os_release` function. Write a Python function `def freedesktop_os_release()` to solve the following problem:
Return operating system identification from freedesktop.org os-release
Here is the function:
def freedesktop_os_release():
"""Return operation system identification from freedesktop.org os-release
"""
global _os_release_cache
if _os_release_cache is None:
errno = None
for candidate in _os_release_candidates:
try:
with open(candidate, encoding="utf-8") as f:
_os_release_cache = _parse_os_release(f)
break
except OSError as e:
errno = e.errno
else:
raise OSError(
errno,
f"Unable to read files {', '.join(_os_release_candidates)}"
)
    return _os_release_cache.copy() | Return operating system identification from freedesktop.org os-release |
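A guarded usage sketch, assuming Python 3.10+ where platform.freedesktop_os_release() is public; it only succeeds on systems that actually ship an os-release file:

import platform

try:
    info = platform.freedesktop_os_release()
    print(info.get('PRETTY_NAME'), info.get('ID'))
except OSError:
    print('no /etc/os-release or /usr/lib/os-release on this system')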
186,472 | import _imp
import _io
import sys
import _warnings
import marshal
_CASE_INSENSITIVE_PLATFORMS_STR_KEY = 'win',
_CASE_INSENSITIVE_PLATFORMS_BYTES_KEY = b'cygwin', b'darwin'
_CASE_INSENSITIVE_PLATFORMS = (_CASE_INSENSITIVE_PLATFORMS_BYTES_KEY
                               + _CASE_INSENSITIVE_PLATFORMS_STR_KEY)
_relax_case = _make_relax_case()
def _make_relax_case():
if sys.platform.startswith(_CASE_INSENSITIVE_PLATFORMS):
if sys.platform.startswith(_CASE_INSENSITIVE_PLATFORMS_STR_KEY):
key = 'PYTHONCASEOK'
else:
key = b'PYTHONCASEOK'
def _relax_case():
"""True if filenames must be checked case-insensitively and ignore environment flags are not set."""
return not sys.flags.ignore_environment and key in _os.environ
else:
def _relax_case():
"""True if filenames must be checked case-insensitively."""
return False
return _relax_case | null |
186,477 | _bootstrap = None
import _imp
import _io
import sys
import _warnings
import marshal
The provided code snippet includes necessary dependencies for implementing the `_check_name` function. Write a Python function `def _check_name(method)` to solve the following problem:
Decorator to verify that the module being requested matches the one the loader can handle. The first argument (self) must define _name which the second argument is compared against. If the comparison fails then ImportError is raised.
Here is the function:
def _check_name(method):
"""Decorator to verify that the module being requested matches the one the
loader can handle.
The first argument (self) must define _name which the second argument is
compared against. If the comparison fails then ImportError is raised.
"""
def _check_name_wrapper(self, name=None, *args, **kwargs):
if name is None:
name = self.name
elif self.name != name:
raise ImportError('loader for %s cannot handle %s' %
(self.name, name), name=name)
return method(self, name, *args, **kwargs)
# FIXME: @_check_name is used to define class methods before the
# _bootstrap module is set by _set_bootstrap_module().
if _bootstrap is not None:
_wrap = _bootstrap._wrap
else:
def _wrap(new, old):
for replace in ['__module__', '__name__', '__qualname__', '__doc__']:
if hasattr(old, replace):
setattr(new, replace, getattr(old, replace))
new.__dict__.update(old.__dict__)
_wrap(_check_name_wrapper, method)
return _check_name_wrapper | Decorator to verify that the module being requested matches the one the loader can handle. The first argument (self) must define _name which the second argument is compared against. If the comparison fails then ImportError is raised. |
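An illustrative sketch of the guard the decorator adds, using a hypothetical loader class (DemoLoader and its fake path are invented for this example; only _check_name from the snippet above is assumed to be in scope):

class DemoLoader:
    def __init__(self, name):
        self.name = name

    @_check_name
    def get_filename(self, name):
        # Hypothetical location, purely for demonstration.
        return '/fake/location/of/' + name

loader = DemoLoader('pkg.mod')
print(loader.get_filename())          # name defaults to self.name
try:
    loader.get_filename('other.mod')  # mismatching name
except ImportError as exc:
    print(exc)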
186,478 | import _imp
import _io
import sys
import _warnings
import marshal
The provided code snippet includes necessary dependencies for implementing the `_find_module_shim` function. Write a Python function `def _find_module_shim(self, fullname)` to solve the following problem:
Try to find a loader for the specified module by delegating to self.find_loader(). This method is deprecated in favor of finder.find_spec().
Here is the function:
def _find_module_shim(self, fullname):
"""Try to find a loader for the specified module by delegating to
self.find_loader().
This method is deprecated in favor of finder.find_spec().
"""
_warnings.warn("find_module() is deprecated and "
"slated for removal in Python 3.12; use find_spec() instead",
DeprecationWarning)
# Call find_loader(). If it returns a string (indicating this
# is a namespace package portion), generate a warning and
# return None.
loader, portions = self.find_loader(fullname)
if loader is None and len(portions):
msg = 'Not importing directory {}: missing __init__'
_warnings.warn(msg.format(portions[0]), ImportWarning)
return loader | Try to find a loader for the specified module by delegating to self.find_loader(). This method is deprecated in favor of finder.find_spec(). |
186,479 | _bootstrap = None
import _imp
import _io
import sys
import _warnings
import marshal
def _unpack_uint32(data):
"""Convert 4 bytes in little-endian to an integer."""
assert len(data) == 4
return int.from_bytes(data, 'little')
The provided code snippet includes necessary dependencies for implementing the `_validate_timestamp_pyc` function. Write a Python function `def _validate_timestamp_pyc(data, source_mtime, source_size, name, exc_details)` to solve the following problem:
Validate a pyc against the source last-modified time. *data* is the contents of the pyc file. (Only the first 16 bytes are required.) *source_mtime* is the last modified timestamp of the source file. *source_size* is None or the size of the source file in bytes. *name* is the name of the module being imported. It is used for logging. *exc_details* is a dictionary passed to ImportError, for improved debugging, if it is raised. An ImportError is raised if the bytecode is stale.
Here is the function:
def _validate_timestamp_pyc(data, source_mtime, source_size, name,
exc_details):
"""Validate a pyc against the source last-modified time.
*data* is the contents of the pyc file. (Only the first 16 bytes are
required.)
*source_mtime* is the last modified timestamp of the source file.
*source_size* is None or the size of the source file in bytes.
*name* is the name of the module being imported. It is used for logging.
    *exc_details* is a dictionary passed to ImportError, if it is raised, for
improved debugging.
An ImportError is raised if the bytecode is stale.
"""
if _unpack_uint32(data[8:12]) != (source_mtime & 0xFFFFFFFF):
message = f'bytecode is stale for {name!r}'
_bootstrap._verbose_message('{}', message)
raise ImportError(message, **exc_details)
if (source_size is not None and
_unpack_uint32(data[12:16]) != (source_size & 0xFFFFFFFF)):
        raise ImportError(f'bytecode is stale for {name!r}', **exc_details) | Validate a pyc against the source last-modified time. *data* is the contents of the pyc file. (Only the first 16 bytes are required.) *source_mtime* is the last modified timestamp of the source file. *source_size* is None or the size of the source file in bytes. *name* is the name of the module being imported. It is used for logging. *exc_details* is a dictionary passed to ImportError, for improved debugging, if it is raised. An ImportError is raised if the bytecode is stale. |
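For orientation, a sketch that reads the 16-byte pyc header these checks look at, taken from a stdlib module's cached pyc (it assumes the stdlib has been byte-compiled on the machine; the field layout is magic, flags, source mtime, source size):

import importlib.util, struct

pyc_path = importlib.util.cache_from_source(importlib.util.__file__)
with open(pyc_path, 'rb') as f:
    magic, flags, mtime, size = struct.unpack('<4sIII', f.read(16))
print(flags, mtime, size)   # flags == 0 means timestamp-based validation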
186,481 | _bootstrap = None
import _imp
import _io
import sys
import _warnings
import marshal
_code_type = type(_write_atomic.__code__)
The provided code snippet includes necessary dependencies for implementing the `_compile_bytecode` function. Write a Python function `def _compile_bytecode(data, name=None, bytecode_path=None, source_path=None)` to solve the following problem:
Compile bytecode as found in a pyc.
Here is the function:
def _compile_bytecode(data, name=None, bytecode_path=None, source_path=None):
"""Compile bytecode as found in a pyc."""
code = marshal.loads(data)
if isinstance(code, _code_type):
_bootstrap._verbose_message('code object from {!r}', bytecode_path)
if source_path is not None:
_imp._fix_co_filename(code, source_path)
return code
else:
raise ImportError('Non-code object in {!r}'.format(bytecode_path),
name=name, path=bytecode_path) | Compile bytecode as found in a pyc. |
186,483 | import _imp
import _io
import sys
import _warnings
import marshal
def spec_from_file_location(name, location=None, *, loader=None,
submodule_search_locations=_POPULATE):
"""Return a module spec based on a file location.
To indicate that the module is a package, set
submodule_search_locations to a list of directory paths. An
    empty list is sufficient, though it's not otherwise useful to the
import system.
The loader must take a spec as its only __init__() arg.
"""
if location is None:
# The caller may simply want a partially populated location-
# oriented spec. So we set the location to a bogus value and
# fill in as much as we can.
location = '<unknown>'
if hasattr(loader, 'get_filename'):
# ExecutionLoader
try:
location = loader.get_filename(name)
except ImportError:
pass
else:
location = _os.fspath(location)
if not _path_isabs(location):
try:
location = _path_join(_os.getcwd(), location)
except OSError:
pass
# If the location is on the filesystem, but doesn't actually exist,
# we could return None here, indicating that the location is not
# valid. However, we don't have a good way of testing since an
# indirect location (e.g. a zip file or URL) will look like a
# non-existent file relative to the filesystem.
spec = _bootstrap.ModuleSpec(name, loader, origin=location)
spec._set_fileattr = True
# Pick a loader if one wasn't provided.
if loader is None:
for loader_class, suffixes in _get_supported_file_loaders():
if location.endswith(tuple(suffixes)):
loader = loader_class(name, location)
spec.loader = loader
break
else:
return None
# Set submodule_search_paths appropriately.
if submodule_search_locations is _POPULATE:
# Check the loader.
if hasattr(loader, 'is_package'):
try:
is_package = loader.is_package(name)
except ImportError:
pass
else:
if is_package:
spec.submodule_search_locations = []
else:
spec.submodule_search_locations = submodule_search_locations
if spec.submodule_search_locations == []:
if location:
dirname = _path_split(location)[0]
spec.submodule_search_locations.append(dirname)
return spec
class SourceFileLoader(FileLoader, SourceLoader):
"""Concrete implementation of SourceLoader using the file system."""
def path_stats(self, path):
"""Return the metadata for the path."""
st = _path_stat(path)
return {'mtime': st.st_mtime, 'size': st.st_size}
def _cache_bytecode(self, source_path, bytecode_path, data):
# Adapt between the two APIs
mode = _calc_mode(source_path)
return self.set_data(bytecode_path, data, _mode=mode)
def set_data(self, path, data, *, _mode=0o666):
"""Write bytes data to a file."""
parent, filename = _path_split(path)
path_parts = []
# Figure out what directories are missing.
while parent and not _path_isdir(parent):
parent, part = _path_split(parent)
path_parts.append(part)
# Create needed directories.
for part in reversed(path_parts):
parent = _path_join(parent, part)
try:
_os.mkdir(parent)
except FileExistsError:
# Probably another Python process already created the dir.
continue
except OSError as exc:
# Could be a permission error, read-only filesystem: just forget
# about writing the data.
_bootstrap._verbose_message('could not create {!r}: {!r}',
parent, exc)
return
try:
_write_atomic(path, data, _mode)
_bootstrap._verbose_message('created {!r}', path)
except OSError as exc:
# Same as above: just don't write the bytecode.
_bootstrap._verbose_message('could not create {!r}: {!r}', path,
exc)
class SourcelessFileLoader(FileLoader, _LoaderBasics):
"""Loader which handles sourceless file imports."""
def get_code(self, fullname):
path = self.get_filename(fullname)
data = self.get_data(path)
# Call _classify_pyc to do basic validation of the pyc but ignore the
# result. There's no source to check against.
exc_details = {
'name': fullname,
'path': path,
}
_classify_pyc(data, fullname, exc_details)
return _compile_bytecode(
memoryview(data)[16:],
name=fullname,
bytecode_path=path,
)
def get_source(self, fullname):
"""Return None as there is no source code."""
return None
def _fix_up_module(ns, name, pathname, cpathname=None):
# This function is used by PyImport_ExecCodeModuleObject().
loader = ns.get('__loader__')
spec = ns.get('__spec__')
if not loader:
if spec:
loader = spec.loader
elif pathname == cpathname:
loader = SourcelessFileLoader(name, pathname)
else:
loader = SourceFileLoader(name, pathname)
if not spec:
spec = spec_from_file_location(name, pathname, loader=loader)
try:
ns['__spec__'] = spec
ns['__loader__'] = loader
ns['__file__'] = pathname
ns['__cached__'] = cpathname
except Exception:
# Not important enough to report.
pass | null |
186,484 | import _imp
import _io
import sys
import _warnings
import marshal
class PathFinder:
"""Meta path finder for sys.path and package __path__ attributes."""
def invalidate_caches():
"""Call the invalidate_caches() method on all path entry finders
        stored in sys.path_importer_cache (where implemented)."""
for name, finder in list(sys.path_importer_cache.items()):
if finder is None:
del sys.path_importer_cache[name]
elif hasattr(finder, 'invalidate_caches'):
finder.invalidate_caches()
# Also invalidate the caches of _NamespacePaths
# https://bugs.python.org/issue45703
_NamespacePath._epoch += 1
def _path_hooks(path):
"""Search sys.path_hooks for a finder for 'path'."""
if sys.path_hooks is not None and not sys.path_hooks:
_warnings.warn('sys.path_hooks is empty', ImportWarning)
for hook in sys.path_hooks:
try:
return hook(path)
except ImportError:
continue
else:
return None
def _path_importer_cache(cls, path):
"""Get the finder for the path entry from sys.path_importer_cache.
If the path entry is not in the cache, find the appropriate finder
and cache it. If no finder is available, store None.
"""
if path == '':
try:
path = _os.getcwd()
except FileNotFoundError:
# Don't cache the failure as the cwd can easily change to
# a valid directory later on.
return None
try:
finder = sys.path_importer_cache[path]
except KeyError:
finder = cls._path_hooks(path)
sys.path_importer_cache[path] = finder
return finder
def _legacy_get_spec(cls, fullname, finder):
# This would be a good place for a DeprecationWarning if
# we ended up going that route.
if hasattr(finder, 'find_loader'):
msg = (f"{_bootstrap._object_name(finder)}.find_spec() not found; "
"falling back to find_loader()")
_warnings.warn(msg, ImportWarning)
loader, portions = finder.find_loader(fullname)
else:
msg = (f"{_bootstrap._object_name(finder)}.find_spec() not found; "
"falling back to find_module()")
_warnings.warn(msg, ImportWarning)
loader = finder.find_module(fullname)
portions = []
if loader is not None:
return _bootstrap.spec_from_loader(fullname, loader)
spec = _bootstrap.ModuleSpec(fullname, None)
spec.submodule_search_locations = portions
return spec
def _get_spec(cls, fullname, path, target=None):
"""Find the loader or namespace_path for this module/package name."""
# If this ends up being a namespace package, namespace_path is
# the list of paths that will become its __path__
namespace_path = []
for entry in path:
if not isinstance(entry, (str, bytes)):
continue
finder = cls._path_importer_cache(entry)
if finder is not None:
if hasattr(finder, 'find_spec'):
spec = finder.find_spec(fullname, target)
else:
spec = cls._legacy_get_spec(fullname, finder)
if spec is None:
continue
if spec.loader is not None:
return spec
portions = spec.submodule_search_locations
if portions is None:
raise ImportError('spec missing loader')
# This is possibly part of a namespace package.
# Remember these path entries (if any) for when we
# create a namespace package, and continue iterating
# on path.
namespace_path.extend(portions)
else:
spec = _bootstrap.ModuleSpec(fullname, None)
spec.submodule_search_locations = namespace_path
return spec
def find_spec(cls, fullname, path=None, target=None):
"""Try to find a spec for 'fullname' on sys.path or 'path'.
The search is based on sys.path_hooks and sys.path_importer_cache.
"""
if path is None:
path = sys.path
spec = cls._get_spec(fullname, path, target)
if spec is None:
return None
elif spec.loader is None:
namespace_path = spec.submodule_search_locations
if namespace_path:
# We found at least one namespace path. Return a spec which
# can create the namespace package.
spec.origin = None
spec.submodule_search_locations = _NamespacePath(fullname, namespace_path, cls._get_spec)
return spec
else:
return None
else:
return spec
def find_module(cls, fullname, path=None):
"""find the module on sys.path or 'path' based on sys.path_hooks and
sys.path_importer_cache.
This method is deprecated. Use find_spec() instead.
"""
_warnings.warn("PathFinder.find_module() is deprecated and "
"slated for removal in Python 3.12; use find_spec() instead",
DeprecationWarning)
spec = cls.find_spec(fullname, path)
if spec is None:
return None
return spec.loader
def find_distributions(*args, **kwargs):
"""
Find distributions.
Return an iterable of all Distribution instances capable of
loading the metadata for packages matching ``context.name``
(or all names if ``None`` indicated) along the paths in the list
of directories ``context.path``.
"""
from importlib.metadata import MetadataPathFinder
return MetadataPathFinder.find_distributions(*args, **kwargs)
class FileFinder:
"""File-based finder.
Interactions with the file system are cached for performance, being
refreshed when the directory the finder is handling has been modified.
"""
def __init__(self, path, *loader_details):
"""Initialize with the path to search on and a variable number of
2-tuples containing the loader and the file suffixes the loader
recognizes."""
loaders = []
for loader, suffixes in loader_details:
loaders.extend((suffix, loader) for suffix in suffixes)
self._loaders = loaders
# Base (directory) path
self.path = path or '.'
if not _path_isabs(self.path):
self.path = _path_join(_os.getcwd(), self.path)
self._path_mtime = -1
self._path_cache = set()
self._relaxed_path_cache = set()
def invalidate_caches(self):
"""Invalidate the directory mtime."""
self._path_mtime = -1
find_module = _find_module_shim
def find_loader(self, fullname):
"""Try to find a loader for the specified module, or the namespace
package portions. Returns (loader, list-of-portions).
This method is deprecated. Use find_spec() instead.
"""
_warnings.warn("FileFinder.find_loader() is deprecated and "
"slated for removal in Python 3.12; use find_spec() instead",
DeprecationWarning)
spec = self.find_spec(fullname)
if spec is None:
return None, []
return spec.loader, spec.submodule_search_locations or []
def _get_spec(self, loader_class, fullname, path, smsl, target):
loader = loader_class(fullname, path)
return spec_from_file_location(fullname, path, loader=loader,
submodule_search_locations=smsl)
def find_spec(self, fullname, target=None):
"""Try to find a spec for the specified module.
Returns the matching spec, or None if not found.
"""
is_namespace = False
tail_module = fullname.rpartition('.')[2]
try:
mtime = _path_stat(self.path or _os.getcwd()).st_mtime
except OSError:
mtime = -1
if mtime != self._path_mtime:
self._fill_cache()
self._path_mtime = mtime
# tail_module keeps the original casing, for __file__ and friends
if _relax_case():
cache = self._relaxed_path_cache
cache_module = tail_module.lower()
else:
cache = self._path_cache
cache_module = tail_module
# Check if the module is the name of a directory (and thus a package).
if cache_module in cache:
base_path = _path_join(self.path, tail_module)
for suffix, loader_class in self._loaders:
init_filename = '__init__' + suffix
full_path = _path_join(base_path, init_filename)
if _path_isfile(full_path):
return self._get_spec(loader_class, fullname, full_path, [base_path], target)
else:
# If a namespace package, return the path if we don't
# find a module in the next section.
is_namespace = _path_isdir(base_path)
# Check for a file w/ a proper suffix exists.
for suffix, loader_class in self._loaders:
try:
full_path = _path_join(self.path, tail_module + suffix)
except ValueError:
return None
_bootstrap._verbose_message('trying {}', full_path, verbosity=2)
if cache_module + suffix in cache:
if _path_isfile(full_path):
return self._get_spec(loader_class, fullname, full_path,
None, target)
if is_namespace:
_bootstrap._verbose_message('possible namespace for {}', base_path)
spec = _bootstrap.ModuleSpec(fullname, None)
spec.submodule_search_locations = [base_path]
return spec
return None
def _fill_cache(self):
"""Fill the cache of potential modules and packages for this directory."""
path = self.path
try:
contents = _os.listdir(path or _os.getcwd())
except (FileNotFoundError, PermissionError, NotADirectoryError):
# Directory has either been removed, turned into a file, or made
# unreadable.
contents = []
# We store two cached versions, to handle runtime changes of the
# PYTHONCASEOK environment variable.
if not sys.platform.startswith('win'):
self._path_cache = set(contents)
else:
# Windows users can import modules with case-insensitive file
# suffixes (for legacy reasons). Make the suffix lowercase here
# so it's done once instead of for every import. This is safe as
# the specified suffixes to check against are always specified in a
# case-sensitive manner.
lower_suffix_contents = set()
for item in contents:
name, dot, suffix = item.partition('.')
if dot:
new_name = '{}.{}'.format(name, suffix.lower())
else:
new_name = name
lower_suffix_contents.add(new_name)
self._path_cache = lower_suffix_contents
if sys.platform.startswith(_CASE_INSENSITIVE_PLATFORMS):
self._relaxed_path_cache = {fn.lower() for fn in contents}
def path_hook(cls, *loader_details):
"""A class method which returns a closure to use on sys.path_hook
which will return an instance using the specified loaders and the path
called on the closure.
If the path called on the closure is not a directory, ImportError is
raised.
"""
def path_hook_for_FileFinder(path):
"""Path hook for importlib.machinery.FileFinder."""
if not _path_isdir(path):
raise ImportError('only directories are supported', path=path)
return cls(path, *loader_details)
return path_hook_for_FileFinder
def __repr__(self):
return 'FileFinder({!r})'.format(self.path)
def _get_supported_file_loaders():
"""Returns a list of file-based module loaders.
Each item is a tuple (loader, suffixes).
"""
extensions = ExtensionFileLoader, _imp.extension_suffixes()
source = SourceFileLoader, SOURCE_SUFFIXES
bytecode = SourcelessFileLoader, BYTECODE_SUFFIXES
return [extensions, source, bytecode]
def _set_bootstrap_module(_bootstrap_module):
global _bootstrap
_bootstrap = _bootstrap_module
The provided code snippet includes necessary dependencies for implementing the `_install` function. Write a Python function `def _install(_bootstrap_module)` to solve the following problem:
Install the path-based import components.
Here is the function:
def _install(_bootstrap_module):
"""Install the path-based import components."""
_set_bootstrap_module(_bootstrap_module)
supported_loaders = _get_supported_file_loaders()
sys.path_hooks.extend([FileFinder.path_hook(*supported_loaders)])
sys.meta_path.append(PathFinder) | Install the path-based import components. |
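For illustration only, here is a minimal hedged sketch of exercising a path hook like the one _install registers, using the public importlib.machinery.FileFinder; the directory '/tmp' and the module name 'some_module' are placeholder assumptions, not part of the original snippet.
import importlib.machinery as machinery
hook = machinery.FileFinder.path_hook(
    (machinery.SourceFileLoader, machinery.SOURCE_SUFFIXES))
finder = hook('/tmp')                    # path_hook raises ImportError for non-directories
print(finder.find_spec('some_module'))   # a ModuleSpec if /tmp/some_module.py exists, else None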
186,485 | from . import _bootstrap_external
from . import machinery
try:
import _frozen_importlib
except ImportError as exc:
if exc.name != '_frozen_importlib':
raise
_frozen_importlib = None
try:
import _frozen_importlib_external
except ImportError:
_frozen_importlib_external = _bootstrap_external
from ._abc import Loader
import abc
import warnings
from typing import BinaryIO, Iterable, Text
from typing import Protocol, runtime_checkable
try:
import _frozen_importlib as _bootstrap
except ImportError:
from . import _bootstrap
_bootstrap._setup(sys, _imp)
else:
# importlib._bootstrap is the built-in import, ensure we don't create
# a second copy of the module.
_bootstrap.__name__ = 'importlib._bootstrap'
_bootstrap.__package__ = 'importlib'
try:
_bootstrap.__file__ = __file__.replace('__init__.py', '_bootstrap.py')
except NameError:
# __file__ is not guaranteed to be defined, e.g. if this code gets
# frozen by a tool like cx_Freeze.
pass
sys.modules['importlib._bootstrap'] = _bootstrap
try:
import _frozen_importlib_external as _bootstrap_external
except ImportError:
from . import _bootstrap_external
_bootstrap_external._set_bootstrap_module(_bootstrap)
_bootstrap._bootstrap_external = _bootstrap_external
else:
_bootstrap_external.__name__ = 'importlib._bootstrap_external'
_bootstrap_external.__package__ = 'importlib'
try:
_bootstrap_external.__file__ = __file__.replace('__init__.py', '_bootstrap_external.py')
except NameError:
# __file__ is not guaranteed to be defined, e.g. if this code gets
# frozen by a tool like cx_Freeze.
pass
sys.modules['importlib._bootstrap_external'] = _bootstrap_external
def _register(abstract_cls, *classes):
for cls in classes:
abstract_cls.register(cls)
if _frozen_importlib is not None:
try:
frozen_cls = getattr(_frozen_importlib, cls.__name__)
except AttributeError:
frozen_cls = getattr(_frozen_importlib_external, cls.__name__)
abstract_cls.register(frozen_cls) | null |
186,486 | from ._abc import Loader
from ._bootstrap import module_from_spec
from ._bootstrap import _resolve_name
from ._bootstrap import spec_from_loader
from ._bootstrap import _find_spec
from ._bootstrap_external import MAGIC_NUMBER
from ._bootstrap_external import _RAW_MAGIC_NUMBER
from ._bootstrap_external import cache_from_source
from ._bootstrap_external import decode_source
from ._bootstrap_external import source_from_cache
from ._bootstrap_external import spec_from_file_location
from contextlib import contextmanager
import _imp
import functools
import sys
import types
import warnings
The provided code snippet includes necessary dependencies for implementing the `set_package` function. Write a Python function `def set_package(fxn)` to solve the following problem:
Set __package__ on the returned module. This function is deprecated.
Here is the function:
def set_package(fxn):
"""Set __package__ on the returned module.
This function is deprecated.
"""
@functools.wraps(fxn)
def set_package_wrapper(*args, **kwargs):
warnings.warn('The import system now takes care of this automatically; '
'this decorator is slated for removal in Python 3.12',
DeprecationWarning, stacklevel=2)
module = fxn(*args, **kwargs)
if getattr(module, '__package__', None) is None:
module.__package__ = module.__name__
if not hasattr(module, '__path__'):
module.__package__ = module.__package__.rpartition('.')[0]
return module
return set_package_wrapper | Set __package__ on the returned module. This function is deprecated. |
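A hedged usage sketch of the deprecated set_package decorator: make_module and the dotted name 'demo.sub' are hypothetical, chosen only to show how __package__ gets filled in.
import types
@set_package
def make_module(name):
    # Return a bare module object with __package__ unset and no __path__.
    return types.ModuleType(name)
m = make_module('demo.sub')   # emits a DeprecationWarning
print(m.__package__)          # 'demo': trimmed from 'demo.sub' because the module has no __path__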
186,487 | from ._abc import Loader
from ._bootstrap import module_from_spec
from ._bootstrap import _resolve_name
from ._bootstrap import spec_from_loader
from ._bootstrap import _find_spec
from ._bootstrap_external import MAGIC_NUMBER
from ._bootstrap_external import _RAW_MAGIC_NUMBER
from ._bootstrap_external import cache_from_source
from ._bootstrap_external import decode_source
from ._bootstrap_external import source_from_cache
from ._bootstrap_external import spec_from_file_location
from contextlib import contextmanager
import _imp
import functools
import sys
import types
import warnings
The provided code snippet includes necessary dependencies for implementing the `set_loader` function. Write a Python function `def set_loader(fxn)` to solve the following problem:
Set __loader__ on the returned module. This function is deprecated.
Here is the function:
def set_loader(fxn):
"""Set __loader__ on the returned module.
This function is deprecated.
"""
@functools.wraps(fxn)
def set_loader_wrapper(self, *args, **kwargs):
warnings.warn('The import system now takes care of this automatically; '
'this decorator is slated for removal in Python 3.12',
DeprecationWarning, stacklevel=2)
module = fxn(self, *args, **kwargs)
if getattr(module, '__loader__', None) is None:
module.__loader__ = self
return module
return set_loader_wrapper | Set __loader__ on the returned module. This function is deprecated. |
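Similarly, a small hypothetical sketch for set_loader: TinyLoader is an assumed stand-in loader, used only to show that __loader__ is filled in when the wrapped method leaves it unset.
import types
class TinyLoader:
    @set_loader
    def load_module(self, fullname):
        return types.ModuleType(fullname)
mod = TinyLoader().load_module('demo')   # emits a DeprecationWarning
print(mod.__loader__)                    # the TinyLoader instance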
186,488 | from ._abc import Loader
from ._bootstrap import module_from_spec
from ._bootstrap import _resolve_name
from ._bootstrap import spec_from_loader
from ._bootstrap import _find_spec
from ._bootstrap_external import MAGIC_NUMBER
from ._bootstrap_external import _RAW_MAGIC_NUMBER
from ._bootstrap_external import cache_from_source
from ._bootstrap_external import decode_source
from ._bootstrap_external import source_from_cache
from ._bootstrap_external import spec_from_file_location
from contextlib import contextmanager
import _imp
import functools
import sys
import types
import warnings
@contextmanager   # _module_to_load is consumed via "with ... as module" below
def _module_to_load(name):
is_reload = name in sys.modules
module = sys.modules.get(name)
if not is_reload:
# This must be done before open() is called as the 'io' module
# implicitly imports 'locale' and would otherwise trigger an
# infinite loop.
module = type(sys)(name)
# This must be done before putting the module in sys.modules
# (otherwise an optimization shortcut in import.c becomes wrong)
module.__initializing__ = True
sys.modules[name] = module
try:
yield module
except Exception:
if not is_reload:
try:
del sys.modules[name]
except KeyError:
pass
finally:
module.__initializing__ = False
The provided code snippet includes necessary dependencies for implementing the `module_for_loader` function. Write a Python function `def module_for_loader(fxn)` to solve the following problem:
Decorator to handle selecting the proper module for loaders. The decorated function is passed the module to use instead of the module name. The module passed in to the function is either from sys.modules if it already exists or is a new module. If the module is new, then __name__ is set the first argument to the method, __loader__ is set to self, and __package__ is set accordingly (if self.is_package() is defined) will be set before it is passed to the decorated function (if self.is_package() does not work for the module it will be set post-load). If an exception is raised and the decorator created the module it is subsequently removed from sys.modules. The decorator assumes that the decorated function takes the module name as the second argument.
Here is the function:
def module_for_loader(fxn):
"""Decorator to handle selecting the proper module for loaders.
The decorated function is passed the module to use instead of the module
name. The module passed in to the function is either from sys.modules if
it already exists or is a new module. If the module is new, then __name__
is set the first argument to the method, __loader__ is set to self, and
__package__ is set accordingly (if self.is_package() is defined) will be set
before it is passed to the decorated function (if self.is_package() does
not work for the module it will be set post-load).
If an exception is raised and the decorator created the module it is
subsequently removed from sys.modules.
The decorator assumes that the decorated function takes the module name as
the second argument.
"""
warnings.warn('The import system now takes care of this automatically; '
'this decorator is slated for removal in Python 3.12',
DeprecationWarning, stacklevel=2)
@functools.wraps(fxn)
def module_for_loader_wrapper(self, fullname, *args, **kwargs):
with _module_to_load(fullname) as module:
module.__loader__ = self
try:
is_package = self.is_package(fullname)
except (ImportError, AttributeError):
pass
else:
if is_package:
module.__package__ = fullname
else:
module.__package__ = fullname.rpartition('.')[0]
# If __package__ was not set above, __import__() will do it later.
return fxn(self, module, *args, **kwargs)
return module_for_loader_wrapper | Decorator to handle selecting the proper module for loaders. The decorated function is passed the module to use instead of the module name. The module passed in to the function is either from sys.modules if it already exists or is a new module. If the module is new, then __name__ is set the first argument to the method, __loader__ is set to self, and __package__ is set accordingly (if self.is_package() is defined) will be set before it is passed to the decorated function (if self.is_package() does not work for the module it will be set post-load). If an exception is raised and the decorator created the module it is subsequently removed from sys.modules. The decorator assumes that the decorated function takes the module name as the second argument. |
186,489 | from itertools import filterfalse
The provided code snippet includes necessary dependencies for implementing the `unique_everseen` function. Write a Python function `def unique_everseen(iterable, key=None)` to solve the following problem:
List unique elements, preserving order. Remember all elements ever seen.
Here is the function:
def unique_everseen(iterable, key=None):
"List unique elements, preserving order. Remember all elements ever seen."
# unique_everseen('AAAABBBCCDAABBB') --> A B C D
# unique_everseen('ABBCcAD', str.lower) --> A B C D
seen = set()
seen_add = seen.add
if key is None:
for element in filterfalse(seen.__contains__, iterable):
seen_add(element)
yield element
else:
for element in iterable:
k = key(element)
if k not in seen:
seen_add(k)
yield element | List unique elements, preserving order. Remember all elements ever seen. |
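A quick usage sketch mirroring the doctest-style comments in the function body:
print(list(unique_everseen('AAAABBBCCDAABBB')))         # ['A', 'B', 'C', 'D']
print(list(unique_everseen('ABBCcAD', key=str.lower)))  # ['A', 'B', 'C', 'D']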
186,491 | import os
import pathlib
import tempfile
import functools
import contextlib
import types
import importlib
from typing import Union, Any, Optional
from .abc import ResourceReader, Traversable
from ._adapters import wrap_spec
The provided code snippet includes necessary dependencies for implementing the `_` function. Write a Python function `def _(path)` to solve the following problem:
Degenerate behavior for pathlib.Path objects.
Here is the function:
def _(path):
"""
Degenerate behavior for pathlib.Path objects.
"""
yield path | Degenerate behavior for pathlib.Path objects. |
186,492 | import os
import io
from . import _common
from ._common import as_file, files
from .abc import ResourceReader
from contextlib import suppress
from importlib.abc import ResourceLoader
from importlib.machinery import ModuleSpec
from io import BytesIO, TextIOWrapper
from pathlib import Path
from types import ModuleType
from typing import ContextManager, Iterable, Union
from typing import cast
from typing.io import BinaryIO, TextIO
from collections.abc import Sequence
from functools import singledispatch
Package = Union[str, ModuleType]
Resource = Union[str, os.PathLike]
def open_binary(package: Package, resource: Resource) -> BinaryIO:
"""Return a file-like object opened for binary reading of the resource."""
resource = _common.normalize_path(resource)
package = _common.get_package(package)
reader = _common.get_resource_reader(package)
if reader is not None:
return reader.open_resource(resource)
spec = cast(ModuleSpec, package.__spec__)
# Using pathlib doesn't work well here due to the lack of 'strict'
# argument for pathlib.Path.resolve() prior to Python 3.6.
if spec.submodule_search_locations is not None:
paths = spec.submodule_search_locations
elif spec.origin is not None:
paths = [os.path.dirname(os.path.abspath(spec.origin))]
for package_path in paths:
full_path = os.path.join(package_path, resource)
try:
return open(full_path, mode='rb')
except OSError:
# Just assume the loader is a resource loader; all the relevant
# importlib.machinery loaders are and an AttributeError for
# get_data() will make it clear what is needed from the loader.
loader = cast(ResourceLoader, spec.loader)
data = None
if hasattr(spec.loader, 'get_data'):
with suppress(OSError):
data = loader.get_data(full_path)
if data is not None:
return BytesIO(data)
raise FileNotFoundError(f'{resource!r} resource not found in {spec.name!r}')
The provided code snippet includes necessary dependencies for implementing the `read_binary` function. Write a Python function `def read_binary(package: Package, resource: Resource) -> bytes` to solve the following problem:
Return the binary contents of the resource.
Here is the function:
def read_binary(package: Package, resource: Resource) -> bytes:
"""Return the binary contents of the resource."""
with open_binary(package, resource) as fp:
return fp.read() | Return the binary contents of the resource. |
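A hedged usage sketch for read_binary; the package name 'mypkg' and the resource 'logo.png' are placeholders for a package that actually ships such a data file.
data = read_binary('mypkg', 'logo.png')
print(len(data), 'bytes')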
186,493 | import os
import io
from . import _common
from ._common import as_file, files
from .abc import ResourceReader
from contextlib import suppress
from importlib.abc import ResourceLoader
from importlib.machinery import ModuleSpec
from io import BytesIO, TextIOWrapper
from pathlib import Path
from types import ModuleType
from typing import ContextManager, Iterable, Union
from typing import cast
from typing.io import BinaryIO, TextIO
from collections.abc import Sequence
from functools import singledispatch
Package = Union[str, ModuleType]
Resource = Union[str, os.PathLike]
def open_text(
package: Package,
resource: Resource,
encoding: str = 'utf-8',
errors: str = 'strict',
) -> TextIO:
"""Return a file-like object opened for text reading of the resource."""
return TextIOWrapper(
open_binary(package, resource), encoding=encoding, errors=errors
)
The provided code snippet includes necessary dependencies for implementing the `read_text` function. Write a Python function `def read_text( package: Package, resource: Resource, encoding: str = 'utf-8', errors: str = 'strict', ) -> str` to solve the following problem:
Return the decoded string of the resource. The decoding-related arguments have the same semantics as those of bytes.decode().
Here is the function:
def read_text(
package: Package,
resource: Resource,
encoding: str = 'utf-8',
errors: str = 'strict',
) -> str:
"""Return the decoded string of the resource.
The decoding-related arguments have the same semantics as those of
bytes.decode().
"""
with open_text(package, resource, encoding, errors) as fp:
return fp.read() | Return the decoded string of the resource. The decoding-related arguments have the same semantics as those of bytes.decode(). |
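A hedged usage sketch for read_text showing the decoding arguments; 'mypkg' and 'notes.txt' are placeholder names.
text = read_text('mypkg', 'notes.txt', encoding='utf-8', errors='replace')
print(text.splitlines()[0])   # first line of the bundled text file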
186,494 | import os
import io
from . import _common
from ._common import as_file, files
from .abc import ResourceReader
from contextlib import suppress
from importlib.abc import ResourceLoader
from importlib.machinery import ModuleSpec
from io import BytesIO, TextIOWrapper
from pathlib import Path
from types import ModuleType
from typing import ContextManager, Iterable, Union
from typing import cast
from typing.io import BinaryIO, TextIO
from collections.abc import Sequence
from functools import singledispatch
Package = Union[str, ModuleType]
def contents(package: Package) -> Iterable[str]:
"""Return an iterable of entries in 'package'.
Note that not all entries are resources. Specifically, directories are
not considered resources. Use `is_resource()` on each entry returned here
to check if it is a resource or not.
"""
package = _common.get_package(package)
reader = _common.get_resource_reader(package)
if reader is not None:
return _ensure_sequence(reader.contents())
transversable = _common.from_package(package)
if transversable.is_dir():
return list(item.name for item in transversable.iterdir())
return []
The provided code snippet includes necessary dependencies for implementing the `is_resource` function. Write a Python function `def is_resource(package: Package, name: str) -> bool` to solve the following problem:
True if 'name' is a resource inside 'package'. Directories are *not* resources.
Here is the function:
def is_resource(package: Package, name: str) -> bool:
"""True if 'name' is a resource inside 'package'.
Directories are *not* resources.
"""
package = _common.get_package(package)
_common.normalize_path(name)
reader = _common.get_resource_reader(package)
if reader is not None:
return reader.is_resource(name)
package_contents = set(contents(package))
if name not in package_contents:
return False
return (_common.from_package(package) / name).is_file() | True if 'name' is a resource inside 'package'. Directories are *not* resources. |
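A hedged sketch combining contents() and is_resource(); 'mypkg' is a placeholder package name.
for entry in contents('mypkg'):
    print(entry, is_resource('mypkg', entry))   # True for plain files, False for subdirectories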
186,495 | import os
import io
from . import _common
from ._common import as_file, files
from .abc import ResourceReader
from contextlib import suppress
from importlib.abc import ResourceLoader
from importlib.machinery import ModuleSpec
from io import BytesIO, TextIOWrapper
from pathlib import Path
from types import ModuleType
from typing import ContextManager, Iterable, Union
from typing import cast
from typing.io import BinaryIO, TextIO
from collections.abc import Sequence
from functools import singledispatch
def _(iterable):
return iterable | null |
186,496 |
The provided code snippet includes necessary dependencies for implementing the `_has_deadlocked` function. Write a Python function `def _has_deadlocked(target_id, *, seen_ids, candidate_ids, blocking_on)` to solve the following problem:
Check if 'target_id' is holding the same lock as another thread(s). The search within 'blocking_on' starts with the threads listed in 'candidate_ids'. 'seen_ids' contains any threads that are considered already traversed in the search. Keyword arguments: target_id -- The thread id to try to reach. seen_ids -- A set of threads that have already been visited. candidate_ids -- The thread ids from which to begin. blocking_on -- A dict representing the thread/blocking-on graph. This may be the same object as the global '_blocking_on' but it is a parameter to reduce the impact that global mutable state has on the result of this function.
Here is the function:
def _has_deadlocked(target_id, *, seen_ids, candidate_ids, blocking_on):
"""Check if 'target_id' is holding the same lock as another thread(s).
The search within 'blocking_on' starts with the threads listed in
'candidate_ids'. 'seen_ids' contains any threads that are considered
already traversed in the search.
Keyword arguments:
target_id -- The thread id to try to reach.
seen_ids -- A set of threads that have already been visited.
candidate_ids -- The thread ids from which to begin.
blocking_on -- A dict representing the thread/blocking-on graph. This may
be the same object as the global '_blocking_on' but it is
a parameter to reduce the impact that global mutable
state has on the result of this function.
"""
if target_id in candidate_ids:
# If we have already reached the target_id, we're done - signal that it
# is reachable.
return True
# Otherwise, try to reach the target_id from each of the given candidate_ids.
for tid in candidate_ids:
if not (candidate_blocking_on := blocking_on.get(tid)):
# There are no edges out from this node, skip it.
continue
elif tid in seen_ids:
# bpo 38091: the chain of tid's we encounter here eventually leads
# to a fixed point or a cycle, but does not reach target_id.
# This means we would not actually deadlock. This can happen if
# other threads are at the beginning of acquire() below.
return False
seen_ids.add(tid)
# Follow the edges out from this thread.
edges = [lock.owner for lock in candidate_blocking_on]
if _has_deadlocked(target_id, seen_ids=seen_ids, candidate_ids=edges,
blocking_on=blocking_on):
return True
return False | Check if 'target_id' is holding the same lock as another thread(s). The search within 'blocking_on' starts with the threads listed in 'candidate_ids'. 'seen_ids' contains any threads that are considered already traversed in the search. Keyword arguments: target_id -- The thread id to try to reach. seen_ids -- A set of threads that have already been visited. candidate_ids -- The thread ids from which to begin. blocking_on -- A dict representing the thread/blocking-on graph. This may be the same object as the global '_blocking_on' but it is a parameter to reduce the impact that global mutable state has on the result of this function. |
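A small worked example of the deadlock check. FakeLock is a hypothetical stand-in for the module-lock objects whose .owner attribute the function follows; it is not part of the original snippet.
class FakeLock:
    def __init__(self, owner):
        self.owner = owner
# Thread 1 waits on a lock owned by thread 2, and thread 2 waits on a lock
# owned by thread 1: a classic two-party cycle.
blocking_on = {1: [FakeLock(owner=2)], 2: [FakeLock(owner=1)]}
print(_has_deadlocked(1, seen_ids=set(), candidate_ids=[2],
                      blocking_on=blocking_on))   # True: the chain leads back to thread 1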
186,499 | _warnings = None
def spec_from_loader(name, loader, *, origin=None, is_package=None):
"""Return a module spec based on various loader methods."""
if hasattr(loader, 'get_filename'):
if _bootstrap_external is None:
raise NotImplementedError
spec_from_file_location = _bootstrap_external.spec_from_file_location
if is_package is None:
return spec_from_file_location(name, loader=loader)
search = [] if is_package else None
return spec_from_file_location(name, loader=loader,
submodule_search_locations=search)
if is_package is None:
if hasattr(loader, 'is_package'):
try:
is_package = loader.is_package(name)
except ImportError:
is_package = None # aka, undefined
else:
# the default
is_package = False
return ModuleSpec(name, loader, origin=origin, is_package=is_package)
def _exec(spec, module):
"""Execute the spec's specified module in an existing module's namespace."""
name = spec.name
with _ModuleLockManager(name):
if sys.modules.get(name) is not module:
msg = 'module {!r} not in sys.modules'.format(name)
raise ImportError(msg, name=name)
try:
if spec.loader is None:
if spec.submodule_search_locations is None:
raise ImportError('missing loader', name=spec.name)
# Namespace package.
_init_module_attrs(spec, module, override=True)
else:
_init_module_attrs(spec, module, override=True)
if not hasattr(spec.loader, 'exec_module'):
msg = (f"{_object_name(spec.loader)}.exec_module() not found; "
"falling back to load_module()")
_warnings.warn(msg, ImportWarning)
spec.loader.load_module(name)
else:
spec.loader.exec_module(module)
finally:
# Update the order of insertion into sys.modules for module
# clean-up at shutdown.
module = sys.modules.pop(spec.name)
sys.modules[spec.name] = module
return module
def _load(spec):
"""Return a new module object, loaded by the spec's loader.
The module is not added to its parent.
If a module is already in sys.modules, that existing module gets
clobbered.
"""
with _ModuleLockManager(spec.name):
return _load_unlocked(spec)
The provided code snippet includes necessary dependencies for implementing the `_load_module_shim` function. Write a Python function `def _load_module_shim(self, fullname)` to solve the following problem:
Load the specified module into sys.modules and return it. This method is deprecated. Use loader.exec_module() instead.
Here is the function:
def _load_module_shim(self, fullname):
"""Load the specified module into sys.modules and return it.
This method is deprecated. Use loader.exec_module() instead.
"""
msg = ("the load_module() method is deprecated and slated for removal in "
"Python 3.12; use exec_module() instead")
_warnings.warn(msg, DeprecationWarning)
spec = spec_from_loader(fullname, self)
if fullname in sys.modules:
module = sys.modules[fullname]
_exec(spec, module)
return sys.modules[fullname]
else:
return _load(spec) | Load the specified module into sys.modules and return it. This method is deprecated. Use loader.exec_module() instead. |
186,500 | def _module_repr_from_spec(spec):
"""Return the repr to use for the module."""
# We mostly replicate _module_repr() using the spec attributes.
name = '?' if spec.name is None else spec.name
if spec.origin is None:
if spec.loader is None:
return '<module {!r}>'.format(name)
else:
return '<module {!r} ({!r})>'.format(name, spec.loader)
else:
if spec.has_location:
return '<module {!r} from {!r}>'.format(name, spec.origin)
else:
return '<module {!r} ({})>'.format(spec.name, spec.origin)
The provided code snippet includes necessary dependencies for implementing the `_module_repr` function. Write a Python function `def _module_repr(module)` to solve the following problem:
The implementation of ModuleType.__repr__().
Here is the function:
def _module_repr(module):
"""The implementation of ModuleType.__repr__()."""
loader = getattr(module, '__loader__', None)
if spec := getattr(module, "__spec__", None):
return _module_repr_from_spec(spec)
elif hasattr(loader, 'module_repr'):
try:
return loader.module_repr(module)
except Exception:
pass
# Fall through to a catch-all which always succeeds.
try:
name = module.__name__
except AttributeError:
name = '?'
try:
filename = module.__file__
except AttributeError:
if loader is None:
return '<module {!r}>'.format(name)
else:
return '<module {!r} ({!r})>'.format(name, loader)
else:
return '<module {!r} from {!r}>'.format(name, filename) | The implementation of ModuleType.__repr__(). |
186,501 | _bootstrap_external = None
def _install(sys_module, _imp_module):
"""Install importers for builtin and frozen modules"""
_setup(sys_module, _imp_module)
sys.meta_path.append(BuiltinImporter)
sys.meta_path.append(FrozenImporter)
try:
import _frozen_importlib_external as _bootstrap_external
except ImportError:
from . import _bootstrap_external
_bootstrap_external._set_bootstrap_module(_bootstrap)
_bootstrap._bootstrap_external = _bootstrap_external
else:
_bootstrap_external.__name__ = 'importlib._bootstrap_external'
_bootstrap_external.__package__ = 'importlib'
try:
_bootstrap_external.__file__ = __file__.replace('__init__.py', '_bootstrap_external.py')
except NameError:
# __file__ is not guaranteed to be defined, e.g. if this code gets
# frozen by a tool like cx_Freeze.
pass
sys.modules['importlib._bootstrap_external'] = _bootstrap_external
The provided code snippet includes necessary dependencies for implementing the `_install_external_importers` function. Write a Python function `def _install_external_importers()` to solve the following problem:
Install importers that require external filesystem access
Here is the function:
def _install_external_importers():
"""Install importers that require external filesystem access"""
global _bootstrap_external
import _frozen_importlib_external
_bootstrap_external = _frozen_importlib_external
_frozen_importlib_external._install(sys.modules[__name__]) | Install importers that require external filesystem access |
186,502 | import collections
import zipfile
import pathlib
from . import abc
def remove_duplicates(items):
return iter(collections.OrderedDict.fromkeys(items)) | null |
186,507 | import os
import sys
import stat
import genericpath
from genericpath import *
def join(a, *p):
"""Join two or more pathname components, inserting '/' as needed.
If any component is an absolute path, all previous path components
will be discarded. An empty last part will result in a path that
ends with a separator."""
a = os.fspath(a)
sep = _get_sep(a)
path = a
try:
if not p:
path[:0] + sep #23780: Ensure compatible data type even if p is null.
for b in map(os.fspath, p):
if b.startswith(sep):
path = b
elif not path or path.endswith(sep):
path += b
else:
path += sep + b
except (TypeError, AttributeError, BytesWarning):
genericpath._check_arg_types('join', a, *p)
raise
return path
def realpath(filename, *, strict=False):
"""Return the canonical path of the specified filename, eliminating any
symbolic links encountered in the path."""
filename = os.fspath(filename)
path, ok = _joinrealpath(filename[:0], filename, strict, {})
return abspath(path)
from os.path import (curdir, pardir, sep, pathsep, defpath, extsep, altsep,
devnull)
The provided code snippet includes necessary dependencies for implementing the `ismount` function. Write a Python function `def ismount(path)` to solve the following problem:
Test whether a path is a mount point
Here is the function:
def ismount(path):
"""Test whether a path is a mount point"""
try:
s1 = os.lstat(path)
except (OSError, ValueError):
# It doesn't exist -- so not a mount point. :-)
return False
else:
# A symlink can never be a mount point
if stat.S_ISLNK(s1.st_mode):
return False
if isinstance(path, bytes):
parent = join(path, b'..')
else:
parent = join(path, '..')
parent = realpath(parent)
try:
s2 = os.lstat(parent)
except (OSError, ValueError):
return False
dev1 = s1.st_dev
dev2 = s2.st_dev
if dev1 != dev2:
return True # path/.. on a different device as path
ino1 = s1.st_ino
ino2 = s2.st_ino
if ino1 == ino2:
return True # path/.. is the same i-node as path
return False | Test whether a path is a mount point |
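A usage sketch for ismount; the results depend on the host's mount table, and note that the excerpt's realpath relies on helpers (abspath, _joinrealpath) not shown here, so this assumes the full posixpath module is available.
print(ismount('/'))      # True: the root is always a mount point
print(ismount('/tmp'))   # often False, unless /tmp is a separate filesystem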
186,508 | sep = '/'
import os
import sys
import stat
import genericpath
from genericpath import *
def _get_sep(path):
if isinstance(path, bytes):
return b'/'
else:
return '/'
from os.path import (curdir, pardir, sep, pathsep, defpath, extsep, altsep,
devnull)
The provided code snippet includes necessary dependencies for implementing the `expanduser` function. Write a Python function `def expanduser(path)` to solve the following problem:
Expand ~ and ~user constructions. If user or $HOME is unknown, do nothing.
Here is the function:
def expanduser(path):
"""Expand ~ and ~user constructions. If user or $HOME is unknown,
do nothing."""
path = os.fspath(path)
if isinstance(path, bytes):
tilde = b'~'
else:
tilde = '~'
if not path.startswith(tilde):
return path
sep = _get_sep(path)
i = path.find(sep, 1)
if i < 0:
i = len(path)
if i == 1:
if 'HOME' not in os.environ:
import pwd
try:
userhome = pwd.getpwuid(os.getuid()).pw_dir
except KeyError:
# bpo-10496: if the current user identifier doesn't exist in the
# password database, return the path unchanged
return path
else:
userhome = os.environ['HOME']
else:
import pwd
name = path[1:i]
if isinstance(name, bytes):
name = str(name, 'ASCII')
try:
pwent = pwd.getpwnam(name)
except KeyError:
# bpo-10496: if the user name from the path doesn't exist in the
# password database, return the path unchanged
return path
userhome = pwent.pw_dir
# if no user home, return the path unchanged on VxWorks
if userhome is None and sys.platform == "vxworks":
return path
if isinstance(path, bytes):
userhome = os.fsencode(userhome)
root = b'/'
else:
root = '/'
userhome = userhome.rstrip(root)
return (userhome + path[i:]) or root | Expand ~ and ~user constructions. If user or $HOME is unknown, do nothing. |
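A usage sketch for expanduser; the expansion depends on $HOME or the password database of the machine it runs on.
print(expanduser('~/notes.txt'))   # e.g. '/home/alice/notes.txt'
print(expanduser('/etc/hosts'))    # returned unchanged: no leading '~'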
186,512 | import re
_whitespace = '\t\n\x0b\x0c\r '   # whitespace characters treated as breakable by TextWrapper
class TextWrapper:
"""
Object for wrapping/filling text. The public interface consists of
the wrap() and fill() methods; the other methods are just there for
subclasses to override in order to tweak the default behaviour.
If you want to completely replace the main wrapping algorithm,
you'll probably have to override _wrap_chunks().
Several instance attributes control various aspects of wrapping:
width (default: 70)
the maximum width of wrapped lines (unless break_long_words
is false)
initial_indent (default: "")
string that will be prepended to the first line of wrapped
output. Counts towards the line's width.
subsequent_indent (default: "")
string that will be prepended to all lines save the first
of wrapped output; also counts towards each line's width.
expand_tabs (default: true)
Expand tabs in input text to spaces before further processing.
Each tab will become 0 .. 'tabsize' spaces, depending on its position
in its line. If false, each tab is treated as a single character.
tabsize (default: 8)
Expand tabs in input text to 0 .. 'tabsize' spaces, unless
'expand_tabs' is false.
replace_whitespace (default: true)
Replace all whitespace characters in the input text by spaces
after tab expansion. Note that if expand_tabs is false and
replace_whitespace is true, every tab will be converted to a
single space!
fix_sentence_endings (default: false)
Ensure that sentence-ending punctuation is always followed
by two spaces. Off by default because the algorithm is
(unavoidably) imperfect.
break_long_words (default: true)
Break words longer than 'width'. If false, those words will not
be broken, and some lines might be longer than 'width'.
break_on_hyphens (default: true)
Allow breaking hyphenated words. If true, wrapping will occur
preferably on whitespaces and right after hyphens part of
compound words.
drop_whitespace (default: true)
Drop leading and trailing whitespace from lines.
max_lines (default: None)
Truncate wrapped lines.
placeholder (default: ' [...]')
Append to the last line of truncated text.
"""
unicode_whitespace_trans = {}
uspace = ord(' ')
for x in _whitespace:
unicode_whitespace_trans[ord(x)] = uspace
# This funky little regex is just the trick for splitting
# text up into word-wrappable chunks. E.g.
# "Hello there -- you goof-ball, use the -b option!"
# splits into
# Hello/ /there/ /--/ /you/ /goof-/ball,/ /use/ /the/ /-b/ /option!
# (after stripping out empty strings).
word_punct = r'[\w!"\'&.,?]'
letter = r'[^\d\W]'
whitespace = r'[%s]' % re.escape(_whitespace)
nowhitespace = '[^' + whitespace[1:]
wordsep_re = re.compile(r'''
( # any whitespace
%(ws)s+
| # em-dash between words
(?<=%(wp)s) -{2,} (?=\w)
| # word, possibly hyphenated
%(nws)s+? (?:
# hyphenated word
-(?: (?<=%(lt)s{2}-) | (?<=%(lt)s-%(lt)s-))
(?= %(lt)s -? %(lt)s)
| # end of word
(?=%(ws)s|\Z)
| # em-dash
(?<=%(wp)s) (?=-{2,}\w)
)
)''' % {'wp': word_punct, 'lt': letter,
'ws': whitespace, 'nws': nowhitespace},
re.VERBOSE)
del word_punct, letter, nowhitespace
# This less funky little regex just splits on recognized spaces. E.g.
# "Hello there -- you goof-ball, use the -b option!"
# splits into
# Hello/ /there/ /--/ /you/ /goof-ball,/ /use/ /the/ /-b/ /option!/
wordsep_simple_re = re.compile(r'(%s+)' % whitespace)
del whitespace
# XXX this is not locale- or charset-aware -- string.lowercase
# is US-ASCII only (and therefore English-only)
sentence_end_re = re.compile(r'[a-z]' # lowercase letter
r'[\.\!\?]' # sentence-ending punct.
r'[\"\']?' # optional end-of-quote
r'\Z') # end of chunk
def __init__(self,
width=70,
initial_indent="",
subsequent_indent="",
expand_tabs=True,
replace_whitespace=True,
fix_sentence_endings=False,
break_long_words=True,
drop_whitespace=True,
break_on_hyphens=True,
tabsize=8,
*,
max_lines=None,
placeholder=' [...]'):
self.width = width
self.initial_indent = initial_indent
self.subsequent_indent = subsequent_indent
self.expand_tabs = expand_tabs
self.replace_whitespace = replace_whitespace
self.fix_sentence_endings = fix_sentence_endings
self.break_long_words = break_long_words
self.drop_whitespace = drop_whitespace
self.break_on_hyphens = break_on_hyphens
self.tabsize = tabsize
self.max_lines = max_lines
self.placeholder = placeholder
# -- Private methods -----------------------------------------------
# (possibly useful for subclasses to override)
def _munge_whitespace(self, text):
"""_munge_whitespace(text : string) -> string
Munge whitespace in text: expand tabs and convert all other
whitespace characters to spaces. Eg. " foo\\tbar\\n\\nbaz"
becomes " foo bar baz".
"""
if self.expand_tabs:
text = text.expandtabs(self.tabsize)
if self.replace_whitespace:
text = text.translate(self.unicode_whitespace_trans)
return text
def _split(self, text):
"""_split(text : string) -> [string]
Split the text to wrap into indivisible chunks. Chunks are
not quite the same as words; see _wrap_chunks() for full
details. As an example, the text
Look, goof-ball -- use the -b option!
breaks into the following chunks:
'Look,', ' ', 'goof-', 'ball', ' ', '--', ' ',
'use', ' ', 'the', ' ', '-b', ' ', 'option!'
if break_on_hyphens is True, or in:
'Look,', ' ', 'goof-ball', ' ', '--', ' ',
'use', ' ', 'the', ' ', '-b', ' ', 'option!'
otherwise.
"""
if self.break_on_hyphens is True:
chunks = self.wordsep_re.split(text)
else:
chunks = self.wordsep_simple_re.split(text)
chunks = [c for c in chunks if c]
return chunks
def _fix_sentence_endings(self, chunks):
"""_fix_sentence_endings(chunks : [string])
Correct for sentence endings buried in 'chunks'. Eg. when the
original text contains "... foo.\\nBar ...", munge_whitespace()
and split() will convert that to [..., "foo.", " ", "Bar", ...]
which has one too few spaces; this method simply changes the one
space to two.
"""
i = 0
patsearch = self.sentence_end_re.search
while i < len(chunks)-1:
if chunks[i+1] == " " and patsearch(chunks[i]):
chunks[i+1] = " "
i += 2
else:
i += 1
def _handle_long_word(self, reversed_chunks, cur_line, cur_len, width):
"""_handle_long_word(chunks : [string],
cur_line : [string],
cur_len : int, width : int)
Handle a chunk of text (most likely a word, not whitespace) that
is too long to fit in any line.
"""
# Figure out when indent is larger than the specified width, and make
# sure at least one character is stripped off on every pass
if width < 1:
space_left = 1
else:
space_left = width - cur_len
# If we're allowed to break long words, then do so: put as much
# of the next chunk onto the current line as will fit.
if self.break_long_words:
end = space_left
chunk = reversed_chunks[-1]
if self.break_on_hyphens and len(chunk) > space_left:
# break after last hyphen, but only if there are
# non-hyphens before it
hyphen = chunk.rfind('-', 0, space_left)
if hyphen > 0 and any(c != '-' for c in chunk[:hyphen]):
end = hyphen + 1
cur_line.append(chunk[:end])
reversed_chunks[-1] = chunk[end:]
# Otherwise, we have to preserve the long word intact. Only add
# it to the current line if there's nothing already there --
# that minimizes how much we violate the width constraint.
elif not cur_line:
cur_line.append(reversed_chunks.pop())
# If we're not allowed to break long words, and there's already
# text on the current line, do nothing. Next time through the
# main loop of _wrap_chunks(), we'll wind up here again, but
# cur_len will be zero, so the next line will be entirely
# devoted to the long word that we can't handle right now.
def _wrap_chunks(self, chunks):
"""_wrap_chunks(chunks : [string]) -> [string]
Wrap a sequence of text chunks and return a list of lines of
length 'self.width' or less. (If 'break_long_words' is false,
some lines may be longer than this.) Chunks correspond roughly
to words and the whitespace between them: each chunk is
indivisible (modulo 'break_long_words'), but a line break can
come between any two chunks. Chunks should not have internal
whitespace; ie. a chunk is either all whitespace or a "word".
Whitespace chunks will be removed from the beginning and end of
lines, but apart from that whitespace is preserved.
"""
lines = []
if self.width <= 0:
raise ValueError("invalid width %r (must be > 0)" % self.width)
if self.max_lines is not None:
if self.max_lines > 1:
indent = self.subsequent_indent
else:
indent = self.initial_indent
if len(indent) + len(self.placeholder.lstrip()) > self.width:
raise ValueError("placeholder too large for max width")
# Arrange in reverse order so items can be efficiently popped
# from a stack of chunks.
chunks.reverse()
while chunks:
# Start the list of chunks that will make up the current line.
# cur_len is just the length of all the chunks in cur_line.
cur_line = []
cur_len = 0
# Figure out which static string will prefix this line.
if lines:
indent = self.subsequent_indent
else:
indent = self.initial_indent
# Maximum width for this line.
width = self.width - len(indent)
# First chunk on line is whitespace -- drop it, unless this
# is the very beginning of the text (ie. no lines started yet).
if self.drop_whitespace and chunks[-1].strip() == '' and lines:
del chunks[-1]
while chunks:
l = len(chunks[-1])
# Can at least squeeze this chunk onto the current line.
if cur_len + l <= width:
cur_line.append(chunks.pop())
cur_len += l
# Nope, this line is full.
else:
break
# The current line is full, and the next chunk is too big to
# fit on *any* line (not just this one).
if chunks and len(chunks[-1]) > width:
self._handle_long_word(chunks, cur_line, cur_len, width)
cur_len = sum(map(len, cur_line))
# If the last chunk on this line is all whitespace, drop it.
if self.drop_whitespace and cur_line and cur_line[-1].strip() == '':
cur_len -= len(cur_line[-1])
del cur_line[-1]
if cur_line:
if (self.max_lines is None or
len(lines) + 1 < self.max_lines or
(not chunks or
self.drop_whitespace and
len(chunks) == 1 and
not chunks[0].strip()) and cur_len <= width):
# Convert current line back to a string and store it in
# list of all lines (return value).
lines.append(indent + ''.join(cur_line))
else:
while cur_line:
if (cur_line[-1].strip() and
cur_len + len(self.placeholder) <= width):
cur_line.append(self.placeholder)
lines.append(indent + ''.join(cur_line))
break
cur_len -= len(cur_line[-1])
del cur_line[-1]
else:
if lines:
prev_line = lines[-1].rstrip()
if (len(prev_line) + len(self.placeholder) <=
self.width):
lines[-1] = prev_line + self.placeholder
break
lines.append(indent + self.placeholder.lstrip())
break
return lines
def _split_chunks(self, text):
text = self._munge_whitespace(text)
return self._split(text)
# -- Public interface ----------------------------------------------
def wrap(self, text):
"""wrap(text : string) -> [string]
Reformat the single paragraph in 'text' so it fits in lines of
no more than 'self.width' columns, and return a list of wrapped
lines. Tabs in 'text' are expanded with string.expandtabs(),
and all other whitespace characters (including newline) are
converted to space.
"""
chunks = self._split_chunks(text)
if self.fix_sentence_endings:
self._fix_sentence_endings(chunks)
return self._wrap_chunks(chunks)
def fill(self, text):
"""fill(text : string) -> string
Reformat the single paragraph in 'text' to fit in lines of no
more than 'self.width' columns, and return a new string
containing the entire wrapped paragraph.
"""
return "\n".join(self.wrap(text))
The provided code snippet includes necessary dependencies for implementing the `wrap` function. Write a Python function `def wrap(text, width=70, **kwargs)` to solve the following problem:
Wrap a single paragraph of text, returning a list of wrapped lines. Reformat the single paragraph in 'text' so it fits in lines of no more than 'width' columns, and return a list of wrapped lines. By default, tabs in 'text' are expanded with string.expandtabs(), and all other whitespace characters (including newline) are converted to space. See TextWrapper class for available keyword args to customize wrapping behaviour.
Here is the function:
def wrap(text, width=70, **kwargs):
"""Wrap a single paragraph of text, returning a list of wrapped lines.
Reformat the single paragraph in 'text' so it fits in lines of no
more than 'width' columns, and return a list of wrapped lines. By
default, tabs in 'text' are expanded with string.expandtabs(), and
all other whitespace characters (including newline) are converted to
space. See TextWrapper class for available keyword args to customize
wrapping behaviour.
"""
w = TextWrapper(width=width, **kwargs)
return w.wrap(text) | Wrap a single paragraph of text, returning a list of wrapped lines. Reformat the single paragraph in 'text' so it fits in lines of no more than 'width' columns, and return a list of wrapped lines. By default, tabs in 'text' are expanded with string.expandtabs(), and all other whitespace characters (including newline) are converted to space. See TextWrapper class for available keyword args to customize wrapping behaviour. |
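A quick usage sketch for wrap():
sample = "The quick brown fox jumps over the lazy dog"
print('\n'.join(wrap(sample, width=15)))
# The quick brown
# fox jumps over
# the lazy dog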
186,513 | import re
_whitespace = '\t\n\x0b\x0c\r '   # whitespace characters treated as breakable by TextWrapper
class TextWrapper:
"""
Object for wrapping/filling text. The public interface consists of
the wrap() and fill() methods; the other methods are just there for
subclasses to override in order to tweak the default behaviour.
If you want to completely replace the main wrapping algorithm,
you'll probably have to override _wrap_chunks().
Several instance attributes control various aspects of wrapping:
width (default: 70)
the maximum width of wrapped lines (unless break_long_words
is false)
initial_indent (default: "")
string that will be prepended to the first line of wrapped
output. Counts towards the line's width.
subsequent_indent (default: "")
string that will be prepended to all lines save the first
of wrapped output; also counts towards each line's width.
expand_tabs (default: true)
Expand tabs in input text to spaces before further processing.
Each tab will become 0 .. 'tabsize' spaces, depending on its position
in its line. If false, each tab is treated as a single character.
tabsize (default: 8)
Expand tabs in input text to 0 .. 'tabsize' spaces, unless
'expand_tabs' is false.
replace_whitespace (default: true)
Replace all whitespace characters in the input text by spaces
after tab expansion. Note that if expand_tabs is false and
replace_whitespace is true, every tab will be converted to a
single space!
fix_sentence_endings (default: false)
Ensure that sentence-ending punctuation is always followed
by two spaces. Off by default because the algorithm is
(unavoidably) imperfect.
break_long_words (default: true)
Break words longer than 'width'. If false, those words will not
be broken, and some lines might be longer than 'width'.
break_on_hyphens (default: true)
Allow breaking hyphenated words. If true, wrapping will occur
preferably on whitespaces and right after hyphens part of
compound words.
drop_whitespace (default: true)
Drop leading and trailing whitespace from lines.
max_lines (default: None)
Truncate wrapped lines.
placeholder (default: ' [...]')
Append to the last line of truncated text.
"""
unicode_whitespace_trans = {}
uspace = ord(' ')
for x in _whitespace:
unicode_whitespace_trans[ord(x)] = uspace
# This funky little regex is just the trick for splitting
# text up into word-wrappable chunks. E.g.
# "Hello there -- you goof-ball, use the -b option!"
# splits into
# Hello/ /there/ /--/ /you/ /goof-/ball,/ /use/ /the/ /-b/ /option!
# (after stripping out empty strings).
word_punct = r'[\w!"\'&.,?]'
letter = r'[^\d\W]'
whitespace = r'[%s]' % re.escape(_whitespace)
nowhitespace = '[^' + whitespace[1:]
wordsep_re = re.compile(r'''
( # any whitespace
%(ws)s+
| # em-dash between words
(?<=%(wp)s) -{2,} (?=\w)
| # word, possibly hyphenated
%(nws)s+? (?:
# hyphenated word
-(?: (?<=%(lt)s{2}-) | (?<=%(lt)s-%(lt)s-))
(?= %(lt)s -? %(lt)s)
| # end of word
(?=%(ws)s|\Z)
| # em-dash
(?<=%(wp)s) (?=-{2,}\w)
)
)''' % {'wp': word_punct, 'lt': letter,
'ws': whitespace, 'nws': nowhitespace},
re.VERBOSE)
del word_punct, letter, nowhitespace
# This less funky little regex just splits on recognized spaces. E.g.
# "Hello there -- you goof-ball, use the -b option!"
# splits into
# Hello/ /there/ /--/ /you/ /goof-ball,/ /use/ /the/ /-b/ /option!/
wordsep_simple_re = re.compile(r'(%s+)' % whitespace)
del whitespace
# XXX this is not locale- or charset-aware -- string.lowercase
# is US-ASCII only (and therefore English-only)
sentence_end_re = re.compile(r'[a-z]' # lowercase letter
r'[\.\!\?]' # sentence-ending punct.
r'[\"\']?' # optional end-of-quote
r'\Z') # end of chunk
def __init__(self,
width=70,
initial_indent="",
subsequent_indent="",
expand_tabs=True,
replace_whitespace=True,
fix_sentence_endings=False,
break_long_words=True,
drop_whitespace=True,
break_on_hyphens=True,
tabsize=8,
*,
max_lines=None,
placeholder=' [...]'):
self.width = width
self.initial_indent = initial_indent
self.subsequent_indent = subsequent_indent
self.expand_tabs = expand_tabs
self.replace_whitespace = replace_whitespace
self.fix_sentence_endings = fix_sentence_endings
self.break_long_words = break_long_words
self.drop_whitespace = drop_whitespace
self.break_on_hyphens = break_on_hyphens
self.tabsize = tabsize
self.max_lines = max_lines
self.placeholder = placeholder
# -- Private methods -----------------------------------------------
# (possibly useful for subclasses to override)
def _munge_whitespace(self, text):
"""_munge_whitespace(text : string) -> string
Munge whitespace in text: expand tabs and convert all other
whitespace characters to spaces. Eg. " foo\\tbar\\n\\nbaz"
becomes " foo bar baz".
"""
if self.expand_tabs:
text = text.expandtabs(self.tabsize)
if self.replace_whitespace:
text = text.translate(self.unicode_whitespace_trans)
return text
def _split(self, text):
"""_split(text : string) -> [string]
Split the text to wrap into indivisible chunks. Chunks are
not quite the same as words; see _wrap_chunks() for full
details. As an example, the text
Look, goof-ball -- use the -b option!
breaks into the following chunks:
'Look,', ' ', 'goof-', 'ball', ' ', '--', ' ',
'use', ' ', 'the', ' ', '-b', ' ', 'option!'
if break_on_hyphens is True, or in:
'Look,', ' ', 'goof-ball', ' ', '--', ' ',
'use', ' ', 'the', ' ', '-b', ' ', 'option!'
otherwise.
"""
if self.break_on_hyphens is True:
chunks = self.wordsep_re.split(text)
else:
chunks = self.wordsep_simple_re.split(text)
chunks = [c for c in chunks if c]
return chunks
def _fix_sentence_endings(self, chunks):
"""_fix_sentence_endings(chunks : [string])
Correct for sentence endings buried in 'chunks'. Eg. when the
original text contains "... foo.\\nBar ...", munge_whitespace()
and split() will convert that to [..., "foo.", " ", "Bar", ...]
which has one too few spaces; this method simply changes the one
space to two.
"""
i = 0
patsearch = self.sentence_end_re.search
while i < len(chunks)-1:
if chunks[i+1] == " " and patsearch(chunks[i]):
chunks[i+1] = " "
i += 2
else:
i += 1
def _handle_long_word(self, reversed_chunks, cur_line, cur_len, width):
"""_handle_long_word(chunks : [string],
cur_line : [string],
cur_len : int, width : int)
Handle a chunk of text (most likely a word, not whitespace) that
is too long to fit in any line.
"""
# Figure out when indent is larger than the specified width, and make
# sure at least one character is stripped off on every pass
if width < 1:
space_left = 1
else:
space_left = width - cur_len
# If we're allowed to break long words, then do so: put as much
# of the next chunk onto the current line as will fit.
if self.break_long_words:
end = space_left
chunk = reversed_chunks[-1]
if self.break_on_hyphens and len(chunk) > space_left:
# break after last hyphen, but only if there are
# non-hyphens before it
hyphen = chunk.rfind('-', 0, space_left)
if hyphen > 0 and any(c != '-' for c in chunk[:hyphen]):
end = hyphen + 1
cur_line.append(chunk[:end])
reversed_chunks[-1] = chunk[end:]
# Otherwise, we have to preserve the long word intact. Only add
# it to the current line if there's nothing already there --
# that minimizes how much we violate the width constraint.
elif not cur_line:
cur_line.append(reversed_chunks.pop())
# If we're not allowed to break long words, and there's already
# text on the current line, do nothing. Next time through the
# main loop of _wrap_chunks(), we'll wind up here again, but
# cur_len will be zero, so the next line will be entirely
# devoted to the long word that we can't handle right now.
def _wrap_chunks(self, chunks):
"""_wrap_chunks(chunks : [string]) -> [string]
Wrap a sequence of text chunks and return a list of lines of
length 'self.width' or less. (If 'break_long_words' is false,
some lines may be longer than this.) Chunks correspond roughly
to words and the whitespace between them: each chunk is
indivisible (modulo 'break_long_words'), but a line break can
come between any two chunks. Chunks should not have internal
whitespace; ie. a chunk is either all whitespace or a "word".
Whitespace chunks will be removed from the beginning and end of
lines, but apart from that whitespace is preserved.
"""
lines = []
if self.width <= 0:
raise ValueError("invalid width %r (must be > 0)" % self.width)
if self.max_lines is not None:
if self.max_lines > 1:
indent = self.subsequent_indent
else:
indent = self.initial_indent
if len(indent) + len(self.placeholder.lstrip()) > self.width:
raise ValueError("placeholder too large for max width")
# Arrange in reverse order so items can be efficiently popped
# from a stack of chunks.
chunks.reverse()
while chunks:
# Start the list of chunks that will make up the current line.
# cur_len is just the length of all the chunks in cur_line.
cur_line = []
cur_len = 0
# Figure out which static string will prefix this line.
if lines:
indent = self.subsequent_indent
else:
indent = self.initial_indent
# Maximum width for this line.
width = self.width - len(indent)
# First chunk on line is whitespace -- drop it, unless this
# is the very beginning of the text (ie. no lines started yet).
if self.drop_whitespace and chunks[-1].strip() == '' and lines:
del chunks[-1]
while chunks:
l = len(chunks[-1])
# Can at least squeeze this chunk onto the current line.
if cur_len + l <= width:
cur_line.append(chunks.pop())
cur_len += l
# Nope, this line is full.
else:
break
# The current line is full, and the next chunk is too big to
# fit on *any* line (not just this one).
if chunks and len(chunks[-1]) > width:
self._handle_long_word(chunks, cur_line, cur_len, width)
cur_len = sum(map(len, cur_line))
# If the last chunk on this line is all whitespace, drop it.
if self.drop_whitespace and cur_line and cur_line[-1].strip() == '':
cur_len -= len(cur_line[-1])
del cur_line[-1]
if cur_line:
if (self.max_lines is None or
len(lines) + 1 < self.max_lines or
(not chunks or
self.drop_whitespace and
len(chunks) == 1 and
not chunks[0].strip()) and cur_len <= width):
# Convert current line back to a string and store it in
# list of all lines (return value).
lines.append(indent + ''.join(cur_line))
else:
while cur_line:
if (cur_line[-1].strip() and
cur_len + len(self.placeholder) <= width):
cur_line.append(self.placeholder)
lines.append(indent + ''.join(cur_line))
break
cur_len -= len(cur_line[-1])
del cur_line[-1]
else:
if lines:
prev_line = lines[-1].rstrip()
if (len(prev_line) + len(self.placeholder) <=
self.width):
lines[-1] = prev_line + self.placeholder
break
lines.append(indent + self.placeholder.lstrip())
break
return lines
def _split_chunks(self, text):
text = self._munge_whitespace(text)
return self._split(text)
# -- Public interface ----------------------------------------------
def wrap(self, text):
"""wrap(text : string) -> [string]
Reformat the single paragraph in 'text' so it fits in lines of
no more than 'self.width' columns, and return a list of wrapped
lines. Tabs in 'text' are expanded with string.expandtabs(),
and all other whitespace characters (including newline) are
converted to space.
"""
chunks = self._split_chunks(text)
if self.fix_sentence_endings:
self._fix_sentence_endings(chunks)
return self._wrap_chunks(chunks)
def fill(self, text):
"""fill(text : string) -> string
Reformat the single paragraph in 'text' to fit in lines of no
more than 'self.width' columns, and return a new string
containing the entire wrapped paragraph.
"""
return "\n".join(self.wrap(text))
def fill(text, width=70, **kwargs):
"""Fill a single paragraph of text, returning a new string.
Reformat the single paragraph in 'text' to fit in lines of no more
than 'width' columns, and return a new string containing the entire
wrapped paragraph. As with wrap(), tabs are expanded and other
whitespace characters converted to space. See TextWrapper class for
available keyword args to customize wrapping behaviour.
"""
w = TextWrapper(width=width, **kwargs)
return w.fill(text)
The provided code snippet includes necessary dependencies for implementing the `shorten` function. Write a Python function `def shorten(text, width, **kwargs)` to solve the following problem:
Collapse and truncate the given text to fit in the given width. The text first has its whitespace collapsed. If it then fits in the *width*, it is returned as is. Otherwise, as many words as possible are joined and then the placeholder is appended:: >>> textwrap.shorten("Hello world!", width=12) 'Hello world!' >>> textwrap.shorten("Hello world!", width=11) 'Hello [...]'
Here is the function:
def shorten(text, width, **kwargs):
"""Collapse and truncate the given text to fit in the given width.
The text first has its whitespace collapsed. If it then fits in
the *width*, it is returned as is. Otherwise, as many words
as possible are joined and then the placeholder is appended::
>>> textwrap.shorten("Hello world!", width=12)
'Hello world!'
>>> textwrap.shorten("Hello world!", width=11)
'Hello [...]'
"""
w = TextWrapper(width=width, max_lines=1, **kwargs)
return w.fill(' '.join(text.strip().split())) | Collapse and truncate the given text to fit in the given width. The text first has its whitespace collapsed. If it then fits in the *width*, it is returned as is. Otherwise, as many words as possible are joined and then the placeholder is appended:: >>> textwrap.shorten("Hello world!", width=12) 'Hello world!' >>> textwrap.shorten("Hello world!", width=11) 'Hello [...]' |
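One more usage sketch for shorten(), this time with a custom placeholder; with these inputs the text collapses to the first word plus the placeholder.
print(shorten("Hello there, world!", width=15, placeholder=" ..."))   # 'Hello ...'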
186,517 | import struct, sys, time, os
import zlib
import builtins
import io
import _compression
_COMPRESS_LEVEL_BEST = 9
class GzipFile(_compression.BaseStream):
"""The GzipFile class simulates most of the methods of a file object with
the exception of the truncate() method.
This class only supports opening files in binary mode. If you need to open a
compressed file in text mode, use the gzip.open() function.
"""
# Overridden with internal file object to be closed, if only a filename
# is passed in
myfileobj = None
def __init__(self, filename=None, mode=None,
compresslevel=_COMPRESS_LEVEL_BEST, fileobj=None, mtime=None):
"""Constructor for the GzipFile class.
At least one of fileobj and filename must be given a
non-trivial value.
The new class instance is based on fileobj, which can be a regular
file, an io.BytesIO object, or any other object which simulates a file.
It defaults to None, in which case filename is opened to provide
a file object.
When fileobj is not None, the filename argument is only used to be
included in the gzip file header, which may include the original
filename of the uncompressed file. It defaults to the filename of
fileobj, if discernible; otherwise, it defaults to the empty string,
and in this case the original filename is not included in the header.
The mode argument can be any of 'r', 'rb', 'a', 'ab', 'w', 'wb', 'x', or
'xb' depending on whether the file will be read or written. The default
is the mode of fileobj if discernible; otherwise, the default is 'rb'.
A mode of 'r' is equivalent to one of 'rb', and similarly for 'w' and
'wb', 'a' and 'ab', and 'x' and 'xb'.
The compresslevel argument is an integer from 0 to 9 controlling the
level of compression; 1 is fastest and produces the least compression,
and 9 is slowest and produces the most compression. 0 is no compression
at all. The default is 9.
The mtime argument is an optional numeric timestamp to be written
to the last modification time field in the stream when compressing.
If omitted or None, the current time is used.
"""
if mode and ('t' in mode or 'U' in mode):
raise ValueError("Invalid mode: {!r}".format(mode))
if mode and 'b' not in mode:
mode += 'b'
if fileobj is None:
fileobj = self.myfileobj = builtins.open(filename, mode or 'rb')
if filename is None:
filename = getattr(fileobj, 'name', '')
if not isinstance(filename, (str, bytes)):
filename = ''
else:
filename = os.fspath(filename)
origmode = mode
if mode is None:
mode = getattr(fileobj, 'mode', 'rb')
if mode.startswith('r'):
self.mode = READ
raw = _GzipReader(fileobj)
self._buffer = io.BufferedReader(raw)
self.name = filename
elif mode.startswith(('w', 'a', 'x')):
if origmode is None:
import warnings
warnings.warn(
"GzipFile was opened for writing, but this will "
"change in future Python releases. "
"Specify the mode argument for opening it for writing.",
FutureWarning, 2)
self.mode = WRITE
self._init_write(filename)
self.compress = zlib.compressobj(compresslevel,
zlib.DEFLATED,
-zlib.MAX_WBITS,
zlib.DEF_MEM_LEVEL,
0)
self._write_mtime = mtime
else:
raise ValueError("Invalid mode: {!r}".format(mode))
self.fileobj = fileobj
if self.mode == WRITE:
self._write_gzip_header(compresslevel)
def filename(self):
import warnings
warnings.warn("use the name attribute", DeprecationWarning, 2)
if self.mode == WRITE and self.name[-3:] != ".gz":
return self.name + ".gz"
return self.name
def mtime(self):
"""Last modification time read from stream, or None"""
return self._buffer.raw._last_mtime
def __repr__(self):
s = repr(self.fileobj)
return '<gzip ' + s[1:-1] + ' ' + hex(id(self)) + '>'
def _init_write(self, filename):
self.name = filename
self.crc = zlib.crc32(b"")
self.size = 0
self.writebuf = []
self.bufsize = 0
self.offset = 0 # Current file offset for seek(), tell(), etc
def _write_gzip_header(self, compresslevel):
self.fileobj.write(b'\037\213') # magic header
self.fileobj.write(b'\010') # compression method
try:
# RFC 1952 requires the FNAME field to be Latin-1. Do not
# include filenames that cannot be represented that way.
fname = os.path.basename(self.name)
if not isinstance(fname, bytes):
fname = fname.encode('latin-1')
if fname.endswith(b'.gz'):
fname = fname[:-3]
except UnicodeEncodeError:
fname = b''
flags = 0
if fname:
flags = FNAME
self.fileobj.write(chr(flags).encode('latin-1'))
mtime = self._write_mtime
if mtime is None:
mtime = time.time()
write32u(self.fileobj, int(mtime))
if compresslevel == _COMPRESS_LEVEL_BEST:
xfl = b'\002'
elif compresslevel == _COMPRESS_LEVEL_FAST:
xfl = b'\004'
else:
xfl = b'\000'
self.fileobj.write(xfl)
self.fileobj.write(b'\377')
if fname:
self.fileobj.write(fname + b'\000')
def write(self,data):
self._check_not_closed()
if self.mode != WRITE:
import errno
raise OSError(errno.EBADF, "write() on read-only GzipFile object")
if self.fileobj is None:
raise ValueError("write() on closed GzipFile object")
if isinstance(data, (bytes, bytearray)):
length = len(data)
else:
# accept any data that supports the buffer protocol
data = memoryview(data)
length = data.nbytes
if length > 0:
self.fileobj.write(self.compress.compress(data))
self.size += length
self.crc = zlib.crc32(data, self.crc)
self.offset += length
return length
def read(self, size=-1):
self._check_not_closed()
if self.mode != READ:
import errno
raise OSError(errno.EBADF, "read() on write-only GzipFile object")
return self._buffer.read(size)
def read1(self, size=-1):
"""Implements BufferedIOBase.read1()
Reads up to a buffer's worth of data if size is negative."""
self._check_not_closed()
if self.mode != READ:
import errno
raise OSError(errno.EBADF, "read1() on write-only GzipFile object")
if size < 0:
size = io.DEFAULT_BUFFER_SIZE
return self._buffer.read1(size)
def peek(self, n):
self._check_not_closed()
if self.mode != READ:
import errno
raise OSError(errno.EBADF, "peek() on write-only GzipFile object")
return self._buffer.peek(n)
def closed(self):
return self.fileobj is None
def close(self):
fileobj = self.fileobj
if fileobj is None:
return
self.fileobj = None
try:
if self.mode == WRITE:
fileobj.write(self.compress.flush())
write32u(fileobj, self.crc)
# self.size may exceed 2 GiB, or even 4 GiB
write32u(fileobj, self.size & 0xffffffff)
elif self.mode == READ:
self._buffer.close()
finally:
myfileobj = self.myfileobj
if myfileobj:
self.myfileobj = None
myfileobj.close()
def flush(self,zlib_mode=zlib.Z_SYNC_FLUSH):
self._check_not_closed()
if self.mode == WRITE:
# Ensure the compressor's buffer is flushed
self.fileobj.write(self.compress.flush(zlib_mode))
self.fileobj.flush()
def fileno(self):
"""Invoke the underlying file object's fileno() method.
This will raise AttributeError if the underlying file object
doesn't support fileno().
"""
return self.fileobj.fileno()
def rewind(self):
'''Return the uncompressed stream file position indicator to the
beginning of the file'''
if self.mode != READ:
raise OSError("Can't rewind in write mode")
self._buffer.seek(0)
def readable(self):
return self.mode == READ
def writable(self):
return self.mode == WRITE
def seekable(self):
return True
def seek(self, offset, whence=io.SEEK_SET):
if self.mode == WRITE:
if whence != io.SEEK_SET:
if whence == io.SEEK_CUR:
offset = self.offset + offset
else:
raise ValueError('Seek from end not supported')
if offset < self.offset:
raise OSError('Negative seek in write mode')
count = offset - self.offset
chunk = b'\0' * 1024
for i in range(count // 1024):
self.write(chunk)
self.write(b'\0' * (count % 1024))
elif self.mode == READ:
self._check_not_closed()
return self._buffer.seek(offset, whence)
return self.offset
def readline(self, size=-1):
self._check_not_closed()
return self._buffer.readline(size)
The provided code snippet includes necessary dependencies for implementing the `compress` function. Write a Python function `def compress(data, compresslevel=_COMPRESS_LEVEL_BEST, *, mtime=None)` to solve the following problem:
Compress data in one shot and return the compressed string. Optional argument is the compression level, in range of 0-9.
Here is the function:
def compress(data, compresslevel=_COMPRESS_LEVEL_BEST, *, mtime=None):
"""Compress data in one shot and return the compressed string.
Optional argument is the compression level, in range of 0-9.
"""
buf = io.BytesIO()
with GzipFile(fileobj=buf, mode='wb', compresslevel=compresslevel, mtime=mtime) as f:
f.write(data)
return buf.getvalue() | Compress data in one shot and return the compressed string. Optional argument is the compression level, in range of 0-9. |
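A small usage sketch for the one-shot compress() above, assuming standard gzip module behavior; the payload is illustrative:
payload = b"hello world " * 100
blob = compress(payload, compresslevel=9)
print(len(payload), len(blob))   # repetitive input shrinks substantially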
186,518 | import struct, sys, time, os
import zlib
import builtins
import io
import _compression
class GzipFile(_compression.BaseStream):
"""The GzipFile class simulates most of the methods of a file object with
the exception of the truncate() method.
This class only supports opening files in binary mode. If you need to open a
compressed file in text mode, use the gzip.open() function.
"""
# Overridden with internal file object to be closed, if only a filename
# is passed in
myfileobj = None
def __init__(self, filename=None, mode=None,
compresslevel=_COMPRESS_LEVEL_BEST, fileobj=None, mtime=None):
"""Constructor for the GzipFile class.
At least one of fileobj and filename must be given a
non-trivial value.
The new class instance is based on fileobj, which can be a regular
file, an io.BytesIO object, or any other object which simulates a file.
It defaults to None, in which case filename is opened to provide
a file object.
When fileobj is not None, the filename argument is only used to be
included in the gzip file header, which may include the original
filename of the uncompressed file. It defaults to the filename of
fileobj, if discernible; otherwise, it defaults to the empty string,
and in this case the original filename is not included in the header.
The mode argument can be any of 'r', 'rb', 'a', 'ab', 'w', 'wb', 'x', or
'xb' depending on whether the file will be read or written. The default
is the mode of fileobj if discernible; otherwise, the default is 'rb'.
A mode of 'r' is equivalent to one of 'rb', and similarly for 'w' and
'wb', 'a' and 'ab', and 'x' and 'xb'.
The compresslevel argument is an integer from 0 to 9 controlling the
level of compression; 1 is fastest and produces the least compression,
and 9 is slowest and produces the most compression. 0 is no compression
at all. The default is 9.
The mtime argument is an optional numeric timestamp to be written
to the last modification time field in the stream when compressing.
If omitted or None, the current time is used.
"""
if mode and ('t' in mode or 'U' in mode):
raise ValueError("Invalid mode: {!r}".format(mode))
if mode and 'b' not in mode:
mode += 'b'
if fileobj is None:
fileobj = self.myfileobj = builtins.open(filename, mode or 'rb')
if filename is None:
filename = getattr(fileobj, 'name', '')
if not isinstance(filename, (str, bytes)):
filename = ''
else:
filename = os.fspath(filename)
origmode = mode
if mode is None:
mode = getattr(fileobj, 'mode', 'rb')
if mode.startswith('r'):
self.mode = READ
raw = _GzipReader(fileobj)
self._buffer = io.BufferedReader(raw)
self.name = filename
elif mode.startswith(('w', 'a', 'x')):
if origmode is None:
import warnings
warnings.warn(
"GzipFile was opened for writing, but this will "
"change in future Python releases. "
"Specify the mode argument for opening it for writing.",
FutureWarning, 2)
self.mode = WRITE
self._init_write(filename)
self.compress = zlib.compressobj(compresslevel,
zlib.DEFLATED,
-zlib.MAX_WBITS,
zlib.DEF_MEM_LEVEL,
0)
self._write_mtime = mtime
else:
raise ValueError("Invalid mode: {!r}".format(mode))
self.fileobj = fileobj
if self.mode == WRITE:
self._write_gzip_header(compresslevel)
def filename(self):
import warnings
warnings.warn("use the name attribute", DeprecationWarning, 2)
if self.mode == WRITE and self.name[-3:] != ".gz":
return self.name + ".gz"
return self.name
def mtime(self):
"""Last modification time read from stream, or None"""
return self._buffer.raw._last_mtime
def __repr__(self):
s = repr(self.fileobj)
return '<gzip ' + s[1:-1] + ' ' + hex(id(self)) + '>'
def _init_write(self, filename):
self.name = filename
self.crc = zlib.crc32(b"")
self.size = 0
self.writebuf = []
self.bufsize = 0
self.offset = 0 # Current file offset for seek(), tell(), etc
def _write_gzip_header(self, compresslevel):
self.fileobj.write(b'\037\213') # magic header
self.fileobj.write(b'\010') # compression method
try:
# RFC 1952 requires the FNAME field to be Latin-1. Do not
# include filenames that cannot be represented that way.
fname = os.path.basename(self.name)
if not isinstance(fname, bytes):
fname = fname.encode('latin-1')
if fname.endswith(b'.gz'):
fname = fname[:-3]
except UnicodeEncodeError:
fname = b''
flags = 0
if fname:
flags = FNAME
self.fileobj.write(chr(flags).encode('latin-1'))
mtime = self._write_mtime
if mtime is None:
mtime = time.time()
write32u(self.fileobj, int(mtime))
if compresslevel == _COMPRESS_LEVEL_BEST:
xfl = b'\002'
elif compresslevel == _COMPRESS_LEVEL_FAST:
xfl = b'\004'
else:
xfl = b'\000'
self.fileobj.write(xfl)
self.fileobj.write(b'\377')
if fname:
self.fileobj.write(fname + b'\000')
def write(self,data):
self._check_not_closed()
if self.mode != WRITE:
import errno
raise OSError(errno.EBADF, "write() on read-only GzipFile object")
if self.fileobj is None:
raise ValueError("write() on closed GzipFile object")
if isinstance(data, (bytes, bytearray)):
length = len(data)
else:
# accept any data that supports the buffer protocol
data = memoryview(data)
length = data.nbytes
if length > 0:
self.fileobj.write(self.compress.compress(data))
self.size += length
self.crc = zlib.crc32(data, self.crc)
self.offset += length
return length
def read(self, size=-1):
self._check_not_closed()
if self.mode != READ:
import errno
raise OSError(errno.EBADF, "read() on write-only GzipFile object")
return self._buffer.read(size)
def read1(self, size=-1):
"""Implements BufferedIOBase.read1()
Reads up to a buffer's worth of data if size is negative."""
self._check_not_closed()
if self.mode != READ:
import errno
raise OSError(errno.EBADF, "read1() on write-only GzipFile object")
if size < 0:
size = io.DEFAULT_BUFFER_SIZE
return self._buffer.read1(size)
def peek(self, n):
self._check_not_closed()
if self.mode != READ:
import errno
raise OSError(errno.EBADF, "peek() on write-only GzipFile object")
return self._buffer.peek(n)
def closed(self):
return self.fileobj is None
def close(self):
fileobj = self.fileobj
if fileobj is None:
return
self.fileobj = None
try:
if self.mode == WRITE:
fileobj.write(self.compress.flush())
write32u(fileobj, self.crc)
# self.size may exceed 2 GiB, or even 4 GiB
write32u(fileobj, self.size & 0xffffffff)
elif self.mode == READ:
self._buffer.close()
finally:
myfileobj = self.myfileobj
if myfileobj:
self.myfileobj = None
myfileobj.close()
def flush(self,zlib_mode=zlib.Z_SYNC_FLUSH):
self._check_not_closed()
if self.mode == WRITE:
# Ensure the compressor's buffer is flushed
self.fileobj.write(self.compress.flush(zlib_mode))
self.fileobj.flush()
def fileno(self):
"""Invoke the underlying file object's fileno() method.
This will raise AttributeError if the underlying file object
doesn't support fileno().
"""
return self.fileobj.fileno()
def rewind(self):
'''Return the uncompressed stream file position indicator to the
beginning of the file'''
if self.mode != READ:
raise OSError("Can't rewind in write mode")
self._buffer.seek(0)
def readable(self):
return self.mode == READ
def writable(self):
return self.mode == WRITE
def seekable(self):
return True
def seek(self, offset, whence=io.SEEK_SET):
if self.mode == WRITE:
if whence != io.SEEK_SET:
if whence == io.SEEK_CUR:
offset = self.offset + offset
else:
raise ValueError('Seek from end not supported')
if offset < self.offset:
raise OSError('Negative seek in write mode')
count = offset - self.offset
chunk = b'\0' * 1024
for i in range(count // 1024):
self.write(chunk)
self.write(b'\0' * (count % 1024))
elif self.mode == READ:
self._check_not_closed()
return self._buffer.seek(offset, whence)
return self.offset
def readline(self, size=-1):
self._check_not_closed()
return self._buffer.readline(size)
The provided code snippet includes necessary dependencies for implementing the `decompress` function. Write a Python function `def decompress(data)` to solve the following problem:
Decompress a gzip compressed string in one shot. Return the decompressed string.
Here is the function:
def decompress(data):
"""Decompress a gzip compressed string in one shot.
Return the decompressed string.
"""
with GzipFile(fileobj=io.BytesIO(data)) as f:
return f.read() | Decompress a gzip compressed string in one shot. Return the decompressed string. |
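A round-trip sketch pairing decompress() with the compress() helper from the previous row, assuming both live in the same gzip module:
original = b"example data" * 50
assert decompress(compress(original)) == original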
186,522 | import struct
class ZoneInfoNotFoundError(KeyError):
def load_tzdata(key):
import importlib.resources
components = key.split("/")
package_name = ".".join(["tzdata.zoneinfo"] + components[:-1])
resource_name = components[-1]
try:
return importlib.resources.open_binary(package_name, resource_name)
except (ImportError, FileNotFoundError, UnicodeEncodeError):
# There are three types of exception that can be raised that all amount
# to "we cannot find this key":
#
# ImportError: If package_name doesn't exist (e.g. if tzdata is not
# installed, or if there's an error in the folder name like
# Amrica/New_York)
# FileNotFoundError: If resource_name doesn't exist in the package
# (e.g. Europe/Krasnoy)
# UnicodeEncodeError: If package_name or resource_name are not UTF-8,
# such as keys containing a surrogate character.
raise ZoneInfoNotFoundError(f"No time zone found with key {key}") | null |
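A hedged usage sketch for load_tzdata(); it assumes the optional third-party tzdata package is installed and uses "America/New_York" purely as an illustrative key:
try:
    with load_tzdata("America/New_York") as f:    # illustrative key; needs the tzdata package
        assert f.read(4) == b"TZif"               # TZif files begin with this magic value
except ZoneInfoNotFoundError:
    pass                                          # tzdata not installed or key unknown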
186,523 | import struct
class _TZifHeader:
__slots__ = [
"version",
"isutcnt",
"isstdcnt",
"leapcnt",
"timecnt",
"typecnt",
"charcnt",
]
def __init__(self, *args):
for attr, val in zip(self.__slots__, args, strict=True):
setattr(self, attr, val)
@classmethod
def from_file(cls, stream):
# The header starts with a 4-byte "magic" value
if stream.read(4) != b"TZif":
raise ValueError("Invalid TZif file: magic not found")
_version = stream.read(1)
if _version == b"\x00":
version = 1
else:
version = int(_version)
stream.read(15)
args = (version,)
# Slots are defined in the order that the bytes are arranged
args = args + struct.unpack(">6l", stream.read(24))
return cls(*args)
def load_data(fobj):
header = _TZifHeader.from_file(fobj)
if header.version == 1:
time_size = 4
time_type = "l"
else:
# Version 2+ has 64-bit integer transition times
time_size = 8
time_type = "q"
# Version 2+ also starts with a Version 1 header and data, which
# we need to skip now
skip_bytes = (
header.timecnt * 5 # Transition times and types
+ header.typecnt * 6 # Local time type records
+ header.charcnt # Time zone designations
+ header.leapcnt * 8 # Leap second records
+ header.isstdcnt # Standard/wall indicators
+ header.isutcnt # UT/local indicators
)
fobj.seek(skip_bytes, 1)
# Now we need to read the second header, which is not the same
# as the first
header = _TZifHeader.from_file(fobj)
typecnt = header.typecnt
timecnt = header.timecnt
charcnt = header.charcnt
# The data portion starts with timecnt transitions and indices
if timecnt:
trans_list_utc = struct.unpack(
f">{timecnt}{time_type}", fobj.read(timecnt * time_size)
)
trans_idx = struct.unpack(f">{timecnt}B", fobj.read(timecnt))
else:
trans_list_utc = ()
trans_idx = ()
# Read the ttinfo struct, (utoff, isdst, abbrind)
if typecnt:
utcoff, isdst, abbrind = zip(
*(struct.unpack(">lbb", fobj.read(6)) for i in range(typecnt))
)
else:
utcoff = ()
isdst = ()
abbrind = ()
# Now read the abbreviations. They are null-terminated strings, indexed
# not by position in the array but by position in the unsplit
# abbreviation string. I suppose this makes more sense in C, which uses
# null to terminate the strings, but it's inconvenient here...
abbr_vals = {}
abbr_chars = fobj.read(charcnt)
def get_abbr(idx):
# Gets a string starting at idx and running until the next \x00
#
# We cannot pre-populate abbr_vals by splitting on \x00 because there
# are some zones that use subsets of longer abbreviations, like so:
#
# LMT\x00AHST\x00HDT\x00
#
# Where the idx to abbr mapping should be:
#
# {0: "LMT", 4: "AHST", 5: "HST", 9: "HDT"}
if idx not in abbr_vals:
span_end = abbr_chars.find(b"\x00", idx)
abbr_vals[idx] = abbr_chars[idx:span_end].decode()
return abbr_vals[idx]
abbr = tuple(get_abbr(idx) for idx in abbrind)
# The remainder of the file consists of leap seconds (currently unused) and
# the standard/wall and ut/local indicators, which are metadata we don't need.
# In version 2 files, we need to skip the unnecessary data to get at the TZ string:
if header.version >= 2:
# Each leap second record has size (time_size + 4)
skip_bytes = header.isutcnt + header.isstdcnt + header.leapcnt * 12
fobj.seek(skip_bytes, 1)
c = fobj.read(1) # Should be \n
assert c == b"\n", c
tz_bytes = b""
while (c := fobj.read(1)) != b"\n":
tz_bytes += c
tz_str = tz_bytes
else:
tz_str = None
return trans_idx, trans_list_utc, utcoff, isdst, abbr, tz_str | null |
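The abbreviation-index behavior described in the get_abbr() comments can be illustrated with a small standalone check (a sketch, independent of any real TZif file):
abbr_chars = b"LMT\x00AHST\x00HDT\x00"
def abbr_at(idx):
    return abbr_chars[idx:abbr_chars.find(b"\x00", idx)].decode()
assert abbr_at(0) == "LMT" and abbr_at(4) == "AHST"
assert abbr_at(5) == "HST" and abbr_at(9) == "HDT"   # "HST" is a suffix of "AHST"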
186,525 | import bisect
import calendar
import collections
import functools
import re
import weakref
from datetime import datetime, timedelta, tzinfo
from . import _common, _tzpath
def _load_timedelta(seconds):
return timedelta(seconds=seconds)
class _ttinfo:
__slots__ = ["utcoff", "dstoff", "tzname"]
def __init__(self, utcoff, dstoff, tzname):
self.utcoff = utcoff
self.dstoff = dstoff
self.tzname = tzname
def __eq__(self, other):
return (
self.utcoff == other.utcoff
and self.dstoff == other.dstoff
and self.tzname == other.tzname
)
def __repr__(self): # pragma: nocover
return (
f"{self.__class__.__name__}"
+ f"({self.utcoff}, {self.dstoff}, {self.tzname})"
)
class _TZStr:
__slots__ = (
"std",
"dst",
"start",
"end",
"get_trans_info",
"get_trans_info_fromutc",
"dst_diff",
)
def __init__(
self, std_abbr, std_offset, dst_abbr, dst_offset, start=None, end=None
):
self.dst_diff = dst_offset - std_offset
std_offset = _load_timedelta(std_offset)
self.std = _ttinfo(
utcoff=std_offset, dstoff=_load_timedelta(0), tzname=std_abbr
)
self.start = start
self.end = end
dst_offset = _load_timedelta(dst_offset)
delta = _load_timedelta(self.dst_diff)
self.dst = _ttinfo(utcoff=dst_offset, dstoff=delta, tzname=dst_abbr)
# These are assertions because the constructor should only be called
# by functions that would fail before passing start or end
assert start is not None, "No transition start specified"
assert end is not None, "No transition end specified"
self.get_trans_info = self._get_trans_info
self.get_trans_info_fromutc = self._get_trans_info_fromutc
def transitions(self, year):
start = self.start.year_to_epoch(year)
end = self.end.year_to_epoch(year)
return start, end
def _get_trans_info(self, ts, year, fold):
"""Get the information about the current transition - tti"""
start, end = self.transitions(year)
# With fold = 0, the period (denominated in local time) with the
# smaller offset starts at the end of the gap and ends at the end of
# the fold; with fold = 1, it runs from the start of the gap to the
# beginning of the fold.
#
# So in order to determine the DST boundaries we need to know both
# the fold and whether DST is positive or negative (rare), and it
# turns out that this boils down to fold XOR is_positive.
if fold == (self.dst_diff >= 0):
end -= self.dst_diff
else:
start += self.dst_diff
if start < end:
isdst = start <= ts < end
else:
isdst = not (end <= ts < start)
return self.dst if isdst else self.std
def _get_trans_info_fromutc(self, ts, year):
start, end = self.transitions(year)
start -= self.std.utcoff.total_seconds()
end -= self.dst.utcoff.total_seconds()
if start < end:
isdst = start <= ts < end
else:
isdst = not (end <= ts < start)
# For positive DST, the ambiguous period is one dst_diff after the end
# of DST; for negative DST, the ambiguous period is one dst_diff before
# the start of DST.
if self.dst_diff > 0:
ambig_start = end
ambig_end = end + self.dst_diff
else:
ambig_start = start
ambig_end = start - self.dst_diff
fold = ambig_start <= ts < ambig_end
return (self.dst if isdst else self.std, fold)
def _parse_dst_start_end(dststr):
date, *time = dststr.split("/")
if date[0] == "M":
n_is_julian = False
m = re.match(r"M(\d{1,2})\.(\d).(\d)$", date)
if m is None:
raise ValueError(f"Invalid dst start/end date: {dststr}")
date_offset = tuple(map(int, m.groups()))
offset = _CalendarOffset(*date_offset)
else:
if date[0] == "J":
n_is_julian = True
date = date[1:]
else:
n_is_julian = False
doy = int(date)
offset = _DayOffset(doy, n_is_julian)
if time:
time_components = list(map(int, time[0].split(":")))
n_components = len(time_components)
if n_components < 3:
time_components.extend([0] * (3 - n_components))
offset.hour, offset.minute, offset.second = time_components
return offset
def _parse_tz_delta(tz_delta):
match = re.match(
r"(?P<sign>[+-])?(?P<h>\d{1,2})(:(?P<m>\d{2})(:(?P<s>\d{2}))?)?",
tz_delta,
)
# Anything passed to this function should already have hit an equivalent
# regular expression to find the section to parse.
assert match is not None, tz_delta
h, m, s = (
int(v) if v is not None else 0
for v in map(match.group, ("h", "m", "s"))
)
total = h * 3600 + m * 60 + s
if not -86400 < total < 86400:
raise ValueError(
f"Offset must be strictly between -24h and +24h: {tz_delta}"
)
# Yes, +5 maps to an offset of -5h
if match.group("sign") != "-":
total *= -1
return total
def _parse_tz_str(tz_str):
# The tz string has the format:
#
# std[offset[dst[offset],start[/time],end[/time]]]
#
# std and dst must be 3 or more characters long and must not contain
# a leading colon, embedded digits, commas, nor plus or minus signs;
# The spaces between "std" and "offset" are only for display and are
# not actually present in the string.
#
# The format of the offset is ``[+|-]hh[:mm[:ss]]``
offset_str, *start_end_str = tz_str.split(",", 1)
# fmt: off
parser_re = re.compile(
r"(?P<std>[^<0-9:.+-]+|<[a-zA-Z0-9+\-]+>)" +
r"((?P<stdoff>[+-]?\d{1,2}(:\d{2}(:\d{2})?)?)" +
r"((?P<dst>[^0-9:.+-]+|<[a-zA-Z0-9+\-]+>)" +
r"((?P<dstoff>[+-]?\d{1,2}(:\d{2}(:\d{2})?)?))?" +
r")?" + # dst
r")?$" # stdoff
)
# fmt: on
m = parser_re.match(offset_str)
if m is None:
raise ValueError(f"{tz_str} is not a valid TZ string")
std_abbr = m.group("std")
dst_abbr = m.group("dst")
dst_offset = None
std_abbr = std_abbr.strip("<>")
if dst_abbr:
dst_abbr = dst_abbr.strip("<>")
if std_offset := m.group("stdoff"):
try:
std_offset = _parse_tz_delta(std_offset)
except ValueError as e:
raise ValueError(f"Invalid STD offset in {tz_str}") from e
else:
std_offset = 0
if dst_abbr is not None:
if dst_offset := m.group("dstoff"):
try:
dst_offset = _parse_tz_delta(dst_offset)
except ValueError as e:
raise ValueError(f"Invalid DST offset in {tz_str}") from e
else:
dst_offset = std_offset + 3600
if not start_end_str:
raise ValueError(f"Missing transition rules: {tz_str}")
start_end_strs = start_end_str[0].split(",", 1)
try:
start, end = (_parse_dst_start_end(x) for x in start_end_strs)
except ValueError as e:
raise ValueError(f"Invalid TZ string: {tz_str}") from e
return _TZStr(std_abbr, std_offset, dst_abbr, dst_offset, start, end)
elif start_end_str:
raise ValueError(f"Transition rule present without DST: {tz_str}")
else:
# This is a static ttinfo, don't return _TZStr
return _ttinfo(
_load_timedelta(std_offset), _load_timedelta(0), std_abbr
) | null |
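A minimal sketch exercising _parse_tz_str() on a static (no-DST) TZ string; it relies only on the helpers shown above and on the POSIX convention that a positive numeric offset means west of UTC:
tti = _parse_tz_str("EST5")
assert tti.tzname == "EST"
assert tti.utcoff == timedelta(hours=-5)   # "+5" in a TZ string maps to UTC-05:00
assert tti.dstoff == timedelta(0)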
186,530 | from pickle import DEFAULT_PROTOCOL, Pickler, Unpickler
from io import BytesIO
import collections.abc
class DbfilenameShelf(Shelf):
"""Shelf implementation using the "dbm" generic dbm interface.
This is initialized with the filename for the dbm database.
See the module's __doc__ string for an overview of the interface.
"""
def __init__(self, filename, flag='c', protocol=None, writeback=False):
import dbm
Shelf.__init__(self, dbm.open(filename, flag), protocol, writeback)
The provided code snippet includes necessary dependencies for implementing the `open` function. Write a Python function `def open(filename, flag='c', protocol=None, writeback=False)` to solve the following problem:
Open a persistent dictionary for reading and writing. The filename parameter is the base filename for the underlying database. As a side-effect, an extension may be added to the filename and more than one file may be created. The optional flag parameter has the same interpretation as the flag parameter of dbm.open(). The optional protocol parameter specifies the version of the pickle protocol. See the module's __doc__ string for an overview of the interface.
Here is the function:
def open(filename, flag='c', protocol=None, writeback=False):
"""Open a persistent dictionary for reading and writing.
The filename parameter is the base filename for the underlying
database. As a side-effect, an extension may be added to the
filename and more than one file may be created. The optional flag
parameter has the same interpretation as the flag parameter of
dbm.open(). The optional protocol parameter specifies the
version of the pickle protocol.
See the module's __doc__ string for an overview of the interface.
"""
return DbfilenameShelf(filename, flag, protocol, writeback) | Open a persistent dictionary for reading and writing. The filename parameter is the base filename for the underlying database. As a side-effect, an extension may be added to the filename and more than one file may be created. The optional flag parameter has the same interpretation as the flag parameter of dbm.open(). The optional protocol parameter specifies the version of the pickle protocol. See the module's __doc__ string for an overview of the interface. |
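A minimal usage sketch for the shelve-style open() above (note it shadows builtins.open in this snippet); it assumes the full Shelf base class from the standard shelve module, a working dbm backend, and a throwaway temporary path:
import os, tempfile
path = os.path.join(tempfile.mkdtemp(), "demo_shelf")   # illustrative location
db = open(path)            # shelve.open, not builtins.open
db["answer"] = 42
db.close()
db = open(path, flag="r")
assert db["answer"] == 42
db.close()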
186,540 | from collections import namedtuple
def what(filename):
from os.path import (curdir, pardir, sep, pathsep, defpath, extsep, altsep,
devnull)
def glob(pathname, *, root_dir=None, dir_fd=None, recursive=False):
def testall(list, recursive, toplevel):
import sys
import os
for filename in list:
if os.path.isdir(filename):
print(filename + '/:', end=' ')
if recursive or toplevel:
print('recursing down:')
import glob
names = glob.glob(os.path.join(glob.escape(filename), '*'))
testall(names, recursive, 0)
else:
print('*** directory (use -r) ***')
else:
print(filename + ':', end=' ')
sys.stdout.flush()
try:
print(what(filename))
except OSError:
print('*** not found ***') | null |
186,551 | import re
import struct
import binascii
_b32alphabet = b'ABCDEFGHIJKLMNOPQRSTUVWXYZ234567'
def _b32encode(alphabet, s):
global _b32tab2
# Delay the initialization of the table to not waste memory
# if the function is never called
if alphabet not in _b32tab2:
b32tab = [bytes((i,)) for i in alphabet]
_b32tab2[alphabet] = [a + b for a in b32tab for b in b32tab]
b32tab = None
if not isinstance(s, bytes_types):
s = memoryview(s).tobytes()
leftover = len(s) % 5
# Pad the last quantum with zero bits if necessary
if leftover:
s = s + b'\0' * (5 - leftover) # Don't use += !
encoded = bytearray()
from_bytes = int.from_bytes
b32tab2 = _b32tab2[alphabet]
for i in range(0, len(s), 5):
c = from_bytes(s[i: i + 5], 'big')
encoded += (b32tab2[c >> 30] + # bits 1 - 10
b32tab2[(c >> 20) & 0x3ff] + # bits 11 - 20
b32tab2[(c >> 10) & 0x3ff] + # bits 21 - 30
b32tab2[c & 0x3ff] # bits 31 - 40
)
# Adjust for any leftover partial quanta
if leftover == 1:
encoded[-6:] = b'======'
elif leftover == 2:
encoded[-4:] = b'===='
elif leftover == 3:
encoded[-3:] = b'==='
elif leftover == 4:
encoded[-1:] = b'='
return bytes(encoded)
def b32encode(s):
return _b32encode(_b32alphabet, s) | null |
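A quick check of b32encode() with the RFC 4648 alphabet above: a 5-byte input needs no padding, a 1-byte input is padded out to a full 8-character quantum:
assert b32encode(b"hello") == b"NBSWY3DP"
assert b32encode(b"a") == b"ME======"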
186,552 | import re
import struct
import binascii
_b32alphabet = b'ABCDEFGHIJKLMNOPQRSTUVWXYZ234567'
def _b32decode(alphabet, s, casefold=False, map01=None):
global _b32rev
# Delay the initialization of the table to not waste memory
# if the function is never called
if alphabet not in _b32rev:
_b32rev[alphabet] = {v: k for k, v in enumerate(alphabet)}
s = _bytes_from_decode_data(s)
if len(s) % 8:
raise binascii.Error('Incorrect padding')
# Handle section 2.4 zero and one mapping. The flag map01 will be either
# False, or the character to map the digit 1 (one) to. It should be
# either L (el) or I (eye).
if map01 is not None:
map01 = _bytes_from_decode_data(map01)
assert len(map01) == 1, repr(map01)
s = s.translate(bytes.maketrans(b'01', b'O' + map01))
if casefold:
s = s.upper()
# Strip off pad characters from the right. We need to count the pad
# characters because this will tell us how many null bytes to remove from
# the end of the decoded string.
l = len(s)
s = s.rstrip(b'=')
padchars = l - len(s)
# Now decode the full quanta
decoded = bytearray()
b32rev = _b32rev[alphabet]
for i in range(0, len(s), 8):
quanta = s[i: i + 8]
acc = 0
try:
for c in quanta:
acc = (acc << 5) + b32rev[c]
except KeyError:
raise binascii.Error('Non-base32 digit found') from None
decoded += acc.to_bytes(5, 'big')
# Process the last, partial quanta
if l % 8 or padchars not in {0, 1, 3, 4, 6}:
raise binascii.Error('Incorrect padding')
if padchars and decoded:
acc <<= 5 * padchars
last = acc.to_bytes(5, 'big')
leftover = (43 - 5 * padchars) // 8 # 1: 4, 3: 3, 4: 2, 6: 1
decoded[-5:] = last[:leftover]
return bytes(decoded)
def b32decode(s, casefold=False, map01=None):
return _b32decode(_b32alphabet, s, casefold, map01) | null |
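A round-trip sketch for b32decode(); it assumes the full base64 module (the _b32rev cache and the _bytes_from_decode_data helper are not shown in this row):
assert b32decode(b"NBSWY3DP") == b"hello"
assert b32decode(b"nbswy3dp", casefold=True) == b"hello"   # lowercase accepted only with casefold
assert b32decode(b32encode(b"any payload")) == b"any payload"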
186,553 | import re
import struct
import binascii
_b32hexalphabet = b'0123456789ABCDEFGHIJKLMNOPQRSTUV'
def _b32encode(alphabet, s):
global _b32tab2
# Delay the initialization of the table to not waste memory
# if the function is never called
if alphabet not in _b32tab2:
b32tab = [bytes((i,)) for i in alphabet]
_b32tab2[alphabet] = [a + b for a in b32tab for b in b32tab]
b32tab = None
if not isinstance(s, bytes_types):
s = memoryview(s).tobytes()
leftover = len(s) % 5
# Pad the last quantum with zero bits if necessary
if leftover:
s = s + b'\0' * (5 - leftover) # Don't use += !
encoded = bytearray()
from_bytes = int.from_bytes
b32tab2 = _b32tab2[alphabet]
for i in range(0, len(s), 5):
c = from_bytes(s[i: i + 5], 'big')
encoded += (b32tab2[c >> 30] + # bits 1 - 10
b32tab2[(c >> 20) & 0x3ff] + # bits 11 - 20
b32tab2[(c >> 10) & 0x3ff] + # bits 21 - 30
b32tab2[c & 0x3ff] # bits 31 - 40
)
# Adjust for any leftover partial quanta
if leftover == 1:
encoded[-6:] = b'======'
elif leftover == 2:
encoded[-4:] = b'===='
elif leftover == 3:
encoded[-3:] = b'==='
elif leftover == 4:
encoded[-1:] = b'='
return bytes(encoded)
def b32hexencode(s):
return _b32encode(_b32hexalphabet, s) | null |
186,554 | import re
import struct
import binascii
_b32hexalphabet = b'0123456789ABCDEFGHIJKLMNOPQRSTUV'
def _b32decode(alphabet, s, casefold=False, map01=None):
def b32hexdecode(s, casefold=False):
# base32hex does not have the 01 mapping
return _b32decode(_b32hexalphabet, s, casefold) | null |
186,562 | import warnings as _warnings
try:
import _hashlib as _hashopenssl
except ImportError:
_hashopenssl = None
_functype = None
from _operator import _compare_digest as compare_digest
else:
compare_digest = _hashopenssl.compare_digest
_functype = type(_hashopenssl.openssl_sha256) # builtin type
import hashlib as _hashlib
trans_5C = bytes((x ^ 0x5C) for x in range(256))
trans_36 = bytes((x ^ 0x36) for x in range(256))
def new(key, msg=None, digestmod=''):
"""Create a new hashing object and return it.
key: bytes or buffer, The starting key for the hash.
msg: bytes or buffer, Initial input for the hash, or None.
digestmod: A hash name suitable for hashlib.new(). *OR*
A hashlib constructor returning a new hash object. *OR*
A module supporting PEP 247.
Required as of 3.8, despite its position after the optional
msg argument. Passing it as a keyword argument is
recommended, though not required for legacy API reasons.
You can now feed arbitrary bytes into the object using its update()
method, and can ask for the hash value at any time by calling its digest()
or hexdigest() methods.
"""
return HMAC(key, msg, digestmod)
The provided code snippet includes necessary dependencies for implementing the `digest` function. Write a Python function `def digest(key, msg, digest)` to solve the following problem:
Fast inline implementation of HMAC. key: bytes or buffer, The key for the keyed hash object. msg: bytes or buffer, Input message. digest: A hash name suitable for hashlib.new() for best performance. *OR* A hashlib constructor returning a new hash object. *OR* A module supporting PEP 247.
Here is the function:
def digest(key, msg, digest):
"""Fast inline implementation of HMAC.
key: bytes or buffer, The key for the keyed hash object.
msg: bytes or buffer, Input message.
digest: A hash name suitable for hashlib.new() for best performance. *OR*
A hashlib constructor returning a new hash object. *OR*
A module supporting PEP 247.
"""
if _hashopenssl is not None and isinstance(digest, (str, _functype)):
try:
return _hashopenssl.hmac_digest(key, msg, digest)
except _hashopenssl.UnsupportedDigestmodError:
pass
if callable(digest):
digest_cons = digest
elif isinstance(digest, str):
digest_cons = lambda d=b'': _hashlib.new(digest, d)
else:
digest_cons = lambda d=b'': digest.new(d)
inner = digest_cons()
outer = digest_cons()
blocksize = getattr(inner, 'block_size', 64)
if len(key) > blocksize:
key = digest_cons(key).digest()
key = key + b'\x00' * (blocksize - len(key))
inner.update(key.translate(trans_36))
outer.update(key.translate(trans_5C))
inner.update(msg)
outer.update(inner.digest())
return outer.digest() | Fast inline implementation of HMAC. key: bytes or buffer, The key for the keyed hash object. msg: bytes or buffer, Input message. digest: A hash name suitable for hashlib.new() for best performance. *OR* A hashlib constructor returning a new hash object. *OR* A module supporting PEP 247. |
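A short usage sketch for the one-shot digest() above; "sha256" is a typical digestmod and the key/message values are illustrative:
import hashlib
mac = digest(b"secret-key", b"message", "sha256")
assert len(mac) == hashlib.sha256().digest_size   # 32 bytes for SHA-256
assert compare_digest(mac, mac)                   # constant-time comparison of MACs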
186,569 | import os
import warnings
import tkinter
from tkinter import *
from tkinter import _cnfmerge
The provided code snippet includes necessary dependencies for implementing the `OptionName` function. Write a Python function `def OptionName(widget)` to solve the following problem:
Returns the qualified path name for the widget. Normally used to set default options for subwidgets. See tixwidgets.py
Here is the function:
def OptionName(widget):
'''Returns the qualified path name for the widget. Normally used to set
default options for subwidgets. See tixwidgets.py'''
return widget.tk.call('tixOptionName', widget._w) | Returns the qualified path name for the widget. Normally used to set default options for subwidgets. See tixwidgets.py |
186,570 | import os
import warnings
import tkinter
from tkinter import *
from tkinter import _cnfmerge
def FileTypeList(dict):
s = ''
for type in dict.keys():
s = s + '{{' + type + '} {' + type + ' - ' + dict[type] + '}} '
return s | null |
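FileTypeList() just builds a Tcl list string, so it can be checked standalone without a Tk interpreter:
spec = FileTypeList({"HTML": "HyperText files"})
assert spec == "{{HTML} {HTML - HyperText files}} "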
186,571 | from tkinter import _cnfmerge, Widget, TclError, Button, Pack
DIALOG_ICON = 'questhead'
class Dialog(Widget):
def __init__(self, master=None, cnf={}, **kw):
def destroy(self):
def _test():
d = Dialog(None, {'title': 'File Modified',
'text':
'File "Python.h" has been modified'
' since the last time it was saved.'
' Do you want to save it before'
' exiting the application.',
'bitmap': DIALOG_ICON,
'default': 0,
'strings': ('Save File',
'Discard Changes',
'Return to Editor')})
print(d.num) | null |
186,572 | import tkinter
class DndHandler:
def __init__(self, source, event):
def __del__(self):
def on_motion(self, event):
def on_release(self, event):
def cancel(self, event=None):
def finish(self, event, commit=0):
def dnd_start(source, event):
h = DndHandler(source, event)
if h.root is not None:
return h
else:
return None | null |
186,573 | from tkinter import *
from tkinter import _get_temp_root, _destroy_temp_root
from tkinter import messagebox
def _place_window(w, parent=None):
w.wm_withdraw() # Remain invisible while we figure out the geometry
w.update_idletasks() # Actualize geometry information
minwidth = w.winfo_reqwidth()
minheight = w.winfo_reqheight()
maxwidth = w.winfo_vrootwidth()
maxheight = w.winfo_vrootheight()
if parent is not None and parent.winfo_ismapped():
x = parent.winfo_rootx() + (parent.winfo_width() - minwidth) // 2
y = parent.winfo_rooty() + (parent.winfo_height() - minheight) // 2
vrootx = w.winfo_vrootx()
vrooty = w.winfo_vrooty()
x = min(x, vrootx + maxwidth - minwidth)
x = max(x, vrootx)
y = min(y, vrooty + maxheight - minheight)
y = max(y, vrooty)
if w._windowingsystem == 'aqua':
# Avoid the native menu bar which sits on top of everything.
y = max(y, 22)
else:
x = (w.winfo_screenwidth() - minwidth) // 2
y = (w.winfo_screenheight() - minheight) // 2
w.wm_maxsize(maxwidth, maxheight)
w.wm_geometry('+%d+%d' % (x, y))
w.wm_deiconify() # Become visible at the desired location | null |
186,574 | from tkinter import *
from tkinter import _get_temp_root, _destroy_temp_root
from tkinter import messagebox
def _setup_dialog(w):
if w._windowingsystem == "aqua":
w.tk.call("::tk::unsupported::MacWindowStyle", "style",
w, "moveableModal", "")
elif w._windowingsystem == "x11":
w.wm_attributes("-type", "dialog") | null |
186,575 | from tkinter import *
from tkinter import _get_temp_root, _destroy_temp_root
from tkinter import messagebox
class _QueryInteger(_QueryDialog):
errormessage = "Not an integer."
def getresult(self):
return self.getint(self.entry.get())
The provided code snippet includes necessary dependencies for implementing the `askinteger` function. Write a Python function `def askinteger(title, prompt, **kw)` to solve the following problem:
get an integer from the user Arguments: title -- the dialog title prompt -- the label text **kw -- see SimpleDialog class Return value is an integer
Here is the function:
def askinteger(title, prompt, **kw):
'''get an integer from the user
Arguments:
title -- the dialog title
prompt -- the label text
**kw -- see SimpleDialog class
Return value is an integer
'''
d = _QueryInteger(title, prompt, **kw)
return d.result | get an integer from the user Arguments: title -- the dialog title prompt -- the label text **kw -- see SimpleDialog class Return value is an integer |
186,576 | from tkinter import *
from tkinter import _get_temp_root, _destroy_temp_root
from tkinter import messagebox
class _QueryFloat(_QueryDialog):
errormessage = "Not a floating point value."
def getresult(self):
return self.getdouble(self.entry.get())
The provided code snippet includes necessary dependencies for implementing the `askfloat` function. Write a Python function `def askfloat(title, prompt, **kw)` to solve the following problem:
get a float from the user Arguments: title -- the dialog title prompt -- the label text **kw -- see SimpleDialog class Return value is a float
Here is the function:
def askfloat(title, prompt, **kw):
'''get a float from the user
Arguments:
title -- the dialog title
prompt -- the label text
**kw -- see SimpleDialog class
Return value is a float
'''
d = _QueryFloat(title, prompt, **kw)
return d.result | get a float from the user Arguments: title -- the dialog title prompt -- the label text **kw -- see SimpleDialog class Return value is a float |
186,577 | from tkinter import *
from tkinter import _get_temp_root, _destroy_temp_root
from tkinter import messagebox
class _QueryString(_QueryDialog):
def __init__(self, *args, **kw):
if "show" in kw:
self.__show = kw["show"]
del kw["show"]
else:
self.__show = None
_QueryDialog.__init__(self, *args, **kw)
def body(self, master):
entry = _QueryDialog.body(self, master)
if self.__show is not None:
entry.configure(show=self.__show)
return entry
def getresult(self):
return self.entry.get()
The provided code snippet includes necessary dependencies for implementing the `askstring` function. Write a Python function `def askstring(title, prompt, **kw)` to solve the following problem:
get a string from the user Arguments: title -- the dialog title prompt -- the label text **kw -- see SimpleDialog class Return value is a string
Here is the function:
def askstring(title, prompt, **kw):
'''get a string from the user
Arguments:
title -- the dialog title
prompt -- the label text
**kw -- see SimpleDialog class
Return value is a string
'''
d = _QueryString(title, prompt, **kw)
return d.result | get a string from the user Arguments: title -- the dialog title prompt -- the label text **kw -- see SimpleDialog class Return value is a string |
186,579 | import itertools
import tkinter
class Font:
"""Represents a named font.
Constructor options are:
font -- font specifier (name, system font, or (family, size, style)-tuple)
name -- name to use for this font configuration (defaults to a unique name)
exists -- does a named font by this name already exist?
Creates a new named font if False, points to the existing font if True.
Raises _tkinter.TclError if the assertion is false.
the following are ignored if font is specified:
family -- font 'family', e.g. Courier, Times, Helvetica
size -- font size in points
weight -- font thickness: NORMAL, BOLD
slant -- font slant: ROMAN, ITALIC
underline -- font underlining: false (0), true (1)
overstrike -- font strikeout: false (0), true (1)
"""
counter = itertools.count(1)
def _set(self, kw):
options = []
for k, v in kw.items():
options.append("-"+k)
options.append(str(v))
return tuple(options)
def _get(self, args):
options = []
for k in args:
options.append("-"+k)
return tuple(options)
def _mkdict(self, args):
options = {}
for i in range(0, len(args), 2):
options[args[i][1:]] = args[i+1]
return options
def __init__(self, root=None, font=None, name=None, exists=False,
**options):
if root is None:
root = tkinter._get_default_root('use font')
tk = getattr(root, 'tk', root)
if font:
# get actual settings corresponding to the given font
font = tk.splitlist(tk.call("font", "actual", font))
else:
font = self._set(options)
if not name:
name = "font" + str(next(self.counter))
self.name = name
if exists:
self.delete_font = False
# confirm font exists
if self.name not in tk.splitlist(tk.call("font", "names")):
raise tkinter._tkinter.TclError(
"named font %s does not already exist" % (self.name,))
# if font config info supplied, apply it
if font:
tk.call("font", "configure", self.name, *font)
else:
# create new font (raises TclError if the font exists)
tk.call("font", "create", self.name, *font)
self.delete_font = True
self._tk = tk
self._split = tk.splitlist
self._call = tk.call
def __str__(self):
return self.name
def __repr__(self):
return f"<{self.__class__.__module__}.{self.__class__.__qualname__}" \
f" object {self.name!r}>"
def __eq__(self, other):
if not isinstance(other, Font):
return NotImplemented
return self.name == other.name and self._tk == other._tk
def __getitem__(self, key):
return self.cget(key)
def __setitem__(self, key, value):
self.configure(**{key: value})
def __del__(self):
try:
if self.delete_font:
self._call("font", "delete", self.name)
except Exception:
pass
def copy(self):
"Return a distinct copy of the current font"
return Font(self._tk, **self.actual())
def actual(self, option=None, displayof=None):
"Return actual font attributes"
args = ()
if displayof:
args = ('-displayof', displayof)
if option:
args = args + ('-' + option, )
return self._call("font", "actual", self.name, *args)
else:
return self._mkdict(
self._split(self._call("font", "actual", self.name, *args)))
def cget(self, option):
"Get font attribute"
return self._call("font", "config", self.name, "-"+option)
def config(self, **options):
"Modify font attributes"
if options:
self._call("font", "config", self.name,
*self._set(options))
else:
return self._mkdict(
self._split(self._call("font", "config", self.name)))
configure = config
def measure(self, text, displayof=None):
"Return text width"
args = (text,)
if displayof:
args = ('-displayof', displayof, text)
return self._tk.getint(self._call("font", "measure", self.name, *args))
def metrics(self, *options, **kw):
"""Return font metrics.
For best performance, create a dummy widget
using this font before calling this method."""
args = ()
displayof = kw.pop('displayof', None)
if displayof:
args = ('-displayof', displayof)
if options:
args = args + self._get(options)
return self._tk.getint(
self._call("font", "metrics", self.name, *args))
else:
res = self._split(self._call("font", "metrics", self.name, *args))
options = {}
for i in range(0, len(res), 2):
options[res[i][1:]] = self._tk.getint(res[i+1])
return options
The provided code snippet includes necessary dependencies for implementing the `nametofont` function. Write a Python function `def nametofont(name, root=None)` to solve the following problem:
Given the name of a tk named font, returns a Font representation.
Here is the function:
def nametofont(name, root=None):
"""Given the name of a tk named font, returns a Font representation.
"""
return Font(name=name, exists=True, root=root) | Given the name of a tk named font, returns a Font representation. |
186,580 | import itertools
import tkinter
The provided code snippet includes necessary dependencies for implementing the `families` function. Write a Python function `def families(root=None, displayof=None)` to solve the following problem:
Get font families (as a tuple)
Here is the function:
def families(root=None, displayof=None):
"Get font families (as a tuple)"
if root is None:
root = tkinter._get_default_root('use font.families()')
args = ()
if displayof:
args = ('-displayof', displayof)
return root.tk.splitlist(root.tk.call("font", "families", *args)) | Get font families (as a tuple) |
186,605 | import os
import sys
import posixpath
import urllib.parse
_db = None
def init(files=None):
global suffix_map, types_map, encodings_map, common_types
global inited, _db
inited = True # so that MimeTypes.__init__() doesn't call us again
if files is None or _db is None:
db = MimeTypes()
# Quick return if not supported
db.read_windows_registry()
if files is None:
files = knownfiles
else:
files = knownfiles + list(files)
else:
db = _db
for file in files:
if os.path.isfile(file):
db.read(file)
encodings_map = db.encodings_map
suffix_map = db.suffix_map
types_map = db.types_map[True]
common_types = db.types_map[False]
# Make the DB a global variable now that it is fully initialized
_db = db
The provided code snippet includes necessary dependencies for implementing the `guess_all_extensions` function. Write a Python function `def guess_all_extensions(type, strict=True)` to solve the following problem:
Guess the extensions for a file based on its MIME type. Return value is a list of strings giving the possible filename extensions, including the leading dot ('.'). The extension is not guaranteed to have been associated with any particular data stream, but would be mapped to the MIME type `type' by guess_type(). If no extension can be guessed for `type', None is returned. Optional `strict' argument when false adds a bunch of commonly found, but non-standard types.
Here is the function:
def guess_all_extensions(type, strict=True):
"""Guess the extensions for a file based on its MIME type.
Return value is a list of strings giving the possible filename
extensions, including the leading dot ('.'). The extension is not
guaranteed to have been associated with any particular data
stream, but would be mapped to the MIME type `type' by
guess_type(). If no extension can be guessed for `type', None
is returned.
Optional `strict' argument when false adds a bunch of commonly found,
but non-standard types.
"""
if _db is None:
init()
return _db.guess_all_extensions(type, strict) | Guess the extensions for a file based on its MIME type. Return value is a list of strings giving the possible filename extensions, including the leading dot ('.'). The extension is not guaranteed to have been associated with any particular data stream, but would be mapped to the MIME type `type' by guess_type(). If no extension can be guessed for `type', None is returned. Optional `strict' argument when false adds a bunch of commonly found, but non-standard types. |
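A usage sketch for guess_all_extensions(), assuming the default type map that init() builds lazily from the full mimetypes module:
exts = guess_all_extensions("image/jpeg")
assert ".jpg" in exts and ".jpeg" in exts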
186,606 | import os
import sys
import posixpath
import urllib.parse
_db = None
def init(files=None):
global suffix_map, types_map, encodings_map, common_types
global inited, _db
inited = True # so that MimeTypes.__init__() doesn't call us again
if files is None or _db is None:
db = MimeTypes()
# Quick return if not supported
db.read_windows_registry()
if files is None:
files = knownfiles
else:
files = knownfiles + list(files)
else:
db = _db
for file in files:
if os.path.isfile(file):
db.read(file)
encodings_map = db.encodings_map
suffix_map = db.suffix_map
types_map = db.types_map[True]
common_types = db.types_map[False]
# Make the DB a global variable now that it is fully initialized
_db = db
The provided code snippet includes necessary dependencies for implementing the `add_type` function. Write a Python function `def add_type(type, ext, strict=True)` to solve the following problem:
Add a mapping between a type and an extension. When the extension is already known, the new type will replace the old one. When the type is already known the extension will be added to the list of known extensions. If strict is true, information will be added to list of standard types, else to the list of non-standard types.
Here is the function:
def add_type(type, ext, strict=True):
"""Add a mapping between a type and an extension.
When the extension is already known, the new
type will replace the old one. When the type
is already known the extension will be added
to the list of known extensions.
If strict is true, information will be added to
list of standard types, else to the list of non-standard
types.
"""
if _db is None:
init()
return _db.add_type(type, ext, strict) | Add a mapping between a type and an extension. When the extension is already known, the new type will replace the old one. When the type is already known the extension will be added to the list of known extensions. If strict is true, information will be added to list of standard types, else to the list of non-standard types. |
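A sketch for add_type(); the "application/x-example" type and ".exm" extension are made-up values for illustration, and guess_all_extensions() from the previous row is used to read the mapping back:
add_type("application/x-example", ".exm")
assert ".exm" in guess_all_extensions("application/x-example")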
186,607 | import os
import sys
import posixpath
import urllib.parse
class MimeTypes:
def __init__(self, filenames=(), strict=True):
def add_type(self, type, ext, strict=True):
def guess_type(self, url, strict=True):
def guess_all_extensions(self, type, strict=True):
def guess_extension(self, type, strict=True):
def read(self, filename, strict=True):
def readfp(self, fp, strict=True):
def read_windows_registry(self, strict=True):
def _read_windows_registry(cls, add_type):
def enum_types(mimedb):
def read_mime_types(file):
try:
f = open(file, encoding='utf-8')
except OSError:
return None
with f:
db = MimeTypes()
db.readfp(f, True)
return db.types_map[True] | null |
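A sketch for read_mime_types(), feeding it a small Apache-style mime.types file written to a temporary path (assumes the full MimeTypes.readfp parser from the standard mimetypes module):
import tempfile
with tempfile.NamedTemporaryFile("w", suffix=".types", delete=False) as tmp:
    tmp.write("text/markdown  md markdown\n")
mapping = read_mime_types(tmp.name)
assert mapping[".md"] == "text/markdown"
assert mapping[".markdown"] == "text/markdown"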
186,608 | import os
import sys
import posixpath
import urllib.parse
def _default_mime_types():
global suffix_map, _suffix_map_default
global encodings_map, _encodings_map_default
global types_map, _types_map_default
global common_types, _common_types_default
suffix_map = _suffix_map_default = {
'.svgz': '.svg.gz',
'.tgz': '.tar.gz',
'.taz': '.tar.gz',
'.tz': '.tar.gz',
'.tbz2': '.tar.bz2',
'.txz': '.tar.xz',
}
encodings_map = _encodings_map_default = {
'.gz': 'gzip',
'.Z': 'compress',
'.bz2': 'bzip2',
'.xz': 'xz',
'.br': 'br',
}
# Before adding new types, make sure they are either registered with IANA,
# at http://www.iana.org/assignments/media-types
# or extensions, i.e. using the x- prefix
# If you add to these, please keep them sorted by mime type.
# Make sure the entry with the preferred file extension for a particular mime type
# appears before any others of the same mimetype.
types_map = _types_map_default = {
'.js' : 'application/javascript',
'.mjs' : 'application/javascript',
'.json' : 'application/json',
'.webmanifest': 'application/manifest+json',
'.doc' : 'application/msword',
'.dot' : 'application/msword',
'.wiz' : 'application/msword',
'.bin' : 'application/octet-stream',
'.a' : 'application/octet-stream',
'.dll' : 'application/octet-stream',
'.exe' : 'application/octet-stream',
'.o' : 'application/octet-stream',
'.obj' : 'application/octet-stream',
'.so' : 'application/octet-stream',
'.oda' : 'application/oda',
'.pdf' : 'application/pdf',
'.p7c' : 'application/pkcs7-mime',
'.ps' : 'application/postscript',
'.ai' : 'application/postscript',
'.eps' : 'application/postscript',
'.m3u' : 'application/vnd.apple.mpegurl',
'.m3u8' : 'application/vnd.apple.mpegurl',
'.xls' : 'application/vnd.ms-excel',
'.xlb' : 'application/vnd.ms-excel',
'.ppt' : 'application/vnd.ms-powerpoint',
'.pot' : 'application/vnd.ms-powerpoint',
'.ppa' : 'application/vnd.ms-powerpoint',
'.pps' : 'application/vnd.ms-powerpoint',
'.pwz' : 'application/vnd.ms-powerpoint',
'.wasm' : 'application/wasm',
'.bcpio' : 'application/x-bcpio',
'.cpio' : 'application/x-cpio',
'.csh' : 'application/x-csh',
'.dvi' : 'application/x-dvi',
'.gtar' : 'application/x-gtar',
'.hdf' : 'application/x-hdf',
'.h5' : 'application/x-hdf5',
'.latex' : 'application/x-latex',
'.mif' : 'application/x-mif',
'.cdf' : 'application/x-netcdf',
'.nc' : 'application/x-netcdf',
'.p12' : 'application/x-pkcs12',
'.pfx' : 'application/x-pkcs12',
'.ram' : 'application/x-pn-realaudio',
'.pyc' : 'application/x-python-code',
'.pyo' : 'application/x-python-code',
'.sh' : 'application/x-sh',
'.shar' : 'application/x-shar',
'.swf' : 'application/x-shockwave-flash',
'.sv4cpio': 'application/x-sv4cpio',
'.sv4crc' : 'application/x-sv4crc',
'.tar' : 'application/x-tar',
'.tcl' : 'application/x-tcl',
'.tex' : 'application/x-tex',
'.texi' : 'application/x-texinfo',
'.texinfo': 'application/x-texinfo',
'.roff' : 'application/x-troff',
'.t' : 'application/x-troff',
'.tr' : 'application/x-troff',
'.man' : 'application/x-troff-man',
'.me' : 'application/x-troff-me',
'.ms' : 'application/x-troff-ms',
'.ustar' : 'application/x-ustar',
'.src' : 'application/x-wais-source',
'.xsl' : 'application/xml',
'.rdf' : 'application/xml',
'.wsdl' : 'application/xml',
'.xpdl' : 'application/xml',
'.zip' : 'application/zip',
'.3gp' : 'audio/3gpp',
'.3gpp' : 'audio/3gpp',
'.3g2' : 'audio/3gpp2',
'.3gpp2' : 'audio/3gpp2',
'.aac' : 'audio/aac',
'.adts' : 'audio/aac',
'.loas' : 'audio/aac',
'.ass' : 'audio/aac',
'.au' : 'audio/basic',
'.snd' : 'audio/basic',
'.mp3' : 'audio/mpeg',
'.mp2' : 'audio/mpeg',
'.opus' : 'audio/opus',
'.aif' : 'audio/x-aiff',
'.aifc' : 'audio/x-aiff',
'.aiff' : 'audio/x-aiff',
'.ra' : 'audio/x-pn-realaudio',
'.wav' : 'audio/x-wav',
'.bmp' : 'image/bmp',
'.gif' : 'image/gif',
'.ief' : 'image/ief',
'.jpg' : 'image/jpeg',
'.jpe' : 'image/jpeg',
'.jpeg' : 'image/jpeg',
'.heic' : 'image/heic',
'.heif' : 'image/heif',
'.png' : 'image/png',
'.svg' : 'image/svg+xml',
'.tiff' : 'image/tiff',
'.tif' : 'image/tiff',
'.ico' : 'image/vnd.microsoft.icon',
'.ras' : 'image/x-cmu-raster',
'.bmp' : 'image/x-ms-bmp',
'.pnm' : 'image/x-portable-anymap',
'.pbm' : 'image/x-portable-bitmap',
'.pgm' : 'image/x-portable-graymap',
'.ppm' : 'image/x-portable-pixmap',
'.rgb' : 'image/x-rgb',
'.xbm' : 'image/x-xbitmap',
'.xpm' : 'image/x-xpixmap',
'.xwd' : 'image/x-xwindowdump',
'.eml' : 'message/rfc822',
'.mht' : 'message/rfc822',
'.mhtml' : 'message/rfc822',
'.nws' : 'message/rfc822',
'.css' : 'text/css',
'.csv' : 'text/csv',
'.html' : 'text/html',
'.htm' : 'text/html',
'.txt' : 'text/plain',
'.bat' : 'text/plain',
'.c' : 'text/plain',
'.h' : 'text/plain',
'.ksh' : 'text/plain',
'.pl' : 'text/plain',
'.rtx' : 'text/richtext',
'.tsv' : 'text/tab-separated-values',
'.py' : 'text/x-python',
'.etx' : 'text/x-setext',
'.sgm' : 'text/x-sgml',
'.sgml' : 'text/x-sgml',
'.vcf' : 'text/x-vcard',
'.xml' : 'text/xml',
'.mp4' : 'video/mp4',
'.mpeg' : 'video/mpeg',
'.m1v' : 'video/mpeg',
'.mpa' : 'video/mpeg',
'.mpe' : 'video/mpeg',
'.mpg' : 'video/mpeg',
'.mov' : 'video/quicktime',
'.qt' : 'video/quicktime',
'.webm' : 'video/webm',
'.avi' : 'video/x-msvideo',
'.movie' : 'video/x-sgi-movie',
}
# These are non-standard types, commonly found in the wild. They will
# only match if strict=0 flag is given to the API methods.
# Please sort these too
common_types = _common_types_default = {
'.rtf' : 'application/rtf',
'.midi': 'audio/midi',
'.mid' : 'audio/midi',
'.jpg' : 'image/jpg',
'.pict': 'image/pict',
'.pct' : 'image/pict',
'.pic' : 'image/pict',
'.xul' : 'text/xul',
} | null |
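A rough illustration of how the three tables above cooperate during guessing, using only the public mimetypes API: suffix_map rewrites compound extensions, encodings_map supplies the encoding, and types_map supplies the type.
import mimetypes

# '.tgz' is expanded via suffix_map to '.tar.gz'; '.gz' is taken from
# encodings_map as the encoding, and '.tar' is looked up in types_map.
print(mimetypes.guess_type("archive.tgz"))   # ('application/x-tar', 'gzip')
print(mimetypes.guess_type("page.html"))     # ('text/html', None)
print(mimetypes.guess_type("photo.jpg"))     # ('image/jpeg', None)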
186,625 | import base64
import bisect
import email
import hashlib
import http.client
import io
import os
import posixpath
import re
import socket
import string
import sys
import time
import tempfile
import contextlib
import warnings
from urllib.error import URLError, HTTPError, ContentTooShortError
from urllib.parse import (
urlparse, urlsplit, urljoin, unwrap, quote, unquote,
_splittype, _splithost, _splitport, _splituser, _splitpasswd,
_splitattr, _splitquery, _splitvalue, _splittag, _to_bytes,
unquote_to_bytes, urlunparse)
from urllib.response import addinfourl, addclosehook
def getproxies_environment():
"""Return a dictionary of scheme -> proxy server URL mappings.
Scan the environment for variables named <scheme>_proxy;
this seems to be the standard convention. If you need a
different way, you can pass a proxies dictionary to the
[Fancy]URLopener constructor.
"""
proxies = {}
# in order to prefer lowercase variables, process environment in
# two passes: first matches any, second pass matches lowercase only
for name, value in os.environ.items():
name = name.lower()
if value and name[-6:] == '_proxy':
proxies[name[:-6]] = value
# CVE-2016-1000110 - If we are running as CGI script, forget HTTP_PROXY
# (non-all-lowercase) as it may be set from the web server by a "Proxy:"
# header from the client
# If "proxy" is lowercase, it will still be used thanks to the next block
if 'REQUEST_METHOD' in os.environ:
proxies.pop('http', None)
for name, value in os.environ.items():
if name[-6:] == '_proxy':
name = name.lower()
if value:
proxies[name[:-6]] = value
else:
proxies.pop(name[:-6], None)
return proxies
if sys.platform == 'darwin':
from _scproxy import _get_proxy_settings, _get_proxies
elif os.name == 'nt':
def getproxies_registry():
"""Return a dictionary of scheme -> proxy server URL mappings.
Win32 uses the registry to store proxies.
"""
proxies = {}
try:
import winreg
except ImportError:
# Std module, so should be around - but you never know!
return proxies
try:
internetSettings = winreg.OpenKey(winreg.HKEY_CURRENT_USER,
r'Software\Microsoft\Windows\CurrentVersion\Internet Settings')
proxyEnable = winreg.QueryValueEx(internetSettings,
'ProxyEnable')[0]
if proxyEnable:
# Returned as Unicode but problems if not converted to ASCII
proxyServer = str(winreg.QueryValueEx(internetSettings,
'ProxyServer')[0])
if '=' not in proxyServer and ';' not in proxyServer:
# Use one setting for all protocols.
proxyServer = 'http={0};https={0};ftp={0}'.format(proxyServer)
for p in proxyServer.split(';'):
protocol, address = p.split('=', 1)
# See if address has a type:// prefix
if not re.match('(?:[^/:]+)://', address):
# Add type:// prefix to address without specifying type
if protocol in ('http', 'https', 'ftp'):
# The default proxy type of Windows is HTTP
address = 'http://' + address
elif protocol == 'socks':
address = 'socks://' + address
proxies[protocol] = address
# Use SOCKS proxy for HTTP(S) protocols
if proxies.get('socks'):
# The default SOCKS proxy type of Windows is SOCKS4
address = re.sub(r'^socks://', 'socks4://', proxies['socks'])
proxies['http'] = proxies.get('http') or address
proxies['https'] = proxies.get('https') or address
internetSettings.Close()
except (OSError, ValueError, TypeError):
# Either registry key not found etc, or the value in an
# unexpected format.
# proxies already set up to be empty so nothing to do
pass
return proxies
else:
# By default use environment variables
getproxies = getproxies_environment
proxy_bypass = proxy_bypass_environment
The provided code snippet includes necessary dependencies for implementing the `getproxies` function. Write a Python function `def getproxies()` to solve the following problem:
Return a dictionary of scheme -> proxy server URL mappings. Returns settings gathered from the environment, if specified, or the registry.
Here is the function:
def getproxies():
"""Return a dictionary of scheme -> proxy server URL mappings.
Returns settings gathered from the environment, if specified,
or the registry.
"""
return getproxies_environment() or getproxies_registry() | Return a dictionary of scheme -> proxy server URL mappings. Returns settings gathered from the environment, if specified, or the registry. |
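A small sketch of the environment-driven path through getproxies(), assuming a POSIX-like environment; the proxy URL is an example value only.
import os
import urllib.request

# With a lowercase http_proxy variable set, getproxies_environment() picks it up;
# on Windows the registry is consulted only when no *_proxy variables are present.
os.environ["http_proxy"] = "http://proxy.example:3128"   # example value
print(urllib.request.getproxies().get("http"))           # http://proxy.example:3128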
186,637 | import re
import sys
import types
import collections
import warnings
from collections import namedtuple
def _splitport(host):
def splitport(host):
warnings.warn("urllib.parse.splitport() is deprecated as of 3.8, "
"use urllib.parse.urlparse() instead",
DeprecationWarning, stacklevel=2)
return _splitport(host) | null |
186,638 | import re
import sys
import types
import collections
import warnings
from collections import namedtuple
def _splitnport(host, defport=-1):
"""Split host and port, returning numeric port.
Return given default port if no ':' found; defaults to -1.
Return numerical port if a valid number is found after ':'.
Return None if ':' but not a valid number."""
host, delim, port = host.rpartition(':')
if not delim:
host = port
elif port:
try:
nport = int(port)
except ValueError:
nport = None
return host, nport
return host, defport
def splitnport(host, defport=-1):
warnings.warn("urllib.parse.splitnport() is deprecated as of 3.8, "
"use urllib.parse.urlparse() instead",
DeprecationWarning, stacklevel=2)
return _splitnport(host, defport) | null |
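A quick sketch of the return values documented above. It calls the private _splitnport helper directly, which is an internal, non-stable API, purely to show the three cases.
from urllib.parse import _splitnport   # internal helper shown above; subject to change

print(_splitnport("www.example.com:8080"))   # ('www.example.com', 8080)
print(_splitnport("www.example.com"))        # ('www.example.com', -1)   default port
print(_splitnport("www.example.com:http"))   # ('www.example.com', None) non-numeric port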
186,640 | import re
import sys
import types
import collections
import warnings
from collections import namedtuple
def _splittag(url):
def splittag(url):
warnings.warn("urllib.parse.splittag() is deprecated as of 3.8, "
"use urllib.parse.urlparse() instead",
DeprecationWarning, stacklevel=2)
return _splittag(url) | null |
186,643 | def bisect_right(a, x, lo=0, hi=None, *, key=None):
"""Return the index where to insert item x in list a, assuming a is sorted.
The return value i is such that all e in a[:i] have e <= x, and all e in
a[i:] have e > x. So if x already appears in the list, a.insert(i, x) will
insert just after the rightmost x already there.
Optional args lo (default 0) and hi (default len(a)) bound the
slice of a to be searched.
"""
if lo < 0:
raise ValueError('lo must be non-negative')
if hi is None:
hi = len(a)
# Note, the comparison uses "<" to match the
# __lt__() logic in list.sort() and in heapq.
if key is None:
while lo < hi:
mid = (lo + hi) // 2
if x < a[mid]:
hi = mid
else:
lo = mid + 1
else:
while lo < hi:
mid = (lo + hi) // 2
if x < key(a[mid]):
hi = mid
else:
lo = mid + 1
return lo
The provided code snippet includes necessary dependencies for implementing the `insort_right` function. Write a Python function `def insort_right(a, x, lo=0, hi=None, *, key=None)` to solve the following problem:
Insert item x in list a, and keep it sorted assuming a is sorted. If x is already in a, insert it to the right of the rightmost x. Optional args lo (default 0) and hi (default len(a)) bound the slice of a to be searched.
Here is the function:
def insort_right(a, x, lo=0, hi=None, *, key=None):
"""Insert item x in list a, and keep it sorted assuming a is sorted.
If x is already in a, insert it to the right of the rightmost x.
Optional args lo (default 0) and hi (default len(a)) bound the
slice of a to be searched.
"""
if key is None:
lo = bisect_right(a, x, lo, hi)
else:
lo = bisect_right(a, key(x), lo, hi, key=key)
a.insert(lo, x) | Insert item x in list a, and keep it sorted assuming a is sorted. If x is already in a, insert it to the right of the rightmost x. Optional args lo (default 0) and hi (default len(a)) bound the slice of a to be searched. |
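A short usage sketch of insort_right() via the public bisect module (the key parameter assumes Python 3.10 or later): equal items are inserted after the rightmost existing one.
import bisect

data = [1, 3, 3, 5]
bisect.insort_right(data, 3)    # the new 3 lands after the existing 3s
print(data)                     # [1, 3, 3, 3, 5]

# With key=, items are ordered by key(item); here records stay sorted by their second field.
records = [("a", 1), ("b", 2)]
bisect.insort_right(records, ("ab", 2), key=lambda r: r[1])
print(records)                  # [('a', 1), ('b', 2), ('ab', 2)]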
186,644 | def bisect_left(a, x, lo=0, hi=None, *, key=None):
"""Return the index where to insert item x in list a, assuming a is sorted.
The return value i is such that all e in a[:i] have e < x, and all e in
a[i:] have e >= x. So if x already appears in the list, a.insert(i, x) will
insert just before the leftmost x already there.
Optional args lo (default 0) and hi (default len(a)) bound the
slice of a to be searched.
"""
if lo < 0:
raise ValueError('lo must be non-negative')
if hi is None:
hi = len(a)
# Note, the comparison uses "<" to match the
# __lt__() logic in list.sort() and in heapq.
if key is None:
while lo < hi:
mid = (lo + hi) // 2
if a[mid] < x:
lo = mid + 1
else:
hi = mid
else:
while lo < hi:
mid = (lo + hi) // 2
if key(a[mid]) < x:
lo = mid + 1
else:
hi = mid
return lo
The provided code snippet includes necessary dependencies for implementing the `insort_left` function. Write a Python function `def insort_left(a, x, lo=0, hi=None, *, key=None)` to solve the following problem:
Insert item x in list a, and keep it sorted assuming a is sorted. If x is already in a, insert it to the left of the leftmost x. Optional args lo (default 0) and hi (default len(a)) bound the slice of a to be searched.
Here is the function:
def insort_left(a, x, lo=0, hi=None, *, key=None):
"""Insert item x in list a, and keep it sorted assuming a is sorted.
If x is already in a, insert it to the left of the leftmost x.
Optional args lo (default 0) and hi (default len(a)) bound the
slice of a to be searched.
"""
if key is None:
lo = bisect_left(a, x, lo, hi)
else:
lo = bisect_left(a, key(x), lo, hi, key=key)
a.insert(lo, x) | Insert item x in list a, and keep it sorted assuming a is sorted. If x is already in a, insert it to the left of the leftmost x. Optional args lo (default 0) and hi (default len(a)) bound the slice of a to be searched. |
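The mirror-image sketch for insort_left(): equal items go before the leftmost existing one, which is the only difference from insort_right() above.
import bisect

data = [1, 3, 3, 5]
bisect.insort_left(data, 3)     # the new 3 lands before the existing 3s
print(data)                     # [1, 3, 3, 3, 5]

# The underlying searches differ only in where equal elements are placed:
print(bisect.bisect_left([1, 3, 3, 5], 3))    # 1
print(bisect.bisect_right([1, 3, 3, 5], 3))   # 3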
186,647 | import base64
import sys
import time
from datetime import datetime
from decimal import Decimal
import http.client
import urllib.parse
from xml.parsers import expat
import errno
from io import BytesIO
if _try('%Y'): # Mac OS X
def _iso8601_format(value):
return value.strftime("%Y%m%dT%H:%M:%S")
elif _try('%4Y'): # Linux
def _iso8601_format(value):
return value.strftime("%4Y%m%dT%H:%M:%S")
else:
def _iso8601_format(value):
return value.strftime("%Y%m%dT%H:%M:%S").zfill(17)
class datetime(date):
"""datetime(year, month, day[, hour[, minute[, second[, microsecond[,tzinfo]]]]])
The year, month and day arguments are required. tzinfo may be None, or an
instance of a tzinfo subclass. The remaining arguments may be ints.
"""
__slots__ = date.__slots__ + time.__slots__
def __new__(cls, year, month=None, day=None, hour=0, minute=0, second=0,
microsecond=0, tzinfo=None, *, fold=0):
if (isinstance(year, (bytes, str)) and len(year) == 10 and
1 <= ord(year[2:3])&0x7F <= 12):
# Pickle support
if isinstance(year, str):
try:
year = bytes(year, 'latin1')
except UnicodeEncodeError:
# More informative error message.
raise ValueError(
"Failed to encode latin1 string when unpickling "
"a datetime object. "
"pickle.load(data, encoding='latin1') is assumed.")
self = object.__new__(cls)
self.__setstate(year, month)
self._hashcode = -1
return self
year, month, day = _check_date_fields(year, month, day)
hour, minute, second, microsecond, fold = _check_time_fields(
hour, minute, second, microsecond, fold)
_check_tzinfo_arg(tzinfo)
self = object.__new__(cls)
self._year = year
self._month = month
self._day = day
self._hour = hour
self._minute = minute
self._second = second
self._microsecond = microsecond
self._tzinfo = tzinfo
self._hashcode = -1
self._fold = fold
return self
# Read-only field accessors
def hour(self):
"""hour (0-23)"""
return self._hour
def minute(self):
"""minute (0-59)"""
return self._minute
def second(self):
"""second (0-59)"""
return self._second
def microsecond(self):
"""microsecond (0-999999)"""
return self._microsecond
def tzinfo(self):
"""timezone info object"""
return self._tzinfo
def fold(self):
return self._fold
def _fromtimestamp(cls, t, utc, tz):
"""Construct a datetime from a POSIX timestamp (like time.time()).
A timezone info object may be passed in as well.
"""
frac, t = _math.modf(t)
us = round(frac * 1e6)
if us >= 1000000:
t += 1
us -= 1000000
elif us < 0:
t -= 1
us += 1000000
converter = _time.gmtime if utc else _time.localtime
y, m, d, hh, mm, ss, weekday, jday, dst = converter(t)
ss = min(ss, 59) # clamp out leap seconds if the platform has them
result = cls(y, m, d, hh, mm, ss, us, tz)
if tz is None and not utc:
# As of version 2015f max fold in IANA database is
# 23 hours at 1969-09-30 13:00:00 in Kwajalein.
# Let's probe 24 hours in the past to detect a transition:
max_fold_seconds = 24 * 3600
# On Windows localtime_s throws an OSError for negative values,
# thus we can't perform fold detection for values of time less
# than the max time fold. See comments in _datetimemodule's
# version of this method for more details.
if t < max_fold_seconds and sys.platform.startswith("win"):
return result
y, m, d, hh, mm, ss = converter(t - max_fold_seconds)[:6]
probe1 = cls(y, m, d, hh, mm, ss, us, tz)
trans = result - probe1 - timedelta(0, max_fold_seconds)
if trans.days < 0:
y, m, d, hh, mm, ss = converter(t + trans // timedelta(0, 1))[:6]
probe2 = cls(y, m, d, hh, mm, ss, us, tz)
if probe2 == result:
result._fold = 1
elif tz is not None:
result = tz.fromutc(result)
return result
def fromtimestamp(cls, t, tz=None):
"""Construct a datetime from a POSIX timestamp (like time.time()).
A timezone info object may be passed in as well.
"""
_check_tzinfo_arg(tz)
return cls._fromtimestamp(t, tz is not None, tz)
def utcfromtimestamp(cls, t):
"""Construct a naive UTC datetime from a POSIX timestamp."""
return cls._fromtimestamp(t, True, None)
def now(cls, tz=None):
"Construct a datetime from time.time() and optional time zone info."
t = _time.time()
return cls.fromtimestamp(t, tz)
def utcnow(cls):
"Construct a UTC datetime from time.time()."
t = _time.time()
return cls.utcfromtimestamp(t)
def combine(cls, date, time, tzinfo=True):
"Construct a datetime from a given date and a given time."
if not isinstance(date, _date_class):
raise TypeError("date argument must be a date instance")
if not isinstance(time, _time_class):
raise TypeError("time argument must be a time instance")
if tzinfo is True:
tzinfo = time.tzinfo
return cls(date.year, date.month, date.day,
time.hour, time.minute, time.second, time.microsecond,
tzinfo, fold=time.fold)
def fromisoformat(cls, date_string):
"""Construct a datetime from the output of datetime.isoformat()."""
if not isinstance(date_string, str):
raise TypeError('fromisoformat: argument must be str')
# Split this at the separator
dstr = date_string[0:10]
tstr = date_string[11:]
try:
date_components = _parse_isoformat_date(dstr)
except ValueError:
raise ValueError(f'Invalid isoformat string: {date_string!r}')
if tstr:
try:
time_components = _parse_isoformat_time(tstr)
except ValueError:
raise ValueError(f'Invalid isoformat string: {date_string!r}')
else:
time_components = [0, 0, 0, 0, None]
return cls(*(date_components + time_components))
def timetuple(self):
"Return local time tuple compatible with time.localtime()."
dst = self.dst()
if dst is None:
dst = -1
elif dst:
dst = 1
else:
dst = 0
return _build_struct_time(self.year, self.month, self.day,
self.hour, self.minute, self.second,
dst)
def _mktime(self):
"""Return integer POSIX timestamp."""
epoch = datetime(1970, 1, 1)
max_fold_seconds = 24 * 3600
t = (self - epoch) // timedelta(0, 1)
def local(u):
y, m, d, hh, mm, ss = _time.localtime(u)[:6]
return (datetime(y, m, d, hh, mm, ss) - epoch) // timedelta(0, 1)
# Our goal is to solve t = local(u) for u.
a = local(t) - t
u1 = t - a
t1 = local(u1)
if t1 == t:
# We found one solution, but it may not be the one we need.
# Look for an earlier solution (if `fold` is 0), or a
# later one (if `fold` is 1).
u2 = u1 + (-max_fold_seconds, max_fold_seconds)[self.fold]
b = local(u2) - u2
if a == b:
return u1
else:
b = t1 - u1
assert a != b
u2 = t - b
t2 = local(u2)
if t2 == t:
return u2
if t1 == t:
return u1
# We have found both offsets a and b, but neither t - a nor t - b is
# a solution. This means t is in the gap.
return (max, min)[self.fold](u1, u2)
def timestamp(self):
"Return POSIX timestamp as float"
if self._tzinfo is None:
s = self._mktime()
return s + self.microsecond / 1e6
else:
return (self - _EPOCH).total_seconds()
def utctimetuple(self):
"Return UTC time tuple compatible with time.gmtime()."
offset = self.utcoffset()
if offset:
self -= offset
y, m, d = self.year, self.month, self.day
hh, mm, ss = self.hour, self.minute, self.second
return _build_struct_time(y, m, d, hh, mm, ss, 0)
def date(self):
"Return the date part."
return date(self._year, self._month, self._day)
def time(self):
"Return the time part, with tzinfo None."
return time(self.hour, self.minute, self.second, self.microsecond, fold=self.fold)
def timetz(self):
"Return the time part, with same tzinfo."
return time(self.hour, self.minute, self.second, self.microsecond,
self._tzinfo, fold=self.fold)
def replace(self, year=None, month=None, day=None, hour=None,
minute=None, second=None, microsecond=None, tzinfo=True,
*, fold=None):
"""Return a new datetime with new values for the specified fields."""
if year is None:
year = self.year
if month is None:
month = self.month
if day is None:
day = self.day
if hour is None:
hour = self.hour
if minute is None:
minute = self.minute
if second is None:
second = self.second
if microsecond is None:
microsecond = self.microsecond
if tzinfo is True:
tzinfo = self.tzinfo
if fold is None:
fold = self.fold
return type(self)(year, month, day, hour, minute, second,
microsecond, tzinfo, fold=fold)
def _local_timezone(self):
if self.tzinfo is None:
ts = self._mktime()
else:
ts = (self - _EPOCH) // timedelta(seconds=1)
localtm = _time.localtime(ts)
local = datetime(*localtm[:6])
# Extract TZ data
gmtoff = localtm.tm_gmtoff
zone = localtm.tm_zone
return timezone(timedelta(seconds=gmtoff), zone)
def astimezone(self, tz=None):
if tz is None:
tz = self._local_timezone()
elif not isinstance(tz, tzinfo):
raise TypeError("tz argument must be an instance of tzinfo")
mytz = self.tzinfo
if mytz is None:
mytz = self._local_timezone()
myoffset = mytz.utcoffset(self)
else:
myoffset = mytz.utcoffset(self)
if myoffset is None:
mytz = self.replace(tzinfo=None)._local_timezone()
myoffset = mytz.utcoffset(self)
if tz is mytz:
return self
# Convert self to UTC, and attach the new time zone object.
utc = (self - myoffset).replace(tzinfo=tz)
# Convert from UTC to tz's local time.
return tz.fromutc(utc)
# Ways to produce a string.
def ctime(self):
"Return ctime() style string."
weekday = self.toordinal() % 7 or 7
return "%s %s %2d %02d:%02d:%02d %04d" % (
_DAYNAMES[weekday],
_MONTHNAMES[self._month],
self._day,
self._hour, self._minute, self._second,
self._year)
def isoformat(self, sep='T', timespec='auto'):
"""Return the time formatted according to ISO.
The full format looks like 'YYYY-MM-DD HH:MM:SS.mmmmmm'.
By default, the fractional part is omitted if self.microsecond == 0.
If self.tzinfo is not None, the UTC offset is also attached, giving
a full format of 'YYYY-MM-DD HH:MM:SS.mmmmmm+HH:MM'.
Optional argument sep specifies the separator between date and
time, default 'T'.
The optional argument timespec specifies the number of additional
terms of the time to include. Valid options are 'auto', 'hours',
'minutes', 'seconds', 'milliseconds' and 'microseconds'.
"""
s = ("%04d-%02d-%02d%c" % (self._year, self._month, self._day, sep) +
_format_time(self._hour, self._minute, self._second,
self._microsecond, timespec))
off = self.utcoffset()
tz = _format_offset(off)
if tz:
s += tz
return s
def __repr__(self):
"""Convert to formal string, for repr()."""
L = [self._year, self._month, self._day, # These are never zero
self._hour, self._minute, self._second, self._microsecond]
if L[-1] == 0:
del L[-1]
if L[-1] == 0:
del L[-1]
s = "%s.%s(%s)" % (self.__class__.__module__,
self.__class__.__qualname__,
", ".join(map(str, L)))
if self._tzinfo is not None:
assert s[-1:] == ")"
s = s[:-1] + ", tzinfo=%r" % self._tzinfo + ")"
if self._fold:
assert s[-1:] == ")"
s = s[:-1] + ", fold=1)"
return s
def __str__(self):
"Convert to string, for str()."
return self.isoformat(sep=' ')
def strptime(cls, date_string, format):
'string, format -> new datetime parsed from a string (like time.strptime()).'
import _strptime
return _strptime._strptime_datetime(cls, date_string, format)
def utcoffset(self):
"""Return the timezone offset as timedelta positive east of UTC (negative west of
UTC)."""
if self._tzinfo is None:
return None
offset = self._tzinfo.utcoffset(self)
_check_utc_offset("utcoffset", offset)
return offset
def tzname(self):
"""Return the timezone name.
Note that the name is 100% informational -- there's no requirement that
it mean anything in particular. For example, "GMT", "UTC", "-500",
"-5:00", "EDT", "US/Eastern", "America/New York" are all valid replies.
"""
if self._tzinfo is None:
return None
name = self._tzinfo.tzname(self)
_check_tzname(name)
return name
def dst(self):
"""Return 0 if DST is not in effect, or the DST offset (as timedelta
positive eastward) if DST is in effect.
This is purely informational; the DST offset has already been added to
the UTC offset returned by utcoffset() if applicable, so there's no
need to consult dst() unless you're interested in displaying the DST
info.
"""
if self._tzinfo is None:
return None
offset = self._tzinfo.dst(self)
_check_utc_offset("dst", offset)
return offset
# Comparisons of datetime objects with other.
def __eq__(self, other):
if isinstance(other, datetime):
return self._cmp(other, allow_mixed=True) == 0
elif not isinstance(other, date):
return NotImplemented
else:
return False
def __le__(self, other):
if isinstance(other, datetime):
return self._cmp(other) <= 0
elif not isinstance(other, date):
return NotImplemented
else:
_cmperror(self, other)
def __lt__(self, other):
if isinstance(other, datetime):
return self._cmp(other) < 0
elif not isinstance(other, date):
return NotImplemented
else:
_cmperror(self, other)
def __ge__(self, other):
if isinstance(other, datetime):
return self._cmp(other) >= 0
elif not isinstance(other, date):
return NotImplemented
else:
_cmperror(self, other)
def __gt__(self, other):
if isinstance(other, datetime):
return self._cmp(other) > 0
elif not isinstance(other, date):
return NotImplemented
else:
_cmperror(self, other)
def _cmp(self, other, allow_mixed=False):
assert isinstance(other, datetime)
mytz = self._tzinfo
ottz = other._tzinfo
myoff = otoff = None
if mytz is ottz:
base_compare = True
else:
myoff = self.utcoffset()
otoff = other.utcoffset()
# Assume that allow_mixed means that we are called from __eq__
if allow_mixed:
if myoff != self.replace(fold=not self.fold).utcoffset():
return 2
if otoff != other.replace(fold=not other.fold).utcoffset():
return 2
base_compare = myoff == otoff
if base_compare:
return _cmp((self._year, self._month, self._day,
self._hour, self._minute, self._second,
self._microsecond),
(other._year, other._month, other._day,
other._hour, other._minute, other._second,
other._microsecond))
if myoff is None or otoff is None:
if allow_mixed:
return 2 # arbitrary non-zero value
else:
raise TypeError("cannot compare naive and aware datetimes")
# XXX What follows could be done more efficiently...
diff = self - other # this will take offsets into account
if diff.days < 0:
return -1
return diff and 1 or 0
def __add__(self, other):
"Add a datetime and a timedelta."
if not isinstance(other, timedelta):
return NotImplemented
delta = timedelta(self.toordinal(),
hours=self._hour,
minutes=self._minute,
seconds=self._second,
microseconds=self._microsecond)
delta += other
hour, rem = divmod(delta.seconds, 3600)
minute, second = divmod(rem, 60)
if 0 < delta.days <= _MAXORDINAL:
return type(self).combine(date.fromordinal(delta.days),
time(hour, minute, second,
delta.microseconds,
tzinfo=self._tzinfo))
raise OverflowError("result out of range")
__radd__ = __add__
def __sub__(self, other):
"Subtract two datetimes, or a datetime and a timedelta."
if not isinstance(other, datetime):
if isinstance(other, timedelta):
return self + -other
return NotImplemented
days1 = self.toordinal()
days2 = other.toordinal()
secs1 = self._second + self._minute * 60 + self._hour * 3600
secs2 = other._second + other._minute * 60 + other._hour * 3600
base = timedelta(days1 - days2,
secs1 - secs2,
self._microsecond - other._microsecond)
if self._tzinfo is other._tzinfo:
return base
myoff = self.utcoffset()
otoff = other.utcoffset()
if myoff == otoff:
return base
if myoff is None or otoff is None:
raise TypeError("cannot mix naive and timezone-aware time")
return base + otoff - myoff
def __hash__(self):
if self._hashcode == -1:
if self.fold:
t = self.replace(fold=0)
else:
t = self
tzoff = t.utcoffset()
if tzoff is None:
self._hashcode = hash(t._getstate()[0])
else:
days = _ymd2ord(self.year, self.month, self.day)
seconds = self.hour * 3600 + self.minute * 60 + self.second
self._hashcode = hash(timedelta(days, seconds, self.microsecond) - tzoff)
return self._hashcode
# Pickle support.
def _getstate(self, protocol=3):
yhi, ylo = divmod(self._year, 256)
us2, us3 = divmod(self._microsecond, 256)
us1, us2 = divmod(us2, 256)
m = self._month
if self._fold and protocol > 3:
m += 128
basestate = bytes([yhi, ylo, m, self._day,
self._hour, self._minute, self._second,
us1, us2, us3])
if self._tzinfo is None:
return (basestate,)
else:
return (basestate, self._tzinfo)
def __setstate(self, string, tzinfo):
if tzinfo is not None and not isinstance(tzinfo, _tzinfo_class):
raise TypeError("bad tzinfo state arg")
(yhi, ylo, m, self._day, self._hour,
self._minute, self._second, us1, us2, us3) = string
if m > 127:
self._fold = 1
self._month = m - 128
else:
self._fold = 0
self._month = m
self._year = yhi * 256 + ylo
self._microsecond = (((us1 << 8) | us2) << 8) | us3
self._tzinfo = tzinfo
def __reduce_ex__(self, protocol):
return (self.__class__, self._getstate(protocol))
def __reduce__(self):
return self.__reduce_ex__(2)
datetime.min = datetime(1, 1, 1)
datetime.max = datetime(9999, 12, 31, 23, 59, 59, 999999)
datetime.resolution = timedelta(microseconds=1)
def _strftime(value):
if isinstance(value, datetime):
return _iso8601_format(value)
if not isinstance(value, (tuple, time.struct_time)):
if value == 0:
value = time.time()
value = time.localtime(value)
return "%04d%02d%02dT%02d:%02d:%02d" % value[:6] | null |
186,653 | import sys
import types
import collections
import io
from opcode import *
from opcode import __all__ as _opcodes_all
def _get_code_object(x):
"""Helper to handle methods, compiled or raw code objects, and strings."""
# Extract functions from methods.
if hasattr(x, '__func__'):
x = x.__func__
# Extract compiled code objects from...
if hasattr(x, '__code__'): # ...a function, or
x = x.__code__
elif hasattr(x, 'gi_code'): #...a generator object, or
x = x.gi_code
elif hasattr(x, 'ag_code'): #...an asynchronous generator object, or
x = x.ag_code
elif hasattr(x, 'cr_code'): #...a coroutine.
x = x.cr_code
# Handle source code.
if isinstance(x, str):
x = _try_compile(x, "<disassembly>")
# By now, if we don't have a code object, we can't disassemble x.
if hasattr(x, 'co_code'):
return x
raise TypeError("don't know how to disassemble %s objects" %
type(x).__name__)
def _get_instructions_bytes(code, varnames=None, names=None, constants=None,
cells=None, linestarts=None, line_offset=0):
"""Iterate over the instructions in a bytecode string.
Generates a sequence of Instruction namedtuples giving the details of each
opcode. Additional information about the code's runtime environment
(e.g. variable names, constants) can be specified using optional
arguments.
"""
labels = findlabels(code)
starts_line = None
for offset, op, arg in _unpack_opargs(code):
if linestarts is not None:
starts_line = linestarts.get(offset, None)
if starts_line is not None:
starts_line += line_offset
is_jump_target = offset in labels
argval = None
argrepr = ''
if arg is not None:
# Set argval to the dereferenced value of the argument when
# available, and argrepr to the string representation of argval.
# _disassemble_bytes needs the string repr of the
# raw name index for LOAD_GLOBAL, LOAD_CONST, etc.
argval = arg
if op in hasconst:
argval, argrepr = _get_const_info(arg, constants)
elif op in hasname:
argval, argrepr = _get_name_info(arg, names)
elif op in hasjabs:
argval = arg*2
argrepr = "to " + repr(argval)
elif op in hasjrel:
argval = offset + 2 + arg*2
argrepr = "to " + repr(argval)
elif op in haslocal:
argval, argrepr = _get_name_info(arg, varnames)
elif op in hascompare:
argval = cmp_op[arg]
argrepr = argval
elif op in hasfree:
argval, argrepr = _get_name_info(arg, cells)
elif op == FORMAT_VALUE:
argval, argrepr = FORMAT_VALUE_CONVERTERS[arg & 0x3]
argval = (argval, bool(arg & 0x4))
if argval[1]:
if argrepr:
argrepr += ', '
argrepr += 'with format'
elif op == MAKE_FUNCTION:
argrepr = ', '.join(s for i, s in enumerate(MAKE_FUNCTION_FLAGS)
if arg & (1<<i))
yield Instruction(opname[op], op,
arg, argval, argrepr,
offset, starts_line, is_jump_target)
def findlinestarts(code):
"""Find the offsets in a byte code which are start of lines in the source.
Generate pairs (offset, lineno)
"""
lastline = None
for start, end, line in code.co_lines():
if line is not None and line != lastline:
lastline = line
yield start, line
return
The provided code snippet includes necessary dependencies for implementing the `get_instructions` function. Write a Python function `def get_instructions(x, *, first_line=None)` to solve the following problem:
Iterator for the opcodes in methods, functions or code. Generates a series of Instruction named tuples giving the details of each operation in the supplied code. If *first_line* is not None, it indicates the line number that should be reported for the first source line in the disassembled code. Otherwise, the source line information (if any) is taken directly from the disassembled code object.
Here is the function:
def get_instructions(x, *, first_line=None):
"""Iterator for the opcodes in methods, functions or code
Generates a series of Instruction named tuples giving the details of
each operation in the supplied code.
If *first_line* is not None, it indicates the line number that should
be reported for the first source line in the disassembled code.
Otherwise, the source line information (if any) is taken directly from
the disassembled code object.
"""
co = _get_code_object(x)
cell_names = co.co_cellvars + co.co_freevars
linestarts = dict(findlinestarts(co))
if first_line is not None:
line_offset = first_line - co.co_firstlineno
else:
line_offset = 0
return _get_instructions_bytes(co.co_code, co.co_varnames, co.co_names,
co.co_consts, cell_names, linestarts,
line_offset) | Iterator for the opcodes in methods, functions or code. Generates a series of Instruction named tuples giving the details of each operation in the supplied code. If *first_line* is not None, it indicates the line number that should be reported for the first source line in the disassembled code. Otherwise, the source line information (if any) is taken directly from the disassembled code object. |
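A minimal sketch of iterating the instruction stream with the public dis.get_instructions(); the sample function is arbitrary, and the exact opcodes printed depend on the interpreter version.
import dis

def sample(x):
    return x + 1

# Each yielded Instruction carries opname, opcode, arg, argval, argrepr,
# offset, starts_line and is_jump_target.
for ins in dis.get_instructions(sample):
    print(ins.offset, ins.opname, ins.argrepr)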
186,655 | import collections as _collections
import dataclasses as _dataclasses
import re
import sys as _sys
import types as _types
from io import StringIO as _StringIO
def pprint(object, stream=None, indent=1, width=80, depth=None, *,
compact=False, sort_dicts=True, underscore_numbers=False):
"""Pretty-print a Python object to a stream [default is sys.stdout]."""
printer = PrettyPrinter(
stream=stream, indent=indent, width=width, depth=depth,
compact=compact, sort_dicts=sort_dicts,
underscore_numbers=underscore_numbers)
printer.pprint(object)
The provided code snippet includes necessary dependencies for implementing the `pp` function. Write a Python function `def pp(object, *args, sort_dicts=False, **kwargs)` to solve the following problem:
Pretty-print a Python object
Here is the function:
def pp(object, *args, sort_dicts=False, **kwargs):
"""Pretty-print a Python object"""
pprint(object, *args, sort_dicts=sort_dicts, **kwargs) | Pretty-print a Python object |
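A tiny usage sketch contrasting pp() with pprint() (pp is available from Python 3.8): the only difference is that pp() leaves dict insertion order alone by default.
from pprint import pp, pprint

d = {"b": 2, "a": 1}
pp(d)       # {'b': 2, 'a': 1}   insertion order kept (sort_dicts defaults to False)
pprint(d)   # {'a': 1, 'b': 2}   pprint() sorts keys by default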
186,656 | import collections as _collections
import dataclasses as _dataclasses
import re
import sys as _sys
import types as _types
from io import StringIO as _StringIO
class PrettyPrinter:
def __init__(self, indent=1, width=80, depth=None, stream=None, *,
compact=False, sort_dicts=True, underscore_numbers=False):
"""Handle pretty printing operations onto a stream using a set of
configured parameters.
indent
Number of spaces to indent for each level of nesting.
width
Attempted maximum number of columns in the output.
depth
The maximum depth to print out nested structures.
stream
The desired output stream. If omitted (or false), the standard
output stream available at construction will be used.
compact
If true, several items will be combined in one line.
sort_dicts
If true, dict keys are sorted.
"""
indent = int(indent)
width = int(width)
if indent < 0:
raise ValueError('indent must be >= 0')
if depth is not None and depth <= 0:
raise ValueError('depth must be > 0')
if not width:
raise ValueError('width must be != 0')
self._depth = depth
self._indent_per_level = indent
self._width = width
if stream is not None:
self._stream = stream
else:
self._stream = _sys.stdout
self._compact = bool(compact)
self._sort_dicts = sort_dicts
self._underscore_numbers = underscore_numbers
def pprint(self, object):
self._format(object, self._stream, 0, 0, {}, 0)
self._stream.write("\n")
def pformat(self, object):
sio = _StringIO()
self._format(object, sio, 0, 0, {}, 0)
return sio.getvalue()
def isrecursive(self, object):
return self.format(object, {}, 0, 0)[2]
def isreadable(self, object):
s, readable, recursive = self.format(object, {}, 0, 0)
return readable and not recursive
def _format(self, object, stream, indent, allowance, context, level):
objid = id(object)
if objid in context:
stream.write(_recursion(object))
self._recursive = True
self._readable = False
return
rep = self._repr(object, context, level)
max_width = self._width - indent - allowance
if len(rep) > max_width:
p = self._dispatch.get(type(object).__repr__, None)
if p is not None:
context[objid] = 1
p(self, object, stream, indent, allowance, context, level + 1)
del context[objid]
return
elif (_dataclasses.is_dataclass(object) and
not isinstance(object, type) and
object.__dataclass_params__.repr and
# Check dataclass has generated repr method.
hasattr(object.__repr__, "__wrapped__") and
"__create_fn__" in object.__repr__.__wrapped__.__qualname__):
context[objid] = 1
self._pprint_dataclass(object, stream, indent, allowance, context, level + 1)
del context[objid]
return
stream.write(rep)
def _pprint_dataclass(self, object, stream, indent, allowance, context, level):
cls_name = object.__class__.__name__
indent += len(cls_name) + 1
items = [(f.name, getattr(object, f.name)) for f in _dataclasses.fields(object) if f.repr]
stream.write(cls_name + '(')
self._format_namespace_items(items, stream, indent, allowance, context, level)
stream.write(')')
_dispatch = {}
def _pprint_dict(self, object, stream, indent, allowance, context, level):
write = stream.write
write('{')
if self._indent_per_level > 1:
write((self._indent_per_level - 1) * ' ')
length = len(object)
if length:
if self._sort_dicts:
items = sorted(object.items(), key=_safe_tuple)
else:
items = object.items()
self._format_dict_items(items, stream, indent, allowance + 1,
context, level)
write('}')
_dispatch[dict.__repr__] = _pprint_dict
def _pprint_ordered_dict(self, object, stream, indent, allowance, context, level):
if not len(object):
stream.write(repr(object))
return
cls = object.__class__
stream.write(cls.__name__ + '(')
self._format(list(object.items()), stream,
indent + len(cls.__name__) + 1, allowance + 1,
context, level)
stream.write(')')
_dispatch[_collections.OrderedDict.__repr__] = _pprint_ordered_dict
def _pprint_list(self, object, stream, indent, allowance, context, level):
stream.write('[')
self._format_items(object, stream, indent, allowance + 1,
context, level)
stream.write(']')
_dispatch[list.__repr__] = _pprint_list
def _pprint_tuple(self, object, stream, indent, allowance, context, level):
stream.write('(')
endchar = ',)' if len(object) == 1 else ')'
self._format_items(object, stream, indent, allowance + len(endchar),
context, level)
stream.write(endchar)
_dispatch[tuple.__repr__] = _pprint_tuple
def _pprint_set(self, object, stream, indent, allowance, context, level):
if not len(object):
stream.write(repr(object))
return
typ = object.__class__
if typ is set:
stream.write('{')
endchar = '}'
else:
stream.write(typ.__name__ + '({')
endchar = '})'
indent += len(typ.__name__) + 1
object = sorted(object, key=_safe_key)
self._format_items(object, stream, indent, allowance + len(endchar),
context, level)
stream.write(endchar)
_dispatch[set.__repr__] = _pprint_set
_dispatch[frozenset.__repr__] = _pprint_set
def _pprint_str(self, object, stream, indent, allowance, context, level):
write = stream.write
if not len(object):
write(repr(object))
return
chunks = []
lines = object.splitlines(True)
if level == 1:
indent += 1
allowance += 1
max_width1 = max_width = self._width - indent
for i, line in enumerate(lines):
rep = repr(line)
if i == len(lines) - 1:
max_width1 -= allowance
if len(rep) <= max_width1:
chunks.append(rep)
else:
# A list of alternating (non-space, space) strings
parts = re.findall(r'\S*\s*', line)
assert parts
assert not parts[-1]
parts.pop() # drop empty last part
max_width2 = max_width
current = ''
for j, part in enumerate(parts):
candidate = current + part
if j == len(parts) - 1 and i == len(lines) - 1:
max_width2 -= allowance
if len(repr(candidate)) > max_width2:
if current:
chunks.append(repr(current))
current = part
else:
current = candidate
if current:
chunks.append(repr(current))
if len(chunks) == 1:
write(rep)
return
if level == 1:
write('(')
for i, rep in enumerate(chunks):
if i > 0:
write('\n' + ' '*indent)
write(rep)
if level == 1:
write(')')
_dispatch[str.__repr__] = _pprint_str
def _pprint_bytes(self, object, stream, indent, allowance, context, level):
write = stream.write
if len(object) <= 4:
write(repr(object))
return
parens = level == 1
if parens:
indent += 1
allowance += 1
write('(')
delim = ''
for rep in _wrap_bytes_repr(object, self._width - indent, allowance):
write(delim)
write(rep)
if not delim:
delim = '\n' + ' '*indent
if parens:
write(')')
_dispatch[bytes.__repr__] = _pprint_bytes
def _pprint_bytearray(self, object, stream, indent, allowance, context, level):
write = stream.write
write('bytearray(')
self._pprint_bytes(bytes(object), stream, indent + 10,
allowance + 1, context, level + 1)
write(')')
_dispatch[bytearray.__repr__] = _pprint_bytearray
def _pprint_mappingproxy(self, object, stream, indent, allowance, context, level):
stream.write('mappingproxy(')
self._format(object.copy(), stream, indent + 13, allowance + 1,
context, level)
stream.write(')')
_dispatch[_types.MappingProxyType.__repr__] = _pprint_mappingproxy
def _pprint_simplenamespace(self, object, stream, indent, allowance, context, level):
if type(object) is _types.SimpleNamespace:
# The SimpleNamespace repr is "namespace" instead of the class
# name, so we do the same here. For subclasses; use the class name.
cls_name = 'namespace'
else:
cls_name = object.__class__.__name__
indent += len(cls_name) + 1
items = object.__dict__.items()
stream.write(cls_name + '(')
self._format_namespace_items(items, stream, indent, allowance, context, level)
stream.write(')')
_dispatch[_types.SimpleNamespace.__repr__] = _pprint_simplenamespace
def _format_dict_items(self, items, stream, indent, allowance, context,
level):
write = stream.write
indent += self._indent_per_level
delimnl = ',\n' + ' ' * indent
last_index = len(items) - 1
for i, (key, ent) in enumerate(items):
last = i == last_index
rep = self._repr(key, context, level)
write(rep)
write(': ')
self._format(ent, stream, indent + len(rep) + 2,
allowance if last else 1,
context, level)
if not last:
write(delimnl)
def _format_namespace_items(self, items, stream, indent, allowance, context, level):
write = stream.write
delimnl = ',\n' + ' ' * indent
last_index = len(items) - 1
for i, (key, ent) in enumerate(items):
last = i == last_index
write(key)
write('=')
if id(ent) in context:
# Special-case representation of recursion to match standard
# recursive dataclass repr.
write("...")
else:
self._format(ent, stream, indent + len(key) + 1,
allowance if last else 1,
context, level)
if not last:
write(delimnl)
def _format_items(self, items, stream, indent, allowance, context, level):
write = stream.write
indent += self._indent_per_level
if self._indent_per_level > 1:
write((self._indent_per_level - 1) * ' ')
delimnl = ',\n' + ' ' * indent
delim = ''
width = max_width = self._width - indent + 1
it = iter(items)
try:
next_ent = next(it)
except StopIteration:
return
last = False
while not last:
ent = next_ent
try:
next_ent = next(it)
except StopIteration:
last = True
max_width -= allowance
width -= allowance
if self._compact:
rep = self._repr(ent, context, level)
w = len(rep) + 2
if width < w:
width = max_width
if delim:
delim = delimnl
if width >= w:
width -= w
write(delim)
delim = ', '
write(rep)
continue
write(delim)
delim = delimnl
self._format(ent, stream, indent,
allowance if last else 1,
context, level)
def _repr(self, object, context, level):
repr, readable, recursive = self.format(object, context.copy(),
self._depth, level)
if not readable:
self._readable = False
if recursive:
self._recursive = True
return repr
def format(self, object, context, maxlevels, level):
"""Format object for a specific context, returning a string
and flags indicating whether the representation is 'readable'
and whether the object represents a recursive construct.
"""
return self._safe_repr(object, context, maxlevels, level)
def _pprint_default_dict(self, object, stream, indent, allowance, context, level):
if not len(object):
stream.write(repr(object))
return
rdf = self._repr(object.default_factory, context, level)
cls = object.__class__
indent += len(cls.__name__) + 1
stream.write('%s(%s,\n%s' % (cls.__name__, rdf, ' ' * indent))
self._pprint_dict(object, stream, indent, allowance + 1, context, level)
stream.write(')')
_dispatch[_collections.defaultdict.__repr__] = _pprint_default_dict
def _pprint_counter(self, object, stream, indent, allowance, context, level):
if not len(object):
stream.write(repr(object))
return
cls = object.__class__
stream.write(cls.__name__ + '({')
if self._indent_per_level > 1:
stream.write((self._indent_per_level - 1) * ' ')
items = object.most_common()
self._format_dict_items(items, stream,
indent + len(cls.__name__) + 1, allowance + 2,
context, level)
stream.write('})')
_dispatch[_collections.Counter.__repr__] = _pprint_counter
def _pprint_chain_map(self, object, stream, indent, allowance, context, level):
if not len(object.maps):
stream.write(repr(object))
return
cls = object.__class__
stream.write(cls.__name__ + '(')
indent += len(cls.__name__) + 1
for i, m in enumerate(object.maps):
if i == len(object.maps) - 1:
self._format(m, stream, indent, allowance + 1, context, level)
stream.write(')')
else:
self._format(m, stream, indent, 1, context, level)
stream.write(',\n' + ' ' * indent)
_dispatch[_collections.ChainMap.__repr__] = _pprint_chain_map
def _pprint_deque(self, object, stream, indent, allowance, context, level):
if not len(object):
stream.write(repr(object))
return
cls = object.__class__
stream.write(cls.__name__ + '(')
indent += len(cls.__name__) + 1
stream.write('[')
if object.maxlen is None:
self._format_items(object, stream, indent, allowance + 2,
context, level)
stream.write('])')
else:
self._format_items(object, stream, indent, 2,
context, level)
rml = self._repr(object.maxlen, context, level)
stream.write('],\n%smaxlen=%s)' % (' ' * indent, rml))
_dispatch[_collections.deque.__repr__] = _pprint_deque
def _pprint_user_dict(self, object, stream, indent, allowance, context, level):
self._format(object.data, stream, indent, allowance, context, level - 1)
_dispatch[_collections.UserDict.__repr__] = _pprint_user_dict
def _pprint_user_list(self, object, stream, indent, allowance, context, level):
self._format(object.data, stream, indent, allowance, context, level - 1)
_dispatch[_collections.UserList.__repr__] = _pprint_user_list
def _pprint_user_string(self, object, stream, indent, allowance, context, level):
self._format(object.data, stream, indent, allowance, context, level - 1)
_dispatch[_collections.UserString.__repr__] = _pprint_user_string
def _safe_repr(self, object, context, maxlevels, level):
# Return triple (repr_string, isreadable, isrecursive).
typ = type(object)
if typ in _builtin_scalars:
return repr(object), True, False
r = getattr(typ, "__repr__", None)
if issubclass(typ, int) and r is int.__repr__:
if self._underscore_numbers:
return f"{object:_d}", True, False
else:
return repr(object), True, False
if issubclass(typ, dict) and r is dict.__repr__:
if not object:
return "{}", True, False
objid = id(object)
if maxlevels and level >= maxlevels:
return "{...}", False, objid in context
if objid in context:
return _recursion(object), False, True
context[objid] = 1
readable = True
recursive = False
components = []
append = components.append
level += 1
if self._sort_dicts:
items = sorted(object.items(), key=_safe_tuple)
else:
items = object.items()
for k, v in items:
krepr, kreadable, krecur = self.format(
k, context, maxlevels, level)
vrepr, vreadable, vrecur = self.format(
v, context, maxlevels, level)
append("%s: %s" % (krepr, vrepr))
readable = readable and kreadable and vreadable
if krecur or vrecur:
recursive = True
del context[objid]
return "{%s}" % ", ".join(components), readable, recursive
if (issubclass(typ, list) and r is list.__repr__) or \
(issubclass(typ, tuple) and r is tuple.__repr__):
if issubclass(typ, list):
if not object:
return "[]", True, False
format = "[%s]"
elif len(object) == 1:
format = "(%s,)"
else:
if not object:
return "()", True, False
format = "(%s)"
objid = id(object)
if maxlevels and level >= maxlevels:
return format % "...", False, objid in context
if objid in context:
return _recursion(object), False, True
context[objid] = 1
readable = True
recursive = False
components = []
append = components.append
level += 1
for o in object:
orepr, oreadable, orecur = self.format(
o, context, maxlevels, level)
append(orepr)
if not oreadable:
readable = False
if orecur:
recursive = True
del context[objid]
return format % ", ".join(components), readable, recursive
rep = repr(object)
return rep, (rep and not rep.startswith('<')), False
The provided code snippet includes necessary dependencies for implementing the `saferepr` function. Write a Python function `def saferepr(object)` to solve the following problem:
Version of repr() which can handle recursive data structures.
Here is the function:
def saferepr(object):
"""Version of repr() which can handle recursive data structures."""
return PrettyPrinter()._safe_repr(object, {}, None, 0)[0] | Version of repr() which can handle recursive data structures. |
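A small sketch showing why saferepr() exists: it keeps working on self-referential containers and marks the cycle instead of recursing; the exact id in the output will vary.
from pprint import saferepr

lst = [1, 2]
lst.append(lst)          # a self-referential list
print(saferepr(lst))     # [1, 2, <Recursion on list with id=...>]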
186,657 | import collections as _collections
import dataclasses as _dataclasses
import re
import sys as _sys
import types as _types
from io import StringIO as _StringIO
class PrettyPrinter:
def __init__(self, indent=1, width=80, depth=None, stream=None, *,
compact=False, sort_dicts=True, underscore_numbers=False):
"""Handle pretty printing operations onto a stream using a set of
configured parameters.
indent
Number of spaces to indent for each level of nesting.
width
Attempted maximum number of columns in the output.
depth
The maximum depth to print out nested structures.
stream
The desired output stream. If omitted (or false), the standard
output stream available at construction will be used.
compact
If true, several items will be combined in one line.
sort_dicts
If true, dict keys are sorted.
"""
indent = int(indent)
width = int(width)
if indent < 0:
raise ValueError('indent must be >= 0')
if depth is not None and depth <= 0:
raise ValueError('depth must be > 0')
if not width:
raise ValueError('width must be != 0')
self._depth = depth
self._indent_per_level = indent
self._width = width
if stream is not None:
self._stream = stream
else:
self._stream = _sys.stdout
self._compact = bool(compact)
self._sort_dicts = sort_dicts
self._underscore_numbers = underscore_numbers
def pprint(self, object):
self._format(object, self._stream, 0, 0, {}, 0)
self._stream.write("\n")
def pformat(self, object):
sio = _StringIO()
self._format(object, sio, 0, 0, {}, 0)
return sio.getvalue()
def isrecursive(self, object):
return self.format(object, {}, 0, 0)[2]
def isreadable(self, object):
s, readable, recursive = self.format(object, {}, 0, 0)
return readable and not recursive
def _format(self, object, stream, indent, allowance, context, level):
objid = id(object)
if objid in context:
stream.write(_recursion(object))
self._recursive = True
self._readable = False
return
rep = self._repr(object, context, level)
max_width = self._width - indent - allowance
if len(rep) > max_width:
p = self._dispatch.get(type(object).__repr__, None)
if p is not None:
context[objid] = 1
p(self, object, stream, indent, allowance, context, level + 1)
del context[objid]
return
elif (_dataclasses.is_dataclass(object) and
not isinstance(object, type) and
object.__dataclass_params__.repr and
# Check dataclass has generated repr method.
hasattr(object.__repr__, "__wrapped__") and
"__create_fn__" in object.__repr__.__wrapped__.__qualname__):
context[objid] = 1
self._pprint_dataclass(object, stream, indent, allowance, context, level + 1)
del context[objid]
return
stream.write(rep)
def _pprint_dataclass(self, object, stream, indent, allowance, context, level):
cls_name = object.__class__.__name__
indent += len(cls_name) + 1
items = [(f.name, getattr(object, f.name)) for f in _dataclasses.fields(object) if f.repr]
stream.write(cls_name + '(')
self._format_namespace_items(items, stream, indent, allowance, context, level)
stream.write(')')
_dispatch = {}
def _pprint_dict(self, object, stream, indent, allowance, context, level):
write = stream.write
write('{')
if self._indent_per_level > 1:
write((self._indent_per_level - 1) * ' ')
length = len(object)
if length:
if self._sort_dicts:
items = sorted(object.items(), key=_safe_tuple)
else:
items = object.items()
self._format_dict_items(items, stream, indent, allowance + 1,
context, level)
write('}')
_dispatch[dict.__repr__] = _pprint_dict
def _pprint_ordered_dict(self, object, stream, indent, allowance, context, level):
if not len(object):
stream.write(repr(object))
return
cls = object.__class__
stream.write(cls.__name__ + '(')
self._format(list(object.items()), stream,
indent + len(cls.__name__) + 1, allowance + 1,
context, level)
stream.write(')')
_dispatch[_collections.OrderedDict.__repr__] = _pprint_ordered_dict
def _pprint_list(self, object, stream, indent, allowance, context, level):
stream.write('[')
self._format_items(object, stream, indent, allowance + 1,
context, level)
stream.write(']')
_dispatch[list.__repr__] = _pprint_list
def _pprint_tuple(self, object, stream, indent, allowance, context, level):
stream.write('(')
endchar = ',)' if len(object) == 1 else ')'
self._format_items(object, stream, indent, allowance + len(endchar),
context, level)
stream.write(endchar)
_dispatch[tuple.__repr__] = _pprint_tuple
def _pprint_set(self, object, stream, indent, allowance, context, level):
if not len(object):
stream.write(repr(object))
return
typ = object.__class__
if typ is set:
stream.write('{')
endchar = '}'
else:
stream.write(typ.__name__ + '({')
endchar = '})'
indent += len(typ.__name__) + 1
object = sorted(object, key=_safe_key)
self._format_items(object, stream, indent, allowance + len(endchar),
context, level)
stream.write(endchar)
_dispatch[set.__repr__] = _pprint_set
_dispatch[frozenset.__repr__] = _pprint_set
def _pprint_str(self, object, stream, indent, allowance, context, level):
write = stream.write
if not len(object):
write(repr(object))
return
chunks = []
lines = object.splitlines(True)
if level == 1:
indent += 1
allowance += 1
max_width1 = max_width = self._width - indent
for i, line in enumerate(lines):
rep = repr(line)
if i == len(lines) - 1:
max_width1 -= allowance
if len(rep) <= max_width1:
chunks.append(rep)
else:
# A list of alternating (non-space, space) strings
parts = re.findall(r'\S*\s*', line)
assert parts
assert not parts[-1]
parts.pop() # drop empty last part
max_width2 = max_width
current = ''
for j, part in enumerate(parts):
candidate = current + part
if j == len(parts) - 1 and i == len(lines) - 1:
max_width2 -= allowance
if len(repr(candidate)) > max_width2:
if current:
chunks.append(repr(current))
current = part
else:
current = candidate
if current:
chunks.append(repr(current))
if len(chunks) == 1:
write(rep)
return
if level == 1:
write('(')
for i, rep in enumerate(chunks):
if i > 0:
write('\n' + ' '*indent)
write(rep)
if level == 1:
write(')')
_dispatch[str.__repr__] = _pprint_str
def _pprint_bytes(self, object, stream, indent, allowance, context, level):
write = stream.write
if len(object) <= 4:
write(repr(object))
return
parens = level == 1
if parens:
indent += 1
allowance += 1
write('(')
delim = ''
for rep in _wrap_bytes_repr(object, self._width - indent, allowance):
write(delim)
write(rep)
if not delim:
delim = '\n' + ' '*indent
if parens:
write(')')
_dispatch[bytes.__repr__] = _pprint_bytes
def _pprint_bytearray(self, object, stream, indent, allowance, context, level):
write = stream.write
write('bytearray(')
self._pprint_bytes(bytes(object), stream, indent + 10,
allowance + 1, context, level + 1)
write(')')
_dispatch[bytearray.__repr__] = _pprint_bytearray
def _pprint_mappingproxy(self, object, stream, indent, allowance, context, level):
stream.write('mappingproxy(')
self._format(object.copy(), stream, indent + 13, allowance + 1,
context, level)
stream.write(')')
_dispatch[_types.MappingProxyType.__repr__] = _pprint_mappingproxy
def _pprint_simplenamespace(self, object, stream, indent, allowance, context, level):
if type(object) is _types.SimpleNamespace:
# The SimpleNamespace repr is "namespace" instead of the class
# name, so we do the same here. For subclasses; use the class name.
cls_name = 'namespace'
else:
cls_name = object.__class__.__name__
indent += len(cls_name) + 1
items = object.__dict__.items()
stream.write(cls_name + '(')
self._format_namespace_items(items, stream, indent, allowance, context, level)
stream.write(')')
_dispatch[_types.SimpleNamespace.__repr__] = _pprint_simplenamespace
def _format_dict_items(self, items, stream, indent, allowance, context,
level):
write = stream.write
indent += self._indent_per_level
delimnl = ',\n' + ' ' * indent
last_index = len(items) - 1
for i, (key, ent) in enumerate(items):
last = i == last_index
rep = self._repr(key, context, level)
write(rep)
write(': ')
self._format(ent, stream, indent + len(rep) + 2,
allowance if last else 1,
context, level)
if not last:
write(delimnl)
def _format_namespace_items(self, items, stream, indent, allowance, context, level):
write = stream.write
delimnl = ',\n' + ' ' * indent
last_index = len(items) - 1
for i, (key, ent) in enumerate(items):
last = i == last_index
write(key)
write('=')
if id(ent) in context:
# Special-case representation of recursion to match standard
# recursive dataclass repr.
write("...")
else:
self._format(ent, stream, indent + len(key) + 1,
allowance if last else 1,
context, level)
if not last:
write(delimnl)
def _format_items(self, items, stream, indent, allowance, context, level):
write = stream.write
indent += self._indent_per_level
if self._indent_per_level > 1:
write((self._indent_per_level - 1) * ' ')
delimnl = ',\n' + ' ' * indent
delim = ''
width = max_width = self._width - indent + 1
it = iter(items)
try:
next_ent = next(it)
except StopIteration:
return
last = False
while not last:
ent = next_ent
try:
next_ent = next(it)
except StopIteration:
last = True
max_width -= allowance
width -= allowance
if self._compact:
rep = self._repr(ent, context, level)
w = len(rep) + 2
if width < w:
width = max_width
if delim:
delim = delimnl
if width >= w:
width -= w
write(delim)
delim = ', '
write(rep)
continue
write(delim)
delim = delimnl
self._format(ent, stream, indent,
allowance if last else 1,
context, level)
def _repr(self, object, context, level):
repr, readable, recursive = self.format(object, context.copy(),
self._depth, level)
if not readable:
self._readable = False
if recursive:
self._recursive = True
return repr
def format(self, object, context, maxlevels, level):
"""Format object for a specific context, returning a string
and flags indicating whether the representation is 'readable'
and whether the object represents a recursive construct.
"""
return self._safe_repr(object, context, maxlevels, level)
def _pprint_default_dict(self, object, stream, indent, allowance, context, level):
if not len(object):
stream.write(repr(object))
return
rdf = self._repr(object.default_factory, context, level)
cls = object.__class__
indent += len(cls.__name__) + 1
stream.write('%s(%s,\n%s' % (cls.__name__, rdf, ' ' * indent))
self._pprint_dict(object, stream, indent, allowance + 1, context, level)
stream.write(')')
_dispatch[_collections.defaultdict.__repr__] = _pprint_default_dict
def _pprint_counter(self, object, stream, indent, allowance, context, level):
if not len(object):
stream.write(repr(object))
return
cls = object.__class__
stream.write(cls.__name__ + '({')
if self._indent_per_level > 1:
stream.write((self._indent_per_level - 1) * ' ')
items = object.most_common()
self._format_dict_items(items, stream,
indent + len(cls.__name__) + 1, allowance + 2,
context, level)
stream.write('})')
_dispatch[_collections.Counter.__repr__] = _pprint_counter
def _pprint_chain_map(self, object, stream, indent, allowance, context, level):
if not len(object.maps):
stream.write(repr(object))
return
cls = object.__class__
stream.write(cls.__name__ + '(')
indent += len(cls.__name__) + 1
for i, m in enumerate(object.maps):
if i == len(object.maps) - 1:
self._format(m, stream, indent, allowance + 1, context, level)
stream.write(')')
else:
self._format(m, stream, indent, 1, context, level)
stream.write(',\n' + ' ' * indent)
_dispatch[_collections.ChainMap.__repr__] = _pprint_chain_map
def _pprint_deque(self, object, stream, indent, allowance, context, level):
if not len(object):
stream.write(repr(object))
return
cls = object.__class__
stream.write(cls.__name__ + '(')
indent += len(cls.__name__) + 1
stream.write('[')
if object.maxlen is None:
self._format_items(object, stream, indent, allowance + 2,
context, level)
stream.write('])')
else:
self._format_items(object, stream, indent, 2,
context, level)
rml = self._repr(object.maxlen, context, level)
stream.write('],\n%smaxlen=%s)' % (' ' * indent, rml))
_dispatch[_collections.deque.__repr__] = _pprint_deque
def _pprint_user_dict(self, object, stream, indent, allowance, context, level):
self._format(object.data, stream, indent, allowance, context, level - 1)
_dispatch[_collections.UserDict.__repr__] = _pprint_user_dict
def _pprint_user_list(self, object, stream, indent, allowance, context, level):
self._format(object.data, stream, indent, allowance, context, level - 1)
_dispatch[_collections.UserList.__repr__] = _pprint_user_list
def _pprint_user_string(self, object, stream, indent, allowance, context, level):
self._format(object.data, stream, indent, allowance, context, level - 1)
_dispatch[_collections.UserString.__repr__] = _pprint_user_string
def _safe_repr(self, object, context, maxlevels, level):
# Return triple (repr_string, isreadable, isrecursive).
typ = type(object)
if typ in _builtin_scalars:
return repr(object), True, False
r = getattr(typ, "__repr__", None)
if issubclass(typ, int) and r is int.__repr__:
if self._underscore_numbers:
return f"{object:_d}", True, False
else:
return repr(object), True, False
if issubclass(typ, dict) and r is dict.__repr__:
if not object:
return "{}", True, False
objid = id(object)
if maxlevels and level >= maxlevels:
return "{...}", False, objid in context
if objid in context:
return _recursion(object), False, True
context[objid] = 1
readable = True
recursive = False
components = []
append = components.append
level += 1
if self._sort_dicts:
items = sorted(object.items(), key=_safe_tuple)
else:
items = object.items()
for k, v in items:
krepr, kreadable, krecur = self.format(
k, context, maxlevels, level)
vrepr, vreadable, vrecur = self.format(
v, context, maxlevels, level)
append("%s: %s" % (krepr, vrepr))
readable = readable and kreadable and vreadable
if krecur or vrecur:
recursive = True
del context[objid]
return "{%s}" % ", ".join(components), readable, recursive
if (issubclass(typ, list) and r is list.__repr__) or \
(issubclass(typ, tuple) and r is tuple.__repr__):
if issubclass(typ, list):
if not object:
return "[]", True, False
format = "[%s]"
elif len(object) == 1:
format = "(%s,)"
else:
if not object:
return "()", True, False
format = "(%s)"
objid = id(object)
if maxlevels and level >= maxlevels:
return format % "...", False, objid in context
if objid in context:
return _recursion(object), False, True
context[objid] = 1
readable = True
recursive = False
components = []
append = components.append
level += 1
for o in object:
orepr, oreadable, orecur = self.format(
o, context, maxlevels, level)
append(orepr)
if not oreadable:
readable = False
if orecur:
recursive = True
del context[objid]
return format % ", ".join(components), readable, recursive
rep = repr(object)
return rep, (rep and not rep.startswith('<')), False
The provided code snippet includes necessary dependencies for implementing the `isreadable` function. Write a Python function `def isreadable(object)` to solve the following problem:
Determine if saferepr(object) is readable by eval().
Here is the function:
def isreadable(object):
"""Determine if saferepr(object) is readable by eval()."""
return PrettyPrinter()._safe_repr(object, {}, None, 0)[1] | Determine if saferepr(object) is readable by eval(). |
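A quick usage sketch for isreadable; this relies on the standard-library pprint module, which exposes the same function. "Readable" means the repr could be fed back to eval():

import pprint
print(pprint.isreadable({'a': [1, 2, 3]}))   # True: the repr can be round-tripped through eval()
print(pprint.isreadable(object()))           # False: the repr starts with '<' and cannot be eval()'d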
186,658 | import collections as _collections
import dataclasses as _dataclasses
import re
import sys as _sys
import types as _types
from io import StringIO as _StringIO
class PrettyPrinter:
def __init__(self, indent=1, width=80, depth=None, stream=None, *,
compact=False, sort_dicts=True, underscore_numbers=False):
"""Handle pretty printing operations onto a stream using a set of
configured parameters.
indent
Number of spaces to indent for each level of nesting.
width
Attempted maximum number of columns in the output.
depth
The maximum depth to print out nested structures.
stream
The desired output stream. If omitted (or false), the standard
output stream available at construction will be used.
compact
If true, several items will be combined in one line.
sort_dicts
If true, dict keys are sorted.
"""
indent = int(indent)
width = int(width)
if indent < 0:
raise ValueError('indent must be >= 0')
if depth is not None and depth <= 0:
raise ValueError('depth must be > 0')
if not width:
raise ValueError('width must be != 0')
self._depth = depth
self._indent_per_level = indent
self._width = width
if stream is not None:
self._stream = stream
else:
self._stream = _sys.stdout
self._compact = bool(compact)
self._sort_dicts = sort_dicts
self._underscore_numbers = underscore_numbers
def pprint(self, object):
self._format(object, self._stream, 0, 0, {}, 0)
self._stream.write("\n")
def pformat(self, object):
sio = _StringIO()
self._format(object, sio, 0, 0, {}, 0)
return sio.getvalue()
def isrecursive(self, object):
return self.format(object, {}, 0, 0)[2]
def isreadable(self, object):
s, readable, recursive = self.format(object, {}, 0, 0)
return readable and not recursive
def _format(self, object, stream, indent, allowance, context, level):
objid = id(object)
if objid in context:
stream.write(_recursion(object))
self._recursive = True
self._readable = False
return
rep = self._repr(object, context, level)
max_width = self._width - indent - allowance
if len(rep) > max_width:
p = self._dispatch.get(type(object).__repr__, None)
if p is not None:
context[objid] = 1
p(self, object, stream, indent, allowance, context, level + 1)
del context[objid]
return
elif (_dataclasses.is_dataclass(object) and
not isinstance(object, type) and
object.__dataclass_params__.repr and
# Check dataclass has generated repr method.
hasattr(object.__repr__, "__wrapped__") and
"__create_fn__" in object.__repr__.__wrapped__.__qualname__):
context[objid] = 1
self._pprint_dataclass(object, stream, indent, allowance, context, level + 1)
del context[objid]
return
stream.write(rep)
def _pprint_dataclass(self, object, stream, indent, allowance, context, level):
cls_name = object.__class__.__name__
indent += len(cls_name) + 1
items = [(f.name, getattr(object, f.name)) for f in _dataclasses.fields(object) if f.repr]
stream.write(cls_name + '(')
self._format_namespace_items(items, stream, indent, allowance, context, level)
stream.write(')')
_dispatch = {}
def _pprint_dict(self, object, stream, indent, allowance, context, level):
write = stream.write
write('{')
if self._indent_per_level > 1:
write((self._indent_per_level - 1) * ' ')
length = len(object)
if length:
if self._sort_dicts:
items = sorted(object.items(), key=_safe_tuple)
else:
items = object.items()
self._format_dict_items(items, stream, indent, allowance + 1,
context, level)
write('}')
_dispatch[dict.__repr__] = _pprint_dict
def _pprint_ordered_dict(self, object, stream, indent, allowance, context, level):
if not len(object):
stream.write(repr(object))
return
cls = object.__class__
stream.write(cls.__name__ + '(')
self._format(list(object.items()), stream,
indent + len(cls.__name__) + 1, allowance + 1,
context, level)
stream.write(')')
_dispatch[_collections.OrderedDict.__repr__] = _pprint_ordered_dict
def _pprint_list(self, object, stream, indent, allowance, context, level):
stream.write('[')
self._format_items(object, stream, indent, allowance + 1,
context, level)
stream.write(']')
_dispatch[list.__repr__] = _pprint_list
def _pprint_tuple(self, object, stream, indent, allowance, context, level):
stream.write('(')
endchar = ',)' if len(object) == 1 else ')'
self._format_items(object, stream, indent, allowance + len(endchar),
context, level)
stream.write(endchar)
_dispatch[tuple.__repr__] = _pprint_tuple
def _pprint_set(self, object, stream, indent, allowance, context, level):
if not len(object):
stream.write(repr(object))
return
typ = object.__class__
if typ is set:
stream.write('{')
endchar = '}'
else:
stream.write(typ.__name__ + '({')
endchar = '})'
indent += len(typ.__name__) + 1
object = sorted(object, key=_safe_key)
self._format_items(object, stream, indent, allowance + len(endchar),
context, level)
stream.write(endchar)
_dispatch[set.__repr__] = _pprint_set
_dispatch[frozenset.__repr__] = _pprint_set
def _pprint_str(self, object, stream, indent, allowance, context, level):
write = stream.write
if not len(object):
write(repr(object))
return
chunks = []
lines = object.splitlines(True)
if level == 1:
indent += 1
allowance += 1
max_width1 = max_width = self._width - indent
for i, line in enumerate(lines):
rep = repr(line)
if i == len(lines) - 1:
max_width1 -= allowance
if len(rep) <= max_width1:
chunks.append(rep)
else:
# A list of alternating (non-space, space) strings
parts = re.findall(r'\S*\s*', line)
assert parts
assert not parts[-1]
parts.pop() # drop empty last part
max_width2 = max_width
current = ''
for j, part in enumerate(parts):
candidate = current + part
if j == len(parts) - 1 and i == len(lines) - 1:
max_width2 -= allowance
if len(repr(candidate)) > max_width2:
if current:
chunks.append(repr(current))
current = part
else:
current = candidate
if current:
chunks.append(repr(current))
if len(chunks) == 1:
write(rep)
return
if level == 1:
write('(')
for i, rep in enumerate(chunks):
if i > 0:
write('\n' + ' '*indent)
write(rep)
if level == 1:
write(')')
_dispatch[str.__repr__] = _pprint_str
def _pprint_bytes(self, object, stream, indent, allowance, context, level):
write = stream.write
if len(object) <= 4:
write(repr(object))
return
parens = level == 1
if parens:
indent += 1
allowance += 1
write('(')
delim = ''
for rep in _wrap_bytes_repr(object, self._width - indent, allowance):
write(delim)
write(rep)
if not delim:
delim = '\n' + ' '*indent
if parens:
write(')')
_dispatch[bytes.__repr__] = _pprint_bytes
def _pprint_bytearray(self, object, stream, indent, allowance, context, level):
write = stream.write
write('bytearray(')
self._pprint_bytes(bytes(object), stream, indent + 10,
allowance + 1, context, level + 1)
write(')')
_dispatch[bytearray.__repr__] = _pprint_bytearray
def _pprint_mappingproxy(self, object, stream, indent, allowance, context, level):
stream.write('mappingproxy(')
self._format(object.copy(), stream, indent + 13, allowance + 1,
context, level)
stream.write(')')
_dispatch[_types.MappingProxyType.__repr__] = _pprint_mappingproxy
def _pprint_simplenamespace(self, object, stream, indent, allowance, context, level):
if type(object) is _types.SimpleNamespace:
# The SimpleNamespace repr is "namespace" instead of the class
            # name, so we do the same here. For subclasses, use the class name.
cls_name = 'namespace'
else:
cls_name = object.__class__.__name__
indent += len(cls_name) + 1
items = object.__dict__.items()
stream.write(cls_name + '(')
self._format_namespace_items(items, stream, indent, allowance, context, level)
stream.write(')')
_dispatch[_types.SimpleNamespace.__repr__] = _pprint_simplenamespace
def _format_dict_items(self, items, stream, indent, allowance, context,
level):
write = stream.write
indent += self._indent_per_level
delimnl = ',\n' + ' ' * indent
last_index = len(items) - 1
for i, (key, ent) in enumerate(items):
last = i == last_index
rep = self._repr(key, context, level)
write(rep)
write(': ')
self._format(ent, stream, indent + len(rep) + 2,
allowance if last else 1,
context, level)
if not last:
write(delimnl)
def _format_namespace_items(self, items, stream, indent, allowance, context, level):
write = stream.write
delimnl = ',\n' + ' ' * indent
last_index = len(items) - 1
for i, (key, ent) in enumerate(items):
last = i == last_index
write(key)
write('=')
if id(ent) in context:
# Special-case representation of recursion to match standard
# recursive dataclass repr.
write("...")
else:
self._format(ent, stream, indent + len(key) + 1,
allowance if last else 1,
context, level)
if not last:
write(delimnl)
def _format_items(self, items, stream, indent, allowance, context, level):
write = stream.write
indent += self._indent_per_level
if self._indent_per_level > 1:
write((self._indent_per_level - 1) * ' ')
delimnl = ',\n' + ' ' * indent
delim = ''
width = max_width = self._width - indent + 1
it = iter(items)
try:
next_ent = next(it)
except StopIteration:
return
last = False
while not last:
ent = next_ent
try:
next_ent = next(it)
except StopIteration:
last = True
max_width -= allowance
width -= allowance
if self._compact:
rep = self._repr(ent, context, level)
w = len(rep) + 2
if width < w:
width = max_width
if delim:
delim = delimnl
if width >= w:
width -= w
write(delim)
delim = ', '
write(rep)
continue
write(delim)
delim = delimnl
self._format(ent, stream, indent,
allowance if last else 1,
context, level)
def _repr(self, object, context, level):
repr, readable, recursive = self.format(object, context.copy(),
self._depth, level)
if not readable:
self._readable = False
if recursive:
self._recursive = True
return repr
def format(self, object, context, maxlevels, level):
"""Format object for a specific context, returning a string
and flags indicating whether the representation is 'readable'
and whether the object represents a recursive construct.
"""
return self._safe_repr(object, context, maxlevels, level)
def _pprint_default_dict(self, object, stream, indent, allowance, context, level):
if not len(object):
stream.write(repr(object))
return
rdf = self._repr(object.default_factory, context, level)
cls = object.__class__
indent += len(cls.__name__) + 1
stream.write('%s(%s,\n%s' % (cls.__name__, rdf, ' ' * indent))
self._pprint_dict(object, stream, indent, allowance + 1, context, level)
stream.write(')')
_dispatch[_collections.defaultdict.__repr__] = _pprint_default_dict
def _pprint_counter(self, object, stream, indent, allowance, context, level):
if not len(object):
stream.write(repr(object))
return
cls = object.__class__
stream.write(cls.__name__ + '({')
if self._indent_per_level > 1:
stream.write((self._indent_per_level - 1) * ' ')
items = object.most_common()
self._format_dict_items(items, stream,
indent + len(cls.__name__) + 1, allowance + 2,
context, level)
stream.write('})')
_dispatch[_collections.Counter.__repr__] = _pprint_counter
def _pprint_chain_map(self, object, stream, indent, allowance, context, level):
if not len(object.maps):
stream.write(repr(object))
return
cls = object.__class__
stream.write(cls.__name__ + '(')
indent += len(cls.__name__) + 1
for i, m in enumerate(object.maps):
if i == len(object.maps) - 1:
self._format(m, stream, indent, allowance + 1, context, level)
stream.write(')')
else:
self._format(m, stream, indent, 1, context, level)
stream.write(',\n' + ' ' * indent)
_dispatch[_collections.ChainMap.__repr__] = _pprint_chain_map
def _pprint_deque(self, object, stream, indent, allowance, context, level):
if not len(object):
stream.write(repr(object))
return
cls = object.__class__
stream.write(cls.__name__ + '(')
indent += len(cls.__name__) + 1
stream.write('[')
if object.maxlen is None:
self._format_items(object, stream, indent, allowance + 2,
context, level)
stream.write('])')
else:
self._format_items(object, stream, indent, 2,
context, level)
rml = self._repr(object.maxlen, context, level)
stream.write('],\n%smaxlen=%s)' % (' ' * indent, rml))
_dispatch[_collections.deque.__repr__] = _pprint_deque
def _pprint_user_dict(self, object, stream, indent, allowance, context, level):
self._format(object.data, stream, indent, allowance, context, level - 1)
_dispatch[_collections.UserDict.__repr__] = _pprint_user_dict
def _pprint_user_list(self, object, stream, indent, allowance, context, level):
self._format(object.data, stream, indent, allowance, context, level - 1)
_dispatch[_collections.UserList.__repr__] = _pprint_user_list
def _pprint_user_string(self, object, stream, indent, allowance, context, level):
self._format(object.data, stream, indent, allowance, context, level - 1)
_dispatch[_collections.UserString.__repr__] = _pprint_user_string
def _safe_repr(self, object, context, maxlevels, level):
# Return triple (repr_string, isreadable, isrecursive).
typ = type(object)
if typ in _builtin_scalars:
return repr(object), True, False
r = getattr(typ, "__repr__", None)
if issubclass(typ, int) and r is int.__repr__:
if self._underscore_numbers:
return f"{object:_d}", True, False
else:
return repr(object), True, False
if issubclass(typ, dict) and r is dict.__repr__:
if not object:
return "{}", True, False
objid = id(object)
if maxlevels and level >= maxlevels:
return "{...}", False, objid in context
if objid in context:
return _recursion(object), False, True
context[objid] = 1
readable = True
recursive = False
components = []
append = components.append
level += 1
if self._sort_dicts:
items = sorted(object.items(), key=_safe_tuple)
else:
items = object.items()
for k, v in items:
krepr, kreadable, krecur = self.format(
k, context, maxlevels, level)
vrepr, vreadable, vrecur = self.format(
v, context, maxlevels, level)
append("%s: %s" % (krepr, vrepr))
readable = readable and kreadable and vreadable
if krecur or vrecur:
recursive = True
del context[objid]
return "{%s}" % ", ".join(components), readable, recursive
if (issubclass(typ, list) and r is list.__repr__) or \
(issubclass(typ, tuple) and r is tuple.__repr__):
if issubclass(typ, list):
if not object:
return "[]", True, False
format = "[%s]"
elif len(object) == 1:
format = "(%s,)"
else:
if not object:
return "()", True, False
format = "(%s)"
objid = id(object)
if maxlevels and level >= maxlevels:
return format % "...", False, objid in context
if objid in context:
return _recursion(object), False, True
context[objid] = 1
readable = True
recursive = False
components = []
append = components.append
level += 1
for o in object:
orepr, oreadable, orecur = self.format(
o, context, maxlevels, level)
append(orepr)
if not oreadable:
readable = False
if orecur:
recursive = True
del context[objid]
return format % ", ".join(components), readable, recursive
rep = repr(object)
return rep, (rep and not rep.startswith('<')), False
The provided code snippet includes necessary dependencies for implementing the `isrecursive` function. Write a Python function `def isrecursive(object)` to solve the following problem:
Determine if object requires a recursive representation.
Here is the function:
def isrecursive(object):
"""Determine if object requires a recursive representation."""
return PrettyPrinter()._safe_repr(object, {}, None, 0)[2] | Determine if object requires a recursive representation. |
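A quick usage sketch for isrecursive, again via the standard-library pprint module; it reports whether the object contains a reference cycle that saferepr would have to cut off:

import pprint
data = [1, 2, 3]
data.append(data)                  # the list now contains itself
print(pprint.isrecursive(data))    # True: a recursive construct
print(pprint.isrecursive([1, 2]))  # False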
186,659 | import collections as _collections
import dataclasses as _dataclasses
import re
import sys as _sys
import types as _types
from io import StringIO as _StringIO
class _safe_key:
"""Helper function for key functions when sorting unorderable objects.
    The wrapped object will fall back to a Py2.x style comparison for
unorderable types (sorting first comparing the type name and then by
the obj ids). Does not work recursively, so dict.items() must have
_safe_key applied to both the key and the value.
"""
__slots__ = ['obj']
def __init__(self, obj):
self.obj = obj
def __lt__(self, other):
try:
return self.obj < other.obj
except TypeError:
return ((str(type(self.obj)), id(self.obj)) < \
(str(type(other.obj)), id(other.obj)))
The provided code snippet includes necessary dependencies for implementing the `_safe_tuple` function. Write a Python function `def _safe_tuple(t)` to solve the following problem:
Helper function for comparing 2-tuples
Here is the function:
def _safe_tuple(t):
"Helper function for comparing 2-tuples"
return _safe_key(t[0]), _safe_key(t[1]) | Helper function for comparing 2-tuples |
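A small illustration of why _safe_tuple exists: sorting dict items whose keys are of mixed, unorderable types. This sketch assumes the _safe_key and _safe_tuple definitions above are in scope (they also exist as private helpers in CPython's pprint module):

mixed = {'a': 1, 1: 2, (2, 3): 3}
# sorted(mixed.items()) would raise TypeError because int, str and tuple keys do not compare
print(sorted(mixed.items(), key=_safe_tuple))
# Unorderable keys fall back to comparison by type name, so int keys sort before str before tuple.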
186,660 | import collections as _collections
import dataclasses as _dataclasses
import re
import sys as _sys
import types as _types
from io import StringIO as _StringIO
def _recursion(object):
return ("<Recursion on %s with id=%s>"
% (type(object).__name__, id(object))) | null |
186,661 | import collections as _collections
import dataclasses as _dataclasses
import re
import sys as _sys
import types as _types
from io import StringIO as _StringIO
def pformat(object, indent=1, width=80, depth=None, *,
compact=False, sort_dicts=True, underscore_numbers=False):
"""Format a Python object into a pretty-printed representation."""
return PrettyPrinter(indent=indent, width=width, depth=depth,
compact=compact, sort_dicts=sort_dicts,
underscore_numbers=underscore_numbers).pformat(object)
class PrettyPrinter:
def __init__(self, indent=1, width=80, depth=None, stream=None, *,
compact=False, sort_dicts=True, underscore_numbers=False):
"""Handle pretty printing operations onto a stream using a set of
configured parameters.
indent
Number of spaces to indent for each level of nesting.
width
Attempted maximum number of columns in the output.
depth
The maximum depth to print out nested structures.
stream
The desired output stream. If omitted (or false), the standard
output stream available at construction will be used.
compact
If true, several items will be combined in one line.
sort_dicts
If true, dict keys are sorted.
"""
indent = int(indent)
width = int(width)
if indent < 0:
raise ValueError('indent must be >= 0')
if depth is not None and depth <= 0:
raise ValueError('depth must be > 0')
if not width:
raise ValueError('width must be != 0')
self._depth = depth
self._indent_per_level = indent
self._width = width
if stream is not None:
self._stream = stream
else:
self._stream = _sys.stdout
self._compact = bool(compact)
self._sort_dicts = sort_dicts
self._underscore_numbers = underscore_numbers
def pprint(self, object):
self._format(object, self._stream, 0, 0, {}, 0)
self._stream.write("\n")
def pformat(self, object):
sio = _StringIO()
self._format(object, sio, 0, 0, {}, 0)
return sio.getvalue()
def isrecursive(self, object):
return self.format(object, {}, 0, 0)[2]
def isreadable(self, object):
s, readable, recursive = self.format(object, {}, 0, 0)
return readable and not recursive
def _format(self, object, stream, indent, allowance, context, level):
objid = id(object)
if objid in context:
stream.write(_recursion(object))
self._recursive = True
self._readable = False
return
rep = self._repr(object, context, level)
max_width = self._width - indent - allowance
if len(rep) > max_width:
p = self._dispatch.get(type(object).__repr__, None)
if p is not None:
context[objid] = 1
p(self, object, stream, indent, allowance, context, level + 1)
del context[objid]
return
elif (_dataclasses.is_dataclass(object) and
not isinstance(object, type) and
object.__dataclass_params__.repr and
# Check dataclass has generated repr method.
hasattr(object.__repr__, "__wrapped__") and
"__create_fn__" in object.__repr__.__wrapped__.__qualname__):
context[objid] = 1
self._pprint_dataclass(object, stream, indent, allowance, context, level + 1)
del context[objid]
return
stream.write(rep)
def _pprint_dataclass(self, object, stream, indent, allowance, context, level):
cls_name = object.__class__.__name__
indent += len(cls_name) + 1
items = [(f.name, getattr(object, f.name)) for f in _dataclasses.fields(object) if f.repr]
stream.write(cls_name + '(')
self._format_namespace_items(items, stream, indent, allowance, context, level)
stream.write(')')
_dispatch = {}
def _pprint_dict(self, object, stream, indent, allowance, context, level):
write = stream.write
write('{')
if self._indent_per_level > 1:
write((self._indent_per_level - 1) * ' ')
length = len(object)
if length:
if self._sort_dicts:
items = sorted(object.items(), key=_safe_tuple)
else:
items = object.items()
self._format_dict_items(items, stream, indent, allowance + 1,
context, level)
write('}')
_dispatch[dict.__repr__] = _pprint_dict
def _pprint_ordered_dict(self, object, stream, indent, allowance, context, level):
if not len(object):
stream.write(repr(object))
return
cls = object.__class__
stream.write(cls.__name__ + '(')
self._format(list(object.items()), stream,
indent + len(cls.__name__) + 1, allowance + 1,
context, level)
stream.write(')')
_dispatch[_collections.OrderedDict.__repr__] = _pprint_ordered_dict
def _pprint_list(self, object, stream, indent, allowance, context, level):
stream.write('[')
self._format_items(object, stream, indent, allowance + 1,
context, level)
stream.write(']')
_dispatch[list.__repr__] = _pprint_list
def _pprint_tuple(self, object, stream, indent, allowance, context, level):
stream.write('(')
endchar = ',)' if len(object) == 1 else ')'
self._format_items(object, stream, indent, allowance + len(endchar),
context, level)
stream.write(endchar)
_dispatch[tuple.__repr__] = _pprint_tuple
def _pprint_set(self, object, stream, indent, allowance, context, level):
if not len(object):
stream.write(repr(object))
return
typ = object.__class__
if typ is set:
stream.write('{')
endchar = '}'
else:
stream.write(typ.__name__ + '({')
endchar = '})'
indent += len(typ.__name__) + 1
object = sorted(object, key=_safe_key)
self._format_items(object, stream, indent, allowance + len(endchar),
context, level)
stream.write(endchar)
_dispatch[set.__repr__] = _pprint_set
_dispatch[frozenset.__repr__] = _pprint_set
def _pprint_str(self, object, stream, indent, allowance, context, level):
write = stream.write
if not len(object):
write(repr(object))
return
chunks = []
lines = object.splitlines(True)
if level == 1:
indent += 1
allowance += 1
max_width1 = max_width = self._width - indent
for i, line in enumerate(lines):
rep = repr(line)
if i == len(lines) - 1:
max_width1 -= allowance
if len(rep) <= max_width1:
chunks.append(rep)
else:
# A list of alternating (non-space, space) strings
parts = re.findall(r'\S*\s*', line)
assert parts
assert not parts[-1]
parts.pop() # drop empty last part
max_width2 = max_width
current = ''
for j, part in enumerate(parts):
candidate = current + part
if j == len(parts) - 1 and i == len(lines) - 1:
max_width2 -= allowance
if len(repr(candidate)) > max_width2:
if current:
chunks.append(repr(current))
current = part
else:
current = candidate
if current:
chunks.append(repr(current))
if len(chunks) == 1:
write(rep)
return
if level == 1:
write('(')
for i, rep in enumerate(chunks):
if i > 0:
write('\n' + ' '*indent)
write(rep)
if level == 1:
write(')')
_dispatch[str.__repr__] = _pprint_str
def _pprint_bytes(self, object, stream, indent, allowance, context, level):
write = stream.write
if len(object) <= 4:
write(repr(object))
return
parens = level == 1
if parens:
indent += 1
allowance += 1
write('(')
delim = ''
for rep in _wrap_bytes_repr(object, self._width - indent, allowance):
write(delim)
write(rep)
if not delim:
delim = '\n' + ' '*indent
if parens:
write(')')
_dispatch[bytes.__repr__] = _pprint_bytes
def _pprint_bytearray(self, object, stream, indent, allowance, context, level):
write = stream.write
write('bytearray(')
self._pprint_bytes(bytes(object), stream, indent + 10,
allowance + 1, context, level + 1)
write(')')
_dispatch[bytearray.__repr__] = _pprint_bytearray
def _pprint_mappingproxy(self, object, stream, indent, allowance, context, level):
stream.write('mappingproxy(')
self._format(object.copy(), stream, indent + 13, allowance + 1,
context, level)
stream.write(')')
_dispatch[_types.MappingProxyType.__repr__] = _pprint_mappingproxy
def _pprint_simplenamespace(self, object, stream, indent, allowance, context, level):
if type(object) is _types.SimpleNamespace:
# The SimpleNamespace repr is "namespace" instead of the class
            # name, so we do the same here. For subclasses, use the class name.
cls_name = 'namespace'
else:
cls_name = object.__class__.__name__
indent += len(cls_name) + 1
items = object.__dict__.items()
stream.write(cls_name + '(')
self._format_namespace_items(items, stream, indent, allowance, context, level)
stream.write(')')
_dispatch[_types.SimpleNamespace.__repr__] = _pprint_simplenamespace
def _format_dict_items(self, items, stream, indent, allowance, context,
level):
write = stream.write
indent += self._indent_per_level
delimnl = ',\n' + ' ' * indent
last_index = len(items) - 1
for i, (key, ent) in enumerate(items):
last = i == last_index
rep = self._repr(key, context, level)
write(rep)
write(': ')
self._format(ent, stream, indent + len(rep) + 2,
allowance if last else 1,
context, level)
if not last:
write(delimnl)
def _format_namespace_items(self, items, stream, indent, allowance, context, level):
write = stream.write
delimnl = ',\n' + ' ' * indent
last_index = len(items) - 1
for i, (key, ent) in enumerate(items):
last = i == last_index
write(key)
write('=')
if id(ent) in context:
# Special-case representation of recursion to match standard
# recursive dataclass repr.
write("...")
else:
self._format(ent, stream, indent + len(key) + 1,
allowance if last else 1,
context, level)
if not last:
write(delimnl)
def _format_items(self, items, stream, indent, allowance, context, level):
write = stream.write
indent += self._indent_per_level
if self._indent_per_level > 1:
write((self._indent_per_level - 1) * ' ')
delimnl = ',\n' + ' ' * indent
delim = ''
width = max_width = self._width - indent + 1
it = iter(items)
try:
next_ent = next(it)
except StopIteration:
return
last = False
while not last:
ent = next_ent
try:
next_ent = next(it)
except StopIteration:
last = True
max_width -= allowance
width -= allowance
if self._compact:
rep = self._repr(ent, context, level)
w = len(rep) + 2
if width < w:
width = max_width
if delim:
delim = delimnl
if width >= w:
width -= w
write(delim)
delim = ', '
write(rep)
continue
write(delim)
delim = delimnl
self._format(ent, stream, indent,
allowance if last else 1,
context, level)
def _repr(self, object, context, level):
repr, readable, recursive = self.format(object, context.copy(),
self._depth, level)
if not readable:
self._readable = False
if recursive:
self._recursive = True
return repr
def format(self, object, context, maxlevels, level):
"""Format object for a specific context, returning a string
and flags indicating whether the representation is 'readable'
and whether the object represents a recursive construct.
"""
return self._safe_repr(object, context, maxlevels, level)
def _pprint_default_dict(self, object, stream, indent, allowance, context, level):
if not len(object):
stream.write(repr(object))
return
rdf = self._repr(object.default_factory, context, level)
cls = object.__class__
indent += len(cls.__name__) + 1
stream.write('%s(%s,\n%s' % (cls.__name__, rdf, ' ' * indent))
self._pprint_dict(object, stream, indent, allowance + 1, context, level)
stream.write(')')
_dispatch[_collections.defaultdict.__repr__] = _pprint_default_dict
def _pprint_counter(self, object, stream, indent, allowance, context, level):
if not len(object):
stream.write(repr(object))
return
cls = object.__class__
stream.write(cls.__name__ + '({')
if self._indent_per_level > 1:
stream.write((self._indent_per_level - 1) * ' ')
items = object.most_common()
self._format_dict_items(items, stream,
indent + len(cls.__name__) + 1, allowance + 2,
context, level)
stream.write('})')
_dispatch[_collections.Counter.__repr__] = _pprint_counter
def _pprint_chain_map(self, object, stream, indent, allowance, context, level):
if not len(object.maps):
stream.write(repr(object))
return
cls = object.__class__
stream.write(cls.__name__ + '(')
indent += len(cls.__name__) + 1
for i, m in enumerate(object.maps):
if i == len(object.maps) - 1:
self._format(m, stream, indent, allowance + 1, context, level)
stream.write(')')
else:
self._format(m, stream, indent, 1, context, level)
stream.write(',\n' + ' ' * indent)
_dispatch[_collections.ChainMap.__repr__] = _pprint_chain_map
def _pprint_deque(self, object, stream, indent, allowance, context, level):
if not len(object):
stream.write(repr(object))
return
cls = object.__class__
stream.write(cls.__name__ + '(')
indent += len(cls.__name__) + 1
stream.write('[')
if object.maxlen is None:
self._format_items(object, stream, indent, allowance + 2,
context, level)
stream.write('])')
else:
self._format_items(object, stream, indent, 2,
context, level)
rml = self._repr(object.maxlen, context, level)
stream.write('],\n%smaxlen=%s)' % (' ' * indent, rml))
_dispatch[_collections.deque.__repr__] = _pprint_deque
def _pprint_user_dict(self, object, stream, indent, allowance, context, level):
self._format(object.data, stream, indent, allowance, context, level - 1)
_dispatch[_collections.UserDict.__repr__] = _pprint_user_dict
def _pprint_user_list(self, object, stream, indent, allowance, context, level):
self._format(object.data, stream, indent, allowance, context, level - 1)
_dispatch[_collections.UserList.__repr__] = _pprint_user_list
def _pprint_user_string(self, object, stream, indent, allowance, context, level):
self._format(object.data, stream, indent, allowance, context, level - 1)
_dispatch[_collections.UserString.__repr__] = _pprint_user_string
def _safe_repr(self, object, context, maxlevels, level):
# Return triple (repr_string, isreadable, isrecursive).
typ = type(object)
if typ in _builtin_scalars:
return repr(object), True, False
r = getattr(typ, "__repr__", None)
if issubclass(typ, int) and r is int.__repr__:
if self._underscore_numbers:
return f"{object:_d}", True, False
else:
return repr(object), True, False
if issubclass(typ, dict) and r is dict.__repr__:
if not object:
return "{}", True, False
objid = id(object)
if maxlevels and level >= maxlevels:
return "{...}", False, objid in context
if objid in context:
return _recursion(object), False, True
context[objid] = 1
readable = True
recursive = False
components = []
append = components.append
level += 1
if self._sort_dicts:
items = sorted(object.items(), key=_safe_tuple)
else:
items = object.items()
for k, v in items:
krepr, kreadable, krecur = self.format(
k, context, maxlevels, level)
vrepr, vreadable, vrecur = self.format(
v, context, maxlevels, level)
append("%s: %s" % (krepr, vrepr))
readable = readable and kreadable and vreadable
if krecur or vrecur:
recursive = True
del context[objid]
return "{%s}" % ", ".join(components), readable, recursive
if (issubclass(typ, list) and r is list.__repr__) or \
(issubclass(typ, tuple) and r is tuple.__repr__):
if issubclass(typ, list):
if not object:
return "[]", True, False
format = "[%s]"
elif len(object) == 1:
format = "(%s,)"
else:
if not object:
return "()", True, False
format = "(%s)"
objid = id(object)
if maxlevels and level >= maxlevels:
return format % "...", False, objid in context
if objid in context:
return _recursion(object), False, True
context[objid] = 1
readable = True
recursive = False
components = []
append = components.append
level += 1
for o in object:
orepr, oreadable, orecur = self.format(
o, context, maxlevels, level)
append(orepr)
if not oreadable:
readable = False
if orecur:
recursive = True
del context[objid]
return format % ", ".join(components), readable, recursive
rep = repr(object)
return rep, (rep and not rep.startswith('<')), False
def _perfcheck(object=None):
import time
if object is None:
object = [("string", (1, 2), [3, 4], {5: 6, 7: 8})] * 100000
p = PrettyPrinter()
t1 = time.perf_counter()
    p._safe_repr(object, {}, None, 0)
t2 = time.perf_counter()
p.pformat(object)
t3 = time.perf_counter()
print("_safe_repr:", t2 - t1)
print("pformat:", t3 - t2) | null |
186,662 | import collections as _collections
import dataclasses as _dataclasses
import re
import sys as _sys
import types as _types
from io import StringIO as _StringIO
def _wrap_bytes_repr(object, width, allowance):
current = b''
last = len(object) // 4 * 4
for i in range(0, len(object), 4):
part = object[i: i+4]
candidate = current + part
if i == last:
width -= allowance
if len(repr(candidate)) > width:
if current:
yield repr(current)
current = part
else:
current = candidate
if current:
yield repr(current) | null |
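A small sketch of what _wrap_bytes_repr yields, assuming the generator defined above is in scope: it breaks a long bytes repr into whole 4-byte groups whose reprs fit the given width:

for chunk in _wrap_bytes_repr(bytes(range(16)), 20, 1):
    print(chunk)
# Yields four reprs of 4 bytes each (starting with b'\x00\x01\x02\x03'),
# since any wider candidate would overflow the 20-column width.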
186,663 | from builtins import open as _builtin_open
import io
import os
import _compression
from _bz2 import BZ2Compressor, BZ2Decompressor
The provided code snippet includes necessary dependencies for implementing the `compress` function. Write a Python function `def compress(data, compresslevel=9)` to solve the following problem:
Compress a block of data. compresslevel, if given, must be a number between 1 and 9. For incremental compression, use a BZ2Compressor object instead.
Here is the function:
def compress(data, compresslevel=9):
"""Compress a block of data.
compresslevel, if given, must be a number between 1 and 9.
For incremental compression, use a BZ2Compressor object instead.
"""
comp = BZ2Compressor(compresslevel)
return comp.compress(data) + comp.flush() | Compress a block of data. compresslevel, if given, must be a number between 1 and 9. For incremental compression, use a BZ2Compressor object instead. |
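A quick sketch of compress on highly repetitive input, using the standard-library bz2 module (which provides the same function); bzip2 does especially well on redundant data:

import bz2
payload = b"hello world " * 1000
packed = bz2.compress(payload, compresslevel=9)
print(len(payload), '->', len(packed))   # 12000 -> typically well under 100 bytes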
186,664 | from builtins import open as _builtin_open
import io
import os
import _compression
from _bz2 import BZ2Compressor, BZ2Decompressor
The provided code snippet includes necessary dependencies for implementing the `decompress` function. Write a Python function `def decompress(data)` to solve the following problem:
Decompress a block of data. For incremental decompression, use a BZ2Decompressor object instead.
Here is the function:
def decompress(data):
"""Decompress a block of data.
For incremental decompression, use a BZ2Decompressor object instead.
"""
results = []
while data:
decomp = BZ2Decompressor()
try:
res = decomp.decompress(data)
except OSError:
if results:
break # Leftover data is not a valid bzip2 stream; ignore it.
else:
raise # Error on the first iteration; bail out.
results.append(res)
if not decomp.eof:
raise ValueError("Compressed data ended before the "
"end-of-stream marker was reached")
data = decomp.unused_data
return b"".join(results) | Decompress a block of data. For incremental decompression, use a BZ2Decompressor object instead. |
186,667 |
def rgb_to_hls(r, g, b):
maxc = max(r, g, b)
minc = min(r, g, b)
sumc = (maxc+minc)
rangec = (maxc-minc)
l = sumc/2.0
if minc == maxc:
return 0.0, l, 0.0
if l <= 0.5:
s = rangec / sumc
else:
s = rangec / (2.0-sumc)
rc = (maxc-r) / rangec
gc = (maxc-g) / rangec
bc = (maxc-b) / rangec
if r == maxc:
h = bc-gc
elif g == maxc:
h = 2.0+rc-bc
else:
h = 4.0+gc-rc
h = (h/6.0) % 1.0
return h, l, s | null |
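A quick sketch of rgb_to_hls via the standard-library colorsys module, which contains this same function; all inputs and outputs are floats in the range 0..1:

import colorsys
print(colorsys.rgb_to_hls(1.0, 0.0, 0.0))   # (0.0, 0.5, 1.0): pure red -> hue 0, half lightness, full saturation
print(colorsys.rgb_to_hls(0.5, 0.5, 0.5))   # (0.0, 0.5, 0.0): any grey has zero saturation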