diff --git a/lib/python3.10/site-packages/astunparse/__init__.py b/lib/python3.10/site-packages/astunparse/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e2eeddc88f079f90cdc1df9ac8e6ba3b62578639 --- /dev/null +++ b/lib/python3.10/site-packages/astunparse/__init__.py @@ -0,0 +1,20 @@ +# coding: utf-8 +from __future__ import absolute_import +from six.moves import cStringIO +from .unparser import Unparser +from .printer import Printer + + +__version__ = '1.6.3' + + +def unparse(tree): + v = cStringIO() + Unparser(tree, file=v) + return v.getvalue() + + +def dump(tree): + v = cStringIO() + Printer(file=v).visit(tree) + return v.getvalue() diff --git a/lib/python3.10/site-packages/astunparse/__main__.py b/lib/python3.10/site-packages/astunparse/__main__.py new file mode 100644 index 0000000000000000000000000000000000000000..1729cd7c98a88f1afe19c89f996b78c6ec49c687 --- /dev/null +++ b/lib/python3.10/site-packages/astunparse/__main__.py @@ -0,0 +1,48 @@ +from __future__ import print_function +import sys +import os +import argparse +from .unparser import roundtrip +from . 
import dump + + +def roundtrip_recursive(target, dump_tree=False): + if os.path.isfile(target): + print(target) + print("=" * len(target)) + if dump_tree: + dump(target) + else: + roundtrip(target) + print() + elif os.path.isdir(target): + for item in os.listdir(target): + if item.endswith(".py"): + roundtrip_recursive(os.path.join(target, item), dump_tree) + else: + print( + "WARNING: skipping '%s', not a file or directory" % target, + file=sys.stderr + ) + + +def main(args): + parser = argparse.ArgumentParser(prog="astunparse") + parser.add_argument( + 'target', + nargs='+', + help="Files or directories to show roundtripped source for" + ) + parser.add_argument( + '--dump', + type=bool, + help="Show a pretty-printed AST instead of the source" + ) + + arguments = parser.parse_args(args) + for target in arguments.target: + roundtrip_recursive(target, dump_tree=arguments.dump) + + +if __name__ == "__main__": + main(sys.argv[1:]) diff --git a/lib/python3.10/site-packages/astunparse/printer.py b/lib/python3.10/site-packages/astunparse/printer.py new file mode 100644 index 0000000000000000000000000000000000000000..92d64f772bff92956f71aa41b806f9f57d074d04 --- /dev/null +++ b/lib/python3.10/site-packages/astunparse/printer.py @@ -0,0 +1,51 @@ +from __future__ import unicode_literals +import sys +import ast +import six + + +class Printer(ast.NodeVisitor): + + def __init__(self, file=sys.stdout, indent=" "): + self.indentation = 0 + self.indent_with = indent + self.f = file + + # overridden to make the API obvious + def visit(self, node): + super(Printer, self).visit(node) + + def write(self, text): + self.f.write(six.text_type(text)) + + def generic_visit(self, node): + + if isinstance(node, list): + nodestart = "[" + nodeend = "]" + children = [("", child) for child in node] + else: + nodestart = type(node).__name__ + "(" + nodeend = ")" + children = [(name + "=", value) for name, value in ast.iter_fields(node)] + + if len(children) > 1: + self.indentation += 1 + + 
self.write(nodestart) + for i, pair in enumerate(children): + attr, child = pair + if len(children) > 1: + self.write("\n" + self.indent_with * self.indentation) + if isinstance(child, (ast.AST, list)): + self.write(attr) + self.visit(child) + else: + self.write(attr + repr(child)) + + if i != len(children) - 1: + self.write(",") + self.write(nodeend) + + if len(children) > 1: + self.indentation -= 1 diff --git a/lib/python3.10/site-packages/astunparse/unparser.py b/lib/python3.10/site-packages/astunparse/unparser.py new file mode 100644 index 0000000000000000000000000000000000000000..0ef6fd8bcbffa8fe6245246f13ed301bdcb9492c --- /dev/null +++ b/lib/python3.10/site-packages/astunparse/unparser.py @@ -0,0 +1,906 @@ +"Usage: unparse.py " +from __future__ import print_function, unicode_literals +import six +import sys +import ast +import os +import tokenize +from six import StringIO + +# Large float and imaginary literals get turned into infinities in the AST. +# We unparse those infinities to INFSTR. +INFSTR = "1e" + repr(sys.float_info.max_10_exp + 1) + +def interleave(inter, f, seq): + """Call f on each item in seq, calling inter() in between. + """ + seq = iter(seq) + try: + f(next(seq)) + except StopIteration: + pass + else: + for x in seq: + inter() + f(x) + +class Unparser: + """Methods in this class recursively traverse an AST and + output source code for the abstract syntax; original formatting + is disregarded. """ + + def __init__(self, tree, file = sys.stdout): + """Unparser(tree, file=sys.stdout) -> None. + Print the source for tree to file.""" + self.f = file + self.future_imports = [] + self._indent = 0 + self.dispatch(tree) + print("", file=self.f) + self.f.flush() + + def fill(self, text = ""): + "Indent a piece of text, according to the current indentation level" + self.f.write("\n"+" "*self._indent + text) + + def write(self, text): + "Append a piece of text to the current line." 
+ self.f.write(six.text_type(text)) + + def enter(self): + "Print ':', and increase the indentation." + self.write(":") + self._indent += 1 + + def leave(self): + "Decrease the indentation level." + self._indent -= 1 + + def dispatch(self, tree): + "Dispatcher function, dispatching tree type T to method _T." + if isinstance(tree, list): + for t in tree: + self.dispatch(t) + return + meth = getattr(self, "_"+tree.__class__.__name__) + meth(tree) + + + ############### Unparsing methods ###################### + # There should be one method per concrete grammar type # + # Constructors should be grouped by sum type. Ideally, # + # this would follow the order in the grammar, but # + # currently doesn't. # + ######################################################## + + def _Module(self, tree): + for stmt in tree.body: + self.dispatch(stmt) + + def _Interactive(self, tree): + for stmt in tree.body: + self.dispatch(stmt) + + def _Expression(self, tree): + self.dispatch(tree.body) + + # stmt + def _Expr(self, tree): + self.fill() + self.dispatch(tree.value) + + def _NamedExpr(self, tree): + self.write("(") + self.dispatch(tree.target) + self.write(" := ") + self.dispatch(tree.value) + self.write(")") + + def _Import(self, t): + self.fill("import ") + interleave(lambda: self.write(", "), self.dispatch, t.names) + + def _ImportFrom(self, t): + # A from __future__ import may affect unparsing, so record it. + if t.module and t.module == '__future__': + self.future_imports.extend(n.name for n in t.names) + + self.fill("from ") + self.write("." 
* t.level) + if t.module: + self.write(t.module) + self.write(" import ") + interleave(lambda: self.write(", "), self.dispatch, t.names) + + def _Assign(self, t): + self.fill() + for target in t.targets: + self.dispatch(target) + self.write(" = ") + self.dispatch(t.value) + + def _AugAssign(self, t): + self.fill() + self.dispatch(t.target) + self.write(" "+self.binop[t.op.__class__.__name__]+"= ") + self.dispatch(t.value) + + def _AnnAssign(self, t): + self.fill() + if not t.simple and isinstance(t.target, ast.Name): + self.write('(') + self.dispatch(t.target) + if not t.simple and isinstance(t.target, ast.Name): + self.write(')') + self.write(": ") + self.dispatch(t.annotation) + if t.value: + self.write(" = ") + self.dispatch(t.value) + + def _Return(self, t): + self.fill("return") + if t.value: + self.write(" ") + self.dispatch(t.value) + + def _Pass(self, t): + self.fill("pass") + + def _Break(self, t): + self.fill("break") + + def _Continue(self, t): + self.fill("continue") + + def _Delete(self, t): + self.fill("del ") + interleave(lambda: self.write(", "), self.dispatch, t.targets) + + def _Assert(self, t): + self.fill("assert ") + self.dispatch(t.test) + if t.msg: + self.write(", ") + self.dispatch(t.msg) + + def _Exec(self, t): + self.fill("exec ") + self.dispatch(t.body) + if t.globals: + self.write(" in ") + self.dispatch(t.globals) + if t.locals: + self.write(", ") + self.dispatch(t.locals) + + def _Print(self, t): + self.fill("print ") + do_comma = False + if t.dest: + self.write(">>") + self.dispatch(t.dest) + do_comma = True + for e in t.values: + if do_comma:self.write(", ") + else:do_comma=True + self.dispatch(e) + if not t.nl: + self.write(",") + + def _Global(self, t): + self.fill("global ") + interleave(lambda: self.write(", "), self.write, t.names) + + def _Nonlocal(self, t): + self.fill("nonlocal ") + interleave(lambda: self.write(", "), self.write, t.names) + + def _Await(self, t): + self.write("(") + self.write("await") + if t.value: + 
self.write(" ") + self.dispatch(t.value) + self.write(")") + + def _Yield(self, t): + self.write("(") + self.write("yield") + if t.value: + self.write(" ") + self.dispatch(t.value) + self.write(")") + + def _YieldFrom(self, t): + self.write("(") + self.write("yield from") + if t.value: + self.write(" ") + self.dispatch(t.value) + self.write(")") + + def _Raise(self, t): + self.fill("raise") + if six.PY3: + if not t.exc: + assert not t.cause + return + self.write(" ") + self.dispatch(t.exc) + if t.cause: + self.write(" from ") + self.dispatch(t.cause) + else: + self.write(" ") + if t.type: + self.dispatch(t.type) + if t.inst: + self.write(", ") + self.dispatch(t.inst) + if t.tback: + self.write(", ") + self.dispatch(t.tback) + + def _Try(self, t): + self.fill("try") + self.enter() + self.dispatch(t.body) + self.leave() + for ex in t.handlers: + self.dispatch(ex) + if t.orelse: + self.fill("else") + self.enter() + self.dispatch(t.orelse) + self.leave() + if t.finalbody: + self.fill("finally") + self.enter() + self.dispatch(t.finalbody) + self.leave() + + def _TryExcept(self, t): + self.fill("try") + self.enter() + self.dispatch(t.body) + self.leave() + + for ex in t.handlers: + self.dispatch(ex) + if t.orelse: + self.fill("else") + self.enter() + self.dispatch(t.orelse) + self.leave() + + def _TryFinally(self, t): + if len(t.body) == 1 and isinstance(t.body[0], ast.TryExcept): + # try-except-finally + self.dispatch(t.body) + else: + self.fill("try") + self.enter() + self.dispatch(t.body) + self.leave() + + self.fill("finally") + self.enter() + self.dispatch(t.finalbody) + self.leave() + + def _ExceptHandler(self, t): + self.fill("except") + if t.type: + self.write(" ") + self.dispatch(t.type) + if t.name: + self.write(" as ") + if six.PY3: + self.write(t.name) + else: + self.dispatch(t.name) + self.enter() + self.dispatch(t.body) + self.leave() + + def _ClassDef(self, t): + self.write("\n") + for deco in t.decorator_list: + self.fill("@") + self.dispatch(deco) + 
self.fill("class "+t.name) + if six.PY3: + self.write("(") + comma = False + for e in t.bases: + if comma: self.write(", ") + else: comma = True + self.dispatch(e) + for e in t.keywords: + if comma: self.write(", ") + else: comma = True + self.dispatch(e) + if sys.version_info[:2] < (3, 5): + if t.starargs: + if comma: self.write(", ") + else: comma = True + self.write("*") + self.dispatch(t.starargs) + if t.kwargs: + if comma: self.write(", ") + else: comma = True + self.write("**") + self.dispatch(t.kwargs) + self.write(")") + elif t.bases: + self.write("(") + for a in t.bases: + self.dispatch(a) + self.write(", ") + self.write(")") + self.enter() + self.dispatch(t.body) + self.leave() + + def _FunctionDef(self, t): + self.__FunctionDef_helper(t, "def") + + def _AsyncFunctionDef(self, t): + self.__FunctionDef_helper(t, "async def") + + def __FunctionDef_helper(self, t, fill_suffix): + self.write("\n") + for deco in t.decorator_list: + self.fill("@") + self.dispatch(deco) + def_str = fill_suffix+" "+t.name + "(" + self.fill(def_str) + self.dispatch(t.args) + self.write(")") + if getattr(t, "returns", False): + self.write(" -> ") + self.dispatch(t.returns) + self.enter() + self.dispatch(t.body) + self.leave() + + def _For(self, t): + self.__For_helper("for ", t) + + def _AsyncFor(self, t): + self.__For_helper("async for ", t) + + def __For_helper(self, fill, t): + self.fill(fill) + self.dispatch(t.target) + self.write(" in ") + self.dispatch(t.iter) + self.enter() + self.dispatch(t.body) + self.leave() + if t.orelse: + self.fill("else") + self.enter() + self.dispatch(t.orelse) + self.leave() + + def _If(self, t): + self.fill("if ") + self.dispatch(t.test) + self.enter() + self.dispatch(t.body) + self.leave() + # collapse nested ifs into equivalent elifs. 
+ while (t.orelse and len(t.orelse) == 1 and + isinstance(t.orelse[0], ast.If)): + t = t.orelse[0] + self.fill("elif ") + self.dispatch(t.test) + self.enter() + self.dispatch(t.body) + self.leave() + # final else + if t.orelse: + self.fill("else") + self.enter() + self.dispatch(t.orelse) + self.leave() + + def _While(self, t): + self.fill("while ") + self.dispatch(t.test) + self.enter() + self.dispatch(t.body) + self.leave() + if t.orelse: + self.fill("else") + self.enter() + self.dispatch(t.orelse) + self.leave() + + def _generic_With(self, t, async_=False): + self.fill("async with " if async_ else "with ") + if hasattr(t, 'items'): + interleave(lambda: self.write(", "), self.dispatch, t.items) + else: + self.dispatch(t.context_expr) + if t.optional_vars: + self.write(" as ") + self.dispatch(t.optional_vars) + self.enter() + self.dispatch(t.body) + self.leave() + + def _With(self, t): + self._generic_With(t) + + def _AsyncWith(self, t): + self._generic_With(t, async_=True) + + # expr + def _Bytes(self, t): + self.write(repr(t.s)) + + def _Str(self, tree): + if six.PY3: + self.write(repr(tree.s)) + else: + # if from __future__ import unicode_literals is in effect, + # then we want to output string literals using a 'b' prefix + # and unicode literals with no prefix. + if "unicode_literals" not in self.future_imports: + self.write(repr(tree.s)) + elif isinstance(tree.s, str): + self.write("b" + repr(tree.s)) + elif isinstance(tree.s, unicode): + self.write(repr(tree.s).lstrip("u")) + else: + assert False, "shouldn't get here" + + def _JoinedStr(self, t): + # JoinedStr(expr* values) + self.write("f") + string = StringIO() + self._fstring_JoinedStr(t, string.write) + # Deviation from `unparse.py`: Try to find an unused quote. + # This change is made to handle _very_ complex f-strings. 
+ v = string.getvalue() + if '\n' in v or '\r' in v: + quote_types = ["'''", '"""'] + else: + quote_types = ["'", '"', '"""', "'''"] + for quote_type in quote_types: + if quote_type not in v: + v = "{quote_type}{v}{quote_type}".format(quote_type=quote_type, v=v) + break + else: + v = repr(v) + self.write(v) + + def _FormattedValue(self, t): + # FormattedValue(expr value, int? conversion, expr? format_spec) + self.write("f") + string = StringIO() + self._fstring_JoinedStr(t, string.write) + self.write(repr(string.getvalue())) + + def _fstring_JoinedStr(self, t, write): + for value in t.values: + meth = getattr(self, "_fstring_" + type(value).__name__) + meth(value, write) + + def _fstring_Str(self, t, write): + value = t.s.replace("{", "{{").replace("}", "}}") + write(value) + + def _fstring_Constant(self, t, write): + assert isinstance(t.value, str) + value = t.value.replace("{", "{{").replace("}", "}}") + write(value) + + def _fstring_FormattedValue(self, t, write): + write("{") + expr = StringIO() + Unparser(t.value, expr) + expr = expr.getvalue().rstrip("\n") + if expr.startswith("{"): + write(" ") # Separate pair of opening brackets as "{ {" + write(expr) + if t.conversion != -1: + conversion = chr(t.conversion) + assert conversion in "sra" + write("!{conversion}".format(conversion=conversion)) + if t.format_spec: + write(":") + meth = getattr(self, "_fstring_" + type(t.format_spec).__name__) + meth(t.format_spec, write) + write("}") + + def _Name(self, t): + self.write(t.id) + + def _NameConstant(self, t): + self.write(repr(t.value)) + + def _Repr(self, t): + self.write("`") + self.dispatch(t.value) + self.write("`") + + def _write_constant(self, value): + if isinstance(value, (float, complex)): + # Substitute overflowing decimal literal for AST infinities. 
+ self.write(repr(value).replace("inf", INFSTR)) + else: + self.write(repr(value)) + + def _Constant(self, t): + value = t.value + if isinstance(value, tuple): + self.write("(") + if len(value) == 1: + self._write_constant(value[0]) + self.write(",") + else: + interleave(lambda: self.write(", "), self._write_constant, value) + self.write(")") + elif value is Ellipsis: # instead of `...` for Py2 compatibility + self.write("...") + else: + if t.kind == "u": + self.write("u") + self._write_constant(t.value) + + def _Num(self, t): + repr_n = repr(t.n) + if six.PY3: + self.write(repr_n.replace("inf", INFSTR)) + else: + # Parenthesize negative numbers, to avoid turning (-1)**2 into -1**2. + if repr_n.startswith("-"): + self.write("(") + if "inf" in repr_n and repr_n.endswith("*j"): + repr_n = repr_n.replace("*j", "j") + # Substitute overflowing decimal literal for AST infinities. + self.write(repr_n.replace("inf", INFSTR)) + if repr_n.startswith("-"): + self.write(")") + + def _List(self, t): + self.write("[") + interleave(lambda: self.write(", "), self.dispatch, t.elts) + self.write("]") + + def _ListComp(self, t): + self.write("[") + self.dispatch(t.elt) + for gen in t.generators: + self.dispatch(gen) + self.write("]") + + def _GeneratorExp(self, t): + self.write("(") + self.dispatch(t.elt) + for gen in t.generators: + self.dispatch(gen) + self.write(")") + + def _SetComp(self, t): + self.write("{") + self.dispatch(t.elt) + for gen in t.generators: + self.dispatch(gen) + self.write("}") + + def _DictComp(self, t): + self.write("{") + self.dispatch(t.key) + self.write(": ") + self.dispatch(t.value) + for gen in t.generators: + self.dispatch(gen) + self.write("}") + + def _comprehension(self, t): + if getattr(t, 'is_async', False): + self.write(" async for ") + else: + self.write(" for ") + self.dispatch(t.target) + self.write(" in ") + self.dispatch(t.iter) + for if_clause in t.ifs: + self.write(" if ") + self.dispatch(if_clause) + + def _IfExp(self, t): + 
self.write("(") + self.dispatch(t.body) + self.write(" if ") + self.dispatch(t.test) + self.write(" else ") + self.dispatch(t.orelse) + self.write(")") + + def _Set(self, t): + assert(t.elts) # should be at least one element + self.write("{") + interleave(lambda: self.write(", "), self.dispatch, t.elts) + self.write("}") + + def _Dict(self, t): + self.write("{") + def write_key_value_pair(k, v): + self.dispatch(k) + self.write(": ") + self.dispatch(v) + + def write_item(item): + k, v = item + if k is None: + # for dictionary unpacking operator in dicts {**{'y': 2}} + # see PEP 448 for details + self.write("**") + self.dispatch(v) + else: + write_key_value_pair(k, v) + interleave(lambda: self.write(", "), write_item, zip(t.keys, t.values)) + self.write("}") + + def _Tuple(self, t): + self.write("(") + if len(t.elts) == 1: + elt = t.elts[0] + self.dispatch(elt) + self.write(",") + else: + interleave(lambda: self.write(", "), self.dispatch, t.elts) + self.write(")") + + unop = {"Invert":"~", "Not": "not", "UAdd":"+", "USub":"-"} + def _UnaryOp(self, t): + self.write("(") + self.write(self.unop[t.op.__class__.__name__]) + self.write(" ") + if six.PY2 and isinstance(t.op, ast.USub) and isinstance(t.operand, ast.Num): + # If we're applying unary minus to a number, parenthesize the number. + # This is necessary: -2147483648 is different from -(2147483648) on + # a 32-bit machine (the first is an int, the second a long), and + # -7j is different from -(7j). (The first has real part 0.0, the second + # has real part -0.0.) 
+ self.write("(") + self.dispatch(t.operand) + self.write(")") + else: + self.dispatch(t.operand) + self.write(")") + + binop = { "Add":"+", "Sub":"-", "Mult":"*", "MatMult":"@", "Div":"/", "Mod":"%", + "LShift":"<<", "RShift":">>", "BitOr":"|", "BitXor":"^", "BitAnd":"&", + "FloorDiv":"//", "Pow": "**"} + def _BinOp(self, t): + self.write("(") + self.dispatch(t.left) + self.write(" " + self.binop[t.op.__class__.__name__] + " ") + self.dispatch(t.right) + self.write(")") + + cmpops = {"Eq":"==", "NotEq":"!=", "Lt":"<", "LtE":"<=", "Gt":">", "GtE":">=", + "Is":"is", "IsNot":"is not", "In":"in", "NotIn":"not in"} + def _Compare(self, t): + self.write("(") + self.dispatch(t.left) + for o, e in zip(t.ops, t.comparators): + self.write(" " + self.cmpops[o.__class__.__name__] + " ") + self.dispatch(e) + self.write(")") + + boolops = {ast.And: 'and', ast.Or: 'or'} + def _BoolOp(self, t): + self.write("(") + s = " %s " % self.boolops[t.op.__class__] + interleave(lambda: self.write(s), self.dispatch, t.values) + self.write(")") + + def _Attribute(self,t): + self.dispatch(t.value) + # Special case: 3.__abs__() is a syntax error, so if t.value + # is an integer literal then we need to either parenthesize + # it or add an extra space to get 3 .__abs__(). 
+ if isinstance(t.value, getattr(ast, 'Constant', getattr(ast, 'Num', None))) and isinstance(t.value.n, int): + self.write(" ") + self.write(".") + self.write(t.attr) + + def _Call(self, t): + self.dispatch(t.func) + self.write("(") + comma = False + for e in t.args: + if comma: self.write(", ") + else: comma = True + self.dispatch(e) + for e in t.keywords: + if comma: self.write(", ") + else: comma = True + self.dispatch(e) + if sys.version_info[:2] < (3, 5): + if t.starargs: + if comma: self.write(", ") + else: comma = True + self.write("*") + self.dispatch(t.starargs) + if t.kwargs: + if comma: self.write(", ") + else: comma = True + self.write("**") + self.dispatch(t.kwargs) + self.write(")") + + def _Subscript(self, t): + self.dispatch(t.value) + self.write("[") + self.dispatch(t.slice) + self.write("]") + + def _Starred(self, t): + self.write("*") + self.dispatch(t.value) + + # slice + def _Ellipsis(self, t): + self.write("...") + + def _Index(self, t): + self.dispatch(t.value) + + def _Slice(self, t): + if t.lower: + self.dispatch(t.lower) + self.write(":") + if t.upper: + self.dispatch(t.upper) + if t.step: + self.write(":") + self.dispatch(t.step) + + def _ExtSlice(self, t): + interleave(lambda: self.write(', '), self.dispatch, t.dims) + + # argument + def _arg(self, t): + self.write(t.arg) + if t.annotation: + self.write(": ") + self.dispatch(t.annotation) + + # others + def _arguments(self, t): + first = True + # normal arguments + all_args = getattr(t, 'posonlyargs', []) + t.args + defaults = [None] * (len(all_args) - len(t.defaults)) + t.defaults + for index, elements in enumerate(zip(all_args, defaults), 1): + a, d = elements + if first:first = False + else: self.write(", ") + self.dispatch(a) + if d: + self.write("=") + self.dispatch(d) + if index == len(getattr(t, 'posonlyargs', ())): + self.write(", /") + + # varargs, or bare '*' if no varargs but keyword-only arguments present + if t.vararg or getattr(t, "kwonlyargs", False): + if first:first = 
False + else: self.write(", ") + self.write("*") + if t.vararg: + if hasattr(t.vararg, 'arg'): + self.write(t.vararg.arg) + if t.vararg.annotation: + self.write(": ") + self.dispatch(t.vararg.annotation) + else: + self.write(t.vararg) + if getattr(t, 'varargannotation', None): + self.write(": ") + self.dispatch(t.varargannotation) + + # keyword-only arguments + if getattr(t, "kwonlyargs", False): + for a, d in zip(t.kwonlyargs, t.kw_defaults): + if first:first = False + else: self.write(", ") + self.dispatch(a), + if d: + self.write("=") + self.dispatch(d) + + # kwargs + if t.kwarg: + if first:first = False + else: self.write(", ") + if hasattr(t.kwarg, 'arg'): + self.write("**"+t.kwarg.arg) + if t.kwarg.annotation: + self.write(": ") + self.dispatch(t.kwarg.annotation) + else: + self.write("**"+t.kwarg) + if getattr(t, 'kwargannotation', None): + self.write(": ") + self.dispatch(t.kwargannotation) + + def _keyword(self, t): + if t.arg is None: + # starting from Python 3.5 this denotes a kwargs part of the invocation + self.write("**") + else: + self.write(t.arg) + self.write("=") + self.dispatch(t.value) + + def _Lambda(self, t): + self.write("(") + self.write("lambda ") + self.dispatch(t.args) + self.write(": ") + self.dispatch(t.body) + self.write(")") + + def _alias(self, t): + self.write(t.name) + if t.asname: + self.write(" as "+t.asname) + + def _withitem(self, t): + self.dispatch(t.context_expr) + if t.optional_vars: + self.write(" as ") + self.dispatch(t.optional_vars) + +def roundtrip(filename, output=sys.stdout): + if six.PY3: + with open(filename, "rb") as pyfile: + encoding = tokenize.detect_encoding(pyfile.readline)[0] + with open(filename, "r", encoding=encoding) as pyfile: + source = pyfile.read() + else: + with open(filename, "r") as pyfile: + source = pyfile.read() + tree = compile(source, filename, "exec", ast.PyCF_ONLY_AST, dont_inherit=True) + Unparser(tree, output) + + + +def testdir(a): + try: + names = [n for n in os.listdir(a) if 
n.endswith('.py')] + except OSError: + print("Directory not readable: %s" % a, file=sys.stderr) + else: + for n in names: + fullname = os.path.join(a, n) + if os.path.isfile(fullname): + output = StringIO() + print('Testing %s' % fullname) + try: + roundtrip(fullname, output) + except Exception as e: + print(' Failed to compile, exception is %s' % repr(e)) + elif os.path.isdir(fullname): + testdir(fullname) + +def main(args): + if args[0] == '--testdir': + for a in args[1:]: + testdir(a) + else: + for a in args: + roundtrip(a) + +if __name__=='__main__': + main(sys.argv[1:]) diff --git a/lib/python3.10/site-packages/audioread-3.0.1.dist-info/LICENSE b/lib/python3.10/site-packages/audioread-3.0.1.dist-info/LICENSE new file mode 100644 index 0000000000000000000000000000000000000000..36b4dd8db02130b8dcf3819e0949e79a0c252230 --- /dev/null +++ b/lib/python3.10/site-packages/audioread-3.0.1.dist-info/LICENSE @@ -0,0 +1,19 @@ +Copyright (c) 2011-2018 Adrian Sampson + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, +EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF +MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
+IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, +DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR +OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE +OR OTHER DEALINGS IN THE SOFTWARE. diff --git a/lib/python3.10/site-packages/audioread-3.0.1.dist-info/METADATA b/lib/python3.10/site-packages/audioread-3.0.1.dist-info/METADATA new file mode 100644 index 0000000000000000000000000000000000000000..349aaa3ebecd5eb3c0da4d697e89ad3060cd9512 --- /dev/null +++ b/lib/python3.10/site-packages/audioread-3.0.1.dist-info/METADATA @@ -0,0 +1,239 @@ +Metadata-Version: 2.1 +Name: audioread +Version: 3.0.1 +Summary: Multi-library, cross-platform audio decoding. +Author-email: Adrian Sampson +Requires-Python: >=3.6 +Description-Content-Type: text/x-rst +Classifier: Topic :: Multimedia :: Sound/Audio :: Conversion +Classifier: Intended Audience :: Developers +Classifier: Programming Language :: Python :: 3 +Classifier: Programming Language :: Python :: 3.8 +Classifier: Programming Language :: Python :: 3.9 +Classifier: Programming Language :: Python :: 3.10 +Classifier: Programming Language :: Python :: 3.11 +Requires-Dist: tox ; extra == "test" +Project-URL: Home, https://github.com/beetbox/audioread +Provides-Extra: test + +audioread +========= + +Decode audio files using whichever backend is available. The library +currently supports: + +- `Gstreamer`_ via `PyGObject`_. +- `Core Audio`_ on Mac OS X via `ctypes`_. (PyObjC not required.) +- `MAD`_ via the `pymad`_ bindings. +- `FFmpeg`_ or `Libav`_ via its command-line interface. +- The standard library `wave`_, `aifc`_, and `sunau`_ modules (for + uncompressed audio formats). + +.. _Gstreamer: http://gstreamer.freedesktop.org/ +.. _gst-python: http://gstreamer.freedesktop.org/modules/gst-python.html +.. _Core Audio: http://developer.apple.com/technologies/mac/audio-and-video.html +.. _ctypes: http://docs.python.org/library/ctypes.html +.. 
_MAD: http://www.underbit.com/products/mad/ +.. _pymad: http://spacepants.org/src/pymad/ +.. _FFmpeg: http://ffmpeg.org/ +.. _Libav: https://www.libav.org/ +.. _wave: http://docs.python.org/library/wave.html +.. _aifc: http://docs.python.org/library/aifc.html +.. _sunau: http://docs.python.org/library/sunau.html +.. _PyGObject: https://pygobject.readthedocs.io/ + +Use the library like so:: + + with audioread.audio_open(filename) as f: + print(f.channels, f.samplerate, f.duration) + for buf in f: + do_something(buf) + +Buffers in the file can be accessed by iterating over the object returned from +``audio_open``. Each buffer is a bytes-like object (``buffer``, ``bytes``, or +``bytearray``) containing raw **16-bit little-endian signed integer PCM +data**. (Currently, these PCM format parameters are not configurable, but this +could be added to most of the backends.) + +Additional values are available as fields on the audio file object: + +- ``channels`` is the number of audio channels (an integer). +- ``samplerate`` is given in Hz (an integer). +- ``duration`` is the length of the audio in seconds (a float). + +The ``audio_open`` function transparently selects a backend that can read the +file. (Each backend is implemented in a module inside the ``audioread`` +package.) If no backends succeed in opening the file, a ``DecodeError`` +exception is raised. This exception is only used when the file type is +unsupported by the backends; if the file doesn't exist, a standard ``IOError`` +will be raised. + +A second optional parameter to ``audio_open`` specifies which backends to try +(instead of trying them all, which is the default). You can use the +``available_backends`` function to get a list backends that are usable on the +current system. + +Audioread supports Python 3 (3.8+). + +Example +------- + +The included ``decode.py`` script demonstrates using this package to +convert compressed audio files to WAV files. 
+ +Troubleshooting +--------------- + +A ``NoBackendError`` exception means that the library could not find one of +the libraries or tools it needs to decode audio. This could mean, for example, +that you have a broken installation of `FFmpeg`_. To check, try typing +``ffmpeg -version`` in your shell. If that gives you an error, try installing +FFmpeg with your OS's package manager (e.g., apt or yum) or `using Conda +`_. + +Version History +--------------- + +3.0.1 + Fix a possible deadlock when FFmpeg's version output produces too much data. + +3.0.0 + Drop support for Python 2 and older versions of Python 3. The library now + requires Python 3.6+. + Increase default block size in FFmpegAudioFile to get slightly faster file reading. + Cache backends for faster lookup (thanks to @bmcfee). + Audio file classes now inherit from a common base ``AudioFile`` class. + +2.1.9 + Work correctly with GStreamer 1.18 and later (thanks to @ssssam). + +2.1.8 + Fix an unhandled ``OSError`` when FFmpeg is not installed. + +2.1.7 + Properly close some filehandles in the FFmpeg backend (thanks to + @RyanMarcus and @ssssam). + The maddec backend now always produces bytes objects, like the other + backends (thanks to @ssssam). + Resolve an audio data memory leak in the GStreamer backend (thanks again to + @ssssam). + You can now optionally specify which specific backends ``audio_open`` should + try (thanks once again to @ssssam). + On Windows, avoid opening a console window to run FFmpeg (thanks to @flokX). + +2.1.6 + Fix a "no such process" crash in the FFmpeg backend on Windows Subsystem for + Linux (thanks to @llamasoft). + Avoid suppressing SIGINT in the GStreamer backend on older versions of + PyGObject (thanks to @lazka). + +2.1.5 + Properly clean up the file handle when a backend fails to decode a file. + Fix parsing of "N.M" channel counts in the FFmpeg backend (thanks to @piem). 
+ Avoid a crash in the raw backend when a file uses an unsupported number of + bits per sample (namely, 24-bit samples in Python < 3.4). + Add a ``__version__`` value to the package. + +2.1.4 + Fix a bug in the FFmpeg backend where, after closing a file, the program's + standard input stream would be "broken" and wouldn't receive any input. + +2.1.3 + Avoid some warnings in the GStreamer backend when using modern versions of + GLib. We now require at least GLib 2.32. + +2.1.2 + Fix a file descriptor leak when opening and closing many files using + GStreamer. + +2.1.1 + Just fix ReST formatting in the README. + +2.1.0 + The FFmpeg backend can now also use Libav's ``avconv`` command. + Fix a warning by requiring GStreamer >= 1.0. + Fix some Python 3 crashes with the new GStreamer backend (thanks to + @xix-xeaon). + +2.0.0 + The GStreamer backend now uses GStreamer 1.x via the new + gobject-introspection API (and is compatible with Python 3). + +1.2.2 + When running FFmpeg on Windows, disable its crash dialog. Thanks to + jcsaaddupuy. + +1.2.1 + Fix an unhandled exception when opening non-raw audio files (thanks to + aostanin). + Fix Python 3 compatibility for the raw-file backend. + +1.2.0 + Add support for FFmpeg on Windows (thanks to Jean-Christophe Saad-Dupuy). + +1.1.0 + Add support for Sun/NeXT `Au files`_ via the standard-library ``sunau`` + module (thanks to Dan Ellis). + +1.0.3 + Use the rawread (standard-library) backend for .wav files. + +1.0.2 + Send SIGKILL, not SIGTERM, to ffmpeg processes to avoid occasional hangs. + +1.0.1 + When GStreamer fails to report a duration, raise an exception instead of + silently setting the duration field to None. + +1.0.0 + Catch GStreamer's exception when necessary components, such as + ``uridecodebin``, are missing. + The GStreamer backend now accepts relative paths. + Fix a hang in GStreamer when the stream finishes before it begins (when + reading broken files). + Initial support for Python 3. 
+ +0.8 + All decoding errors are now subclasses of ``DecodeError``. + +0.7 + Fix opening WAV and AIFF files via Unicode filenames. + +0.6 + Make FFmpeg timeout more robust. + Dump FFmpeg output on timeout. + Fix a nondeterministic hang in the Gstreamer backend. + Fix a file descriptor leak in the MAD backend. + +0.5 + Fix crash when FFmpeg fails to report a duration. + Fix a hang when FFmpeg fills up its stderr output buffer. + Add a timeout to ``ffmpeg`` tool execution (currently 10 seconds for each + 4096-byte read); a ``ReadTimeoutError`` exception is raised if the tool times + out. + +0.4 + Fix channel count detection for FFmpeg backend. + +0.3 + Fix a problem with the Gstreamer backend where audio files could be left open + even after the ``GstAudioFile`` was "closed". + +0.2 + Fix a hang in the GStreamer backend that occurs occasionally on some + platforms. + +0.1 + Initial release. + +.. _Au files: http://en.wikipedia.org/wiki/Au_file_format + +Et Cetera +--------- + +``audioread`` is by Adrian Sampson. It is made available under `the MIT +license`_. An alternative to this module is `decoder.py`_. + +.. _the MIT license: http://www.opensource.org/licenses/mit-license.php +.. 
_decoder.py: http://www.brailleweb.com/cgi-bin/python.py + diff --git a/lib/python3.10/site-packages/audioread-3.0.1.dist-info/RECORD b/lib/python3.10/site-packages/audioread-3.0.1.dist-info/RECORD new file mode 100644 index 0000000000000000000000000000000000000000..f0a6acd50d7ea12db7f18e2bc0e340d578f3baa9 --- /dev/null +++ b/lib/python3.10/site-packages/audioread-3.0.1.dist-info/RECORD @@ -0,0 +1,15 @@ +audioread-3.0.1.dist-info/INSTALLER,sha256=5hhM4Q4mYTT9z6QB6PGpUAW81PGNFrYrdXMj4oM_6ak,2 +audioread-3.0.1.dist-info/LICENSE,sha256=4A__aKdaWCEyhC4zQmcwaZJVmG8d7DYiUvdCPbAnAZ0,1063 +audioread-3.0.1.dist-info/METADATA,sha256=PFp7DIIloSaEd08t9jljil9LFORgoybrSH0iFDyRJns,8367 +audioread-3.0.1.dist-info/RECORD,, +audioread-3.0.1.dist-info/REQUESTED,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +audioread-3.0.1.dist-info/WHEEL,sha256=EZbGkh7Ie4PoZfRQ8I0ZuP9VklN_TvcZ6DSE5Uar4z4,81 +audioread/__init__.py,sha256=33Jptohj1m0QO5216orqi9-QMkXftvUyS4nxo--6Uj8,3592 +audioread/base.py,sha256=AO1WKrUUQtrh3hCuvHJaAu_HWQnIVXLvkQyOCWFtWhU,725 +audioread/exceptions.py,sha256=RTwYBpMlBy4bWPeSxidoz69YCXjTtpHrHFMuHWiJ6h0,962 +audioread/ffdec.py,sha256=A8kcImseS99YywzNsuK8DsORho-6vyI79XeJ92CN-YQ,10541 +audioread/gstdec.py,sha256=ksh08sEgN-bLVSoITod0QkeQhXDh7s1_3BMUwTGCu2s,14643 +audioread/macca.py,sha256=HOFuu-SlWiCZetSZxGa-u2XndLy3zMCsfOOqAprFfHM,10899 +audioread/maddec.py,sha256=9MbadGkBIYXVgzZq6cgYCV2FAZwVk8AWTYrsWyee98g,2518 +audioread/rawread.py,sha256=eLA23jT41c1e0nyDmnXJrKwgFo4mNBrILhVzDPr4au8,4322 +audioread/version.py,sha256=3nFz3RTyd0r19pXSvghZzf4rus1066YSjTwpEvbrEqo,738 diff --git a/lib/python3.10/site-packages/audioread-3.0.1.dist-info/REQUESTED b/lib/python3.10/site-packages/audioread-3.0.1.dist-info/REQUESTED new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/lib/python3.10/site-packages/audioread-3.0.1.dist-info/WHEEL b/lib/python3.10/site-packages/audioread-3.0.1.dist-info/WHEEL new file mode 
100644 index 0000000000000000000000000000000000000000..3b5e64b5e6c4a210201d1676a891fd57b15cda99 --- /dev/null +++ b/lib/python3.10/site-packages/audioread-3.0.1.dist-info/WHEEL @@ -0,0 +1,4 @@ +Wheel-Version: 1.0 +Generator: flit 3.9.0 +Root-Is-Purelib: true +Tag: py3-none-any diff --git a/lib/python3.10/site-packages/datasets-2.14.4.dist-info/INSTALLER b/lib/python3.10/site-packages/datasets-2.14.4.dist-info/INSTALLER new file mode 100644 index 0000000000000000000000000000000000000000..5c69047b2eb8235994febeeae1da4a82365a240a --- /dev/null +++ b/lib/python3.10/site-packages/datasets-2.14.4.dist-info/INSTALLER @@ -0,0 +1 @@ +uv \ No newline at end of file diff --git a/lib/python3.10/site-packages/datasets-2.14.4.dist-info/LICENSE b/lib/python3.10/site-packages/datasets-2.14.4.dist-info/LICENSE new file mode 100644 index 0000000000000000000000000000000000000000..d645695673349e3947e8e5ae42332d0ac3164cd7 --- /dev/null +++ b/lib/python3.10/site-packages/datasets-2.14.4.dist-info/LICENSE @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. 
+ + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. 
For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. 
If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. 
You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. 
Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. 
+ + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/lib/python3.10/site-packages/datasets-2.14.4.dist-info/METADATA b/lib/python3.10/site-packages/datasets-2.14.4.dist-info/METADATA new file mode 100644 index 0000000000000000000000000000000000000000..475c41d9f54ce167772053a6d518b080973f19a6 --- /dev/null +++ b/lib/python3.10/site-packages/datasets-2.14.4.dist-info/METADATA @@ -0,0 +1,365 @@ +Metadata-Version: 2.1 +Name: datasets +Version: 2.14.4 +Summary: HuggingFace community-driven open-source library of datasets +Home-page: https://github.com/huggingface/datasets +Author: HuggingFace Inc. 
+Author-email: thomas@huggingface.co +License: Apache 2.0 +Download-URL: https://github.com/huggingface/datasets/tags +Keywords: datasets machine learning datasets metrics +Platform: UNKNOWN +Classifier: Development Status :: 5 - Production/Stable +Classifier: Intended Audience :: Developers +Classifier: Intended Audience :: Education +Classifier: Intended Audience :: Science/Research +Classifier: License :: OSI Approved :: Apache Software License +Classifier: Operating System :: OS Independent +Classifier: Programming Language :: Python :: 3 +Classifier: Programming Language :: Python :: 3.8 +Classifier: Programming Language :: Python :: 3.9 +Classifier: Programming Language :: Python :: 3.10 +Classifier: Topic :: Scientific/Engineering :: Artificial Intelligence +Requires-Python: >=3.8.0 +Description-Content-Type: text/markdown +License-File: LICENSE +Requires-Dist: numpy (>=1.17) +Requires-Dist: pyarrow (>=8.0.0) +Requires-Dist: dill (<0.3.8,>=0.3.0) +Requires-Dist: pandas +Requires-Dist: requests (>=2.19.0) +Requires-Dist: tqdm (>=4.62.1) +Requires-Dist: xxhash +Requires-Dist: multiprocess +Requires-Dist: fsspec[http] (>=2021.11.1) +Requires-Dist: aiohttp +Requires-Dist: huggingface-hub (<1.0.0,>=0.14.0) +Requires-Dist: packaging +Requires-Dist: pyyaml (>=5.1) +Provides-Extra: apache-beam +Requires-Dist: apache-beam (<2.44.0,>=2.26.0) ; extra == 'apache-beam' +Provides-Extra: audio +Requires-Dist: soundfile (>=0.12.1) ; extra == 'audio' +Requires-Dist: librosa ; extra == 'audio' +Provides-Extra: benchmarks +Requires-Dist: tensorflow (==2.12.0) ; extra == 'benchmarks' +Requires-Dist: torch (==2.0.1) ; extra == 'benchmarks' +Requires-Dist: transformers (==4.30.1) ; extra == 'benchmarks' +Provides-Extra: dev +Requires-Dist: absl-py ; extra == 'dev' +Requires-Dist: joblib (<1.3.0) ; extra == 'dev' +Requires-Dist: joblibspark ; extra == 'dev' +Requires-Dist: pytest ; extra == 'dev' +Requires-Dist: pytest-datadir ; extra == 'dev' +Requires-Dist: pytest-xdist ; extra 
== 'dev' +Requires-Dist: elasticsearch (<8.0.0) ; extra == 'dev' +Requires-Dist: faiss-cpu (>=1.6.4) ; extra == 'dev' +Requires-Dist: lz4 ; extra == 'dev' +Requires-Dist: pyspark (>=3.4) ; extra == 'dev' +Requires-Dist: py7zr ; extra == 'dev' +Requires-Dist: rarfile (>=4.0) ; extra == 'dev' +Requires-Dist: sqlalchemy (<2.0.0) ; extra == 'dev' +Requires-Dist: s3fs (>=2021.11.1) ; extra == 'dev' +Requires-Dist: tiktoken ; extra == 'dev' +Requires-Dist: torch ; extra == 'dev' +Requires-Dist: soundfile (>=0.12.1) ; extra == 'dev' +Requires-Dist: transformers ; extra == 'dev' +Requires-Dist: zstandard ; extra == 'dev' +Requires-Dist: Pillow (>=6.2.1) ; extra == 'dev' +Requires-Dist: librosa ; extra == 'dev' +Requires-Dist: black (~=23.1) ; extra == 'dev' +Requires-Dist: ruff (>=0.0.241) ; extra == 'dev' +Requires-Dist: pyyaml (>=5.3.1) ; extra == 'dev' +Requires-Dist: s3fs ; extra == 'dev' +Requires-Dist: apache-beam (<2.44.0,>=2.26.0) ; (python_version < "3.10") and extra == 'dev' +Requires-Dist: tensorflow (!=2.6.0,!=2.6.1,>=2.3) ; (sys_platform != "darwin" or platform_machine != "arm64") and extra == 'dev' +Requires-Dist: tensorflow (!=2.6.0,!=2.6.1,>=2.2.0) ; (sys_platform != "darwin" or platform_machine != "arm64") and extra == 'dev' +Requires-Dist: tensorflow-macos ; (sys_platform == "darwin" and platform_machine == "arm64") and extra == 'dev' +Provides-Extra: docs +Requires-Dist: s3fs ; extra == 'docs' +Requires-Dist: transformers ; extra == 'docs' +Requires-Dist: torch ; extra == 'docs' +Requires-Dist: tensorflow (!=2.6.0,!=2.6.1,>=2.2.0) ; (sys_platform != "darwin" or platform_machine != "arm64") and extra == 'docs' +Requires-Dist: tensorflow-macos ; (sys_platform == "darwin" and platform_machine == "arm64") and extra == 'docs' +Provides-Extra: jax +Requires-Dist: jax (!=0.3.2,<=0.3.25,>=0.2.8) ; extra == 'jax' +Requires-Dist: jaxlib (<=0.3.25,>=0.1.65) ; extra == 'jax' +Provides-Extra: metrics-tests +Requires-Dist: accelerate ; extra == 'metrics-tests' 
+Requires-Dist: bert-score (>=0.3.6) ; extra == 'metrics-tests' +Requires-Dist: jiwer ; extra == 'metrics-tests' +Requires-Dist: langdetect ; extra == 'metrics-tests' +Requires-Dist: mauve-text ; extra == 'metrics-tests' +Requires-Dist: nltk ; extra == 'metrics-tests' +Requires-Dist: rouge-score ; extra == 'metrics-tests' +Requires-Dist: sacrebleu ; extra == 'metrics-tests' +Requires-Dist: sacremoses ; extra == 'metrics-tests' +Requires-Dist: scikit-learn ; extra == 'metrics-tests' +Requires-Dist: scipy ; extra == 'metrics-tests' +Requires-Dist: sentencepiece ; extra == 'metrics-tests' +Requires-Dist: seqeval ; extra == 'metrics-tests' +Requires-Dist: spacy (>=3.0.0) ; extra == 'metrics-tests' +Requires-Dist: tldextract ; extra == 'metrics-tests' +Requires-Dist: toml (>=0.10.1) ; extra == 'metrics-tests' +Requires-Dist: typer (<0.5.0) ; extra == 'metrics-tests' +Requires-Dist: requests-file (>=1.5.1) ; extra == 'metrics-tests' +Requires-Dist: tldextract (>=3.1.0) ; extra == 'metrics-tests' +Requires-Dist: texttable (>=1.6.3) ; extra == 'metrics-tests' +Requires-Dist: Werkzeug (>=1.0.1) ; extra == 'metrics-tests' +Requires-Dist: six (~=1.15.0) ; extra == 'metrics-tests' +Provides-Extra: quality +Requires-Dist: black (~=23.1) ; extra == 'quality' +Requires-Dist: ruff (>=0.0.241) ; extra == 'quality' +Requires-Dist: pyyaml (>=5.3.1) ; extra == 'quality' +Provides-Extra: s3 +Requires-Dist: s3fs ; extra == 's3' +Provides-Extra: streaming +Provides-Extra: tensorflow +Requires-Dist: tensorflow (!=2.6.0,!=2.6.1,>=2.2.0) ; (sys_platform != "darwin" or platform_machine != "arm64") and extra == 'tensorflow' +Requires-Dist: tensorflow-macos ; (sys_platform == "darwin" and platform_machine == "arm64") and extra == 'tensorflow' +Provides-Extra: tensorflow_gpu +Requires-Dist: tensorflow-gpu (!=2.6.0,!=2.6.1,>=2.2.0) ; extra == 'tensorflow_gpu' +Provides-Extra: tests +Requires-Dist: absl-py ; extra == 'tests' +Requires-Dist: joblib (<1.3.0) ; extra == 'tests' +Requires-Dist: 
joblibspark ; extra == 'tests' +Requires-Dist: pytest ; extra == 'tests' +Requires-Dist: pytest-datadir ; extra == 'tests' +Requires-Dist: pytest-xdist ; extra == 'tests' +Requires-Dist: elasticsearch (<8.0.0) ; extra == 'tests' +Requires-Dist: faiss-cpu (>=1.6.4) ; extra == 'tests' +Requires-Dist: lz4 ; extra == 'tests' +Requires-Dist: pyspark (>=3.4) ; extra == 'tests' +Requires-Dist: py7zr ; extra == 'tests' +Requires-Dist: rarfile (>=4.0) ; extra == 'tests' +Requires-Dist: sqlalchemy (<2.0.0) ; extra == 'tests' +Requires-Dist: s3fs (>=2021.11.1) ; extra == 'tests' +Requires-Dist: tiktoken ; extra == 'tests' +Requires-Dist: torch ; extra == 'tests' +Requires-Dist: soundfile (>=0.12.1) ; extra == 'tests' +Requires-Dist: transformers ; extra == 'tests' +Requires-Dist: zstandard ; extra == 'tests' +Requires-Dist: Pillow (>=6.2.1) ; extra == 'tests' +Requires-Dist: librosa ; extra == 'tests' +Requires-Dist: apache-beam (<2.44.0,>=2.26.0) ; (python_version < "3.10") and extra == 'tests' +Requires-Dist: tensorflow (!=2.6.0,!=2.6.1,>=2.3) ; (sys_platform != "darwin" or platform_machine != "arm64") and extra == 'tests' +Requires-Dist: tensorflow-macos ; (sys_platform == "darwin" and platform_machine == "arm64") and extra == 'tests' +Provides-Extra: torch +Requires-Dist: torch ; extra == 'torch' +Provides-Extra: vision +Requires-Dist: Pillow (>=6.2.1) ; extra == 'vision' + +

+ + + + Hugging Face Datasets Library + +
+
+

+ +

+ + Build + + + GitHub + + + Documentation + + + GitHub release + + + Number of datasets + + + Contributor Covenant + + DOI +

+ +🤗 Datasets is a lightweight library providing **two** main features: + +- **one-line dataloaders for many public datasets**: one-liners to download and pre-process any of the ![number of datasets](https://img.shields.io/endpoint?url=https://huggingface.co/api/shields/datasets&color=brightgreen) major public datasets (image datasets, audio datasets, text datasets in 467 languages and dialects, etc.) provided on the [HuggingFace Datasets Hub](https://huggingface.co/datasets). With a simple command like `squad_dataset = load_dataset("squad")`, get any of these datasets ready to use in a dataloader for training/evaluating a ML model (Numpy/Pandas/PyTorch/TensorFlow/JAX), +- **efficient data pre-processing**: simple, fast and reproducible data pre-processing for the public datasets as well as your own local datasets in CSV, JSON, text, PNG, JPEG, WAV, MP3, Parquet, etc. With simple commands like `processed_dataset = dataset.map(process_example)`, efficiently prepare the dataset for inspection and ML model evaluation and training. + +[🎓 **Documentation**](https://huggingface.co/docs/datasets/) [🔎 **Find a dataset in the Hub**](https://huggingface.co/datasets) [🌟 **Share a dataset on the Hub**](https://huggingface.co/docs/datasets/share) + +

+ +

+ +🤗 Datasets is designed to let the community easily add and share new datasets. + +🤗 Datasets has many additional interesting features: + +- Thrive on large datasets: 🤗 Datasets naturally frees the user from RAM memory limitation, all datasets are memory-mapped using an efficient zero-serialization cost backend (Apache Arrow). +- Smart caching: never wait for your data to process several times. +- Lightweight and fast with a transparent and pythonic API (multi-processing/caching/memory-mapping). +- Built-in interoperability with NumPy, pandas, PyTorch, Tensorflow 2 and JAX. +- Native support for audio and image data +- Enable streaming mode to save disk space and start iterating over the dataset immediately. + +🤗 Datasets originated from a fork of the awesome [TensorFlow Datasets](https://github.com/tensorflow/datasets) and the HuggingFace team want to deeply thank the TensorFlow Datasets team for building this amazing library. More details on the differences between 🤗 Datasets and `tfds` can be found in the section [Main differences between 🤗 Datasets and `tfds`](#main-differences-between--datasets-and-tfds). + +# Installation + +## With pip + +🤗 Datasets can be installed from PyPi and has to be installed in a virtual environment (venv or conda for instance) + +```bash +pip install datasets +``` + +## With conda + +🤗 Datasets can be installed using conda as follows: + +```bash +conda install -c huggingface -c conda-forge datasets +``` + +Follow the installation pages of TensorFlow and PyTorch to see how to install them with conda. + +For more details on installation, check the installation page in the documentation: https://huggingface.co/docs/datasets/installation + +## Installation to use with PyTorch/TensorFlow/pandas + +If you plan to use 🤗 Datasets with PyTorch (1.0+), TensorFlow (2.2+) or pandas, you should also install PyTorch, TensorFlow or pandas. 
+ +For more details on using the library with NumPy, pandas, PyTorch or TensorFlow, check the quick start page in the documentation: https://huggingface.co/docs/datasets/quickstart + +# Usage + +🤗 Datasets is made to be very simple to use - the API is centered around a single function, `datasets.load_dataset(dataset_name, **kwargs)`, that instantiates a dataset. + +This library can be used for text/image/audio/etc. datasets. Here is an example to load a text dataset: + +Here is a quick example: + +```python +from datasets import load_dataset + +# Print all the available datasets +from huggingface_hub import list_datasets +print([dataset.id for dataset in list_datasets()]) + +# Load a dataset and print the first example in the training set +squad_dataset = load_dataset('squad') +print(squad_dataset['train'][0]) + +# Process the dataset - add a column with the length of the context texts +dataset_with_length = squad_dataset.map(lambda x: {"length": len(x["context"])}) + +# Process the dataset - tokenize the context texts (using a tokenizer from the 🤗 Transformers library) +from transformers import AutoTokenizer +tokenizer = AutoTokenizer.from_pretrained('bert-base-cased') + +tokenized_dataset = squad_dataset.map(lambda x: tokenizer(x['context']), batched=True) +``` + +If your dataset is bigger than your disk or if you don't want to wait to download the data, you can use streaming: + +```python +# If you want to use the dataset immediately and efficiently stream the data as you iterate over the dataset +image_dataset = load_dataset('cifar100', streaming=True) +for example in image_dataset["train"]: + break +``` + +For more details on using the library, check the quick start page in the documentation: https://huggingface.co/docs/datasets/quickstart and the specific pages on: + +- Loading a dataset: https://huggingface.co/docs/datasets/loading +- What's in a Dataset: https://huggingface.co/docs/datasets/access +- Processing data with 🤗 Datasets: 
https://huggingface.co/docs/datasets/process + - Processing audio data: https://huggingface.co/docs/datasets/audio_process + - Processing image data: https://huggingface.co/docs/datasets/image_process + - Processing text data: https://huggingface.co/docs/datasets/nlp_process +- Streaming a dataset: https://huggingface.co/docs/datasets/stream +- Writing your own dataset loading script: https://huggingface.co/docs/datasets/dataset_script +- etc. + +# Add a new dataset to the Hub + +We have a very detailed step-by-step guide to add a new dataset to the ![number of datasets](https://img.shields.io/endpoint?url=https://huggingface.co/api/shields/datasets&color=brightgreen) datasets already provided on the [HuggingFace Datasets Hub](https://huggingface.co/datasets). + +You can find: +- [how to upload a dataset to the Hub using your web browser or Python](https://huggingface.co/docs/datasets/upload_dataset) and also +- [how to upload it using Git](https://huggingface.co/docs/datasets/share). + +# Main differences between 🤗 Datasets and `tfds` + +If you are familiar with the great TensorFlow Datasets, here are the main differences between 🤗 Datasets and `tfds`: + +- the scripts in 🤗 Datasets are not provided within the library but are queried, downloaded/cached and dynamically loaded upon request +- the backend serialization of 🤗 Datasets is based on [Apache Arrow](https://arrow.apache.org/) instead of TF Records and leverage python dataclasses for info and features with some diverging features (we mostly don't do encoding and store the raw data as much as possible in the backend serialization cache). +- the user-facing dataset object of 🤗 Datasets is not a `tf.data.Dataset` but a built-in framework-agnostic dataset class with methods inspired by what we like in `tf.data` (like a `map()` method). It basically wraps a memory-mapped Arrow table cache. 
+ +# Disclaimers + +🤗 Datasets may run Python code defined by the dataset authors to parse certain data formats or structures. For security reasons, we ask users to: +- check the dataset scripts they're going to run beforehand and +- pin the `revision` of the repositories they use. + +If you're a dataset owner and wish to update any part of it (description, citation, license, etc.), or do not want your dataset to be included in the Hugging Face Hub, please get in touch by opening a discussion or a pull request in the Community tab of the dataset page. Thanks for your contribution to the ML community! + +## BibTeX + +If you want to cite our 🤗 Datasets library, you can use our [paper](https://arxiv.org/abs/2109.02846): + +```bibtex +@inproceedings{lhoest-etal-2021-datasets, + title = "Datasets: A Community Library for Natural Language Processing", + author = "Lhoest, Quentin and + Villanova del Moral, Albert and + Jernite, Yacine and + Thakur, Abhishek and + von Platen, Patrick and + Patil, Suraj and + Chaumond, Julien and + Drame, Mariama and + Plu, Julien and + Tunstall, Lewis and + Davison, Joe and + {\v{S}}a{\v{s}}ko, Mario and + Chhablani, Gunjan and + Malik, Bhavitvya and + Brandeis, Simon and + Le Scao, Teven and + Sanh, Victor and + Xu, Canwen and + Patry, Nicolas and + McMillan-Major, Angelina and + Schmid, Philipp and + Gugger, Sylvain and + Delangue, Cl{\'e}ment and + Matussi{\`e}re, Th{\'e}o and + Debut, Lysandre and + Bekman, Stas and + Cistac, Pierric and + Goehringer, Thibault and + Mustar, Victor and + Lagunas, Fran{\c{c}}ois and + Rush, Alexander and + Wolf, Thomas", + booktitle = "Proceedings of the 2021 Conference on Empirical Methods in Natural Language Processing: System Demonstrations", + month = nov, + year = "2021", + address = "Online and Punta Cana, Dominican Republic", + publisher = "Association for Computational Linguistics", + url = "https://aclanthology.org/2021.emnlp-demo.21", + pages = "175--184", + abstract = "The scale, variety, and 
quantity of publicly-available NLP datasets has grown rapidly as researchers propose new tasks, larger models, and novel benchmarks. Datasets is a community library for contemporary NLP designed to support this ecosystem. Datasets aims to standardize end-user interfaces, versioning, and documentation, while providing a lightweight front-end that behaves similarly for small datasets as for internet-scale corpora. The design of the library incorporates a distributed, community-driven approach to adding datasets and documenting usage. After a year of development, the library now includes more than 650 unique datasets, has more than 250 contributors, and has helped support a variety of novel cross-dataset research projects and shared tasks. The library is available at https://github.com/huggingface/datasets.", + eprint={2109.02846}, + archivePrefix={arXiv}, + primaryClass={cs.CL}, +} +``` + +If you need to cite a specific version of our 🤗 Datasets library for reproducibility, you can use the corresponding version Zenodo DOI from this [list](https://zenodo.org/search?q=conceptrecid:%224817768%22&sort=-version&all_versions=True). 
+ + diff --git a/lib/python3.10/site-packages/datasets-2.14.4.dist-info/RECORD b/lib/python3.10/site-packages/datasets-2.14.4.dist-info/RECORD new file mode 100644 index 0000000000000000000000000000000000000000..dee96ab37c11c668a8e6cd57c478c8012d49756d --- /dev/null +++ b/lib/python3.10/site-packages/datasets-2.14.4.dist-info/RECORD @@ -0,0 +1,131 @@ +../../../bin/datasets-cli,sha256=7oI9mamYfuMzQ3NO2AzGid32CALPd9iay3ymjzDjrAQ,341 +datasets-2.14.4.dist-info/INSTALLER,sha256=5hhM4Q4mYTT9z6QB6PGpUAW81PGNFrYrdXMj4oM_6ak,2 +datasets-2.14.4.dist-info/LICENSE,sha256=z8d0m5b2O9McPEK1xHG_dWgUBT6EfBDz6wA0F7xSPTA,11358 +datasets-2.14.4.dist-info/METADATA,sha256=k5iMnqWKNpq2gZ3eII7zOvUvAAj3BQZtdTXBiM3QfOY,19796 +datasets-2.14.4.dist-info/RECORD,, +datasets-2.14.4.dist-info/REQUESTED,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +datasets-2.14.4.dist-info/WHEEL,sha256=OqRkF0eY5GHssMorFjlbTIq072vpHpF60fIQA6lS9xA,92 +datasets-2.14.4.dist-info/entry_points.txt,sha256=vhdg1JXUleCZtwvozP5q5iHqRpSETfyhSDJ39zW3KUA,70 +datasets-2.14.4.dist-info/top_level.txt,sha256=9A857YvCQm_Dg3UjeKkWPz9sDBos0t3zN2pf5krTemQ,9 +datasets/__init__.py,sha256=nfpQY_LXXAkeuTywDWXA6rnFNnb_8bYGsnnqSwwzcKA,2550 +datasets/arrow_dataset.py,sha256=d83jcz_a5I3gHQ-czWb_GwjS3BZDieYPwXvBfkaJnsQ,288717 +datasets/arrow_reader.py,sha256=4zgsJiqRoNU18eAjRGM4CT5cY06WYc6Ikbfa9dVfjTA,26920 +datasets/arrow_writer.py,sha256=Ma0AmUbJdOcnc9Dcu-NW5xKLD1MuwsuAsKbmyqHzer4,33414 +datasets/builder.bak.py,sha256=YZYHkGfXIFeM878CLLM0YvyrY6gLw3_z_MEh-QnCybE,111808 +datasets/builder.py,sha256=O_OG6bd87ld6mEZRDZOKXL0ZlWxPFKF_VpiP7ehLK9k,106717 +datasets/combine.py,sha256=OvMg-5A_cBraHyEXbNTTrWjd9sbUiyA7PG6aBJpbg5Q,10924 +datasets/commands/__init__.py,sha256=rujbQtxJbwHhF9WQqp2DD9tfVTghDMJdl0v6H551Pcs,312 +datasets/commands/convert.py,sha256=-VOqHh0ySkIOfEYmR7HVs7PzouVrkVShqyUtNGcNCYU,7914 +datasets/commands/datasets_cli.py,sha256=mMYGiIYoE9kcZzcWvPDPuT2fEKlhL2hHN9RWgivQu2I,1381 
+datasets/commands/dummy_data.py,sha256=rBVQAN1wd9fvldw79PVoL3vNZdqosjO_PPO_SFEYUqw,23106 +datasets/commands/env.py,sha256=U5IPHmBXb05dgZsqsbRLm9Lw8Ni2F571QdgIY13xpro,1171 +datasets/commands/run_beam.py,sha256=Cl6zWXA00C9PjgQyMv_E7SSMN2539no26OLFeznJxYM,6812 +datasets/commands/test.py,sha256=U_Rqs78CLVDrwOamol_RZJ7xCe6TcCvHcMlpeW0Mwgk,8506 +datasets/config.py,sha256=_R1RzaTnDJhWYnch49PaPi_Wv1hiJSN9gpsorYhJvOc,8542 +datasets/data_files.py,sha256=8mq6SYcc2mr7lyjQltypcd_cTVgnViGkomCALysr2aM,27518 +datasets/dataset_dict.py,sha256=rVm7VjTWbzsNibEIk7Xm8wXfcB61TWfTPCFpLbuCnVg,100017 +datasets/distributed.py,sha256=jZ31II0mmlPMhZbEtbAsX6jlK0U69qdpV3uS5U5JFYw,1560 +datasets/download/__init__.py,sha256=lbFOtITDaR7PHrhzJ8VfRnpaOT6NYozSxUcLv_GVfTg,281 +datasets/download/download_config.py,sha256=OBsZBXFKphFysU0eocStWryF3QGFY-9A96RCtxxIi0I,4770 +datasets/download/download_manager.py,sha256=Hk6rnzoenY8K4ZW64sBqGTsDRQSupM_7brWEOa1AdLo,22040 +datasets/download/mock_download_manager.py,sha256=nrisuioyg1ZyV0424fYatCEdOeFD1RR_3sNeSmQKL-o,10445 +datasets/download/streaming_download_manager.py,sha256=HRCBNRimvMj2S364NJHK-dJHQmgYwsxZ-lMnJzLanYQ,43900 +datasets/features/__init__.py,sha256=05nCoWgkpLzZ4xhsNY-uDEeUsTaPcSjbR6pK1RyYswk,447 +datasets/features/audio.py,sha256=ISK9LOnkpggoLulSdsUrNl992zEszf4cL2MbG1Nuu0c,12319 +datasets/features/features.py,sha256=I3lbZRkebE3haHj-m5DNtW2cYysMi_HqXWVrA_ffjrA,86614 +datasets/features/image.py,sha256=p94zl-2pLmnF3ZKoH6yEpfIFAaFZgjkYaSB_jNH4InU,15135 +datasets/features/translation.py,sha256=9tPj27uvGSXBFrCL0JIkm3Do6GcSw_0NngCVbHXVBK4,4373 +datasets/filesystems/__init__.py,sha256=NfkLTMjbvhS2kAbJcclymeafIzsXucBu85zb8N8bxXA,2642 +datasets/filesystems/compression.py,sha256=Rl_E9w_OsERYgIGOVePLHI5k1mcU1cIR8wFkjW8N5cM,6100 +datasets/filesystems/s3filesystem.py,sha256=KowTCvTSsrdAU4syiaRffNw4g25-DTbjsoXBIMWz2tk,5725 +datasets/fingerprint.py,sha256=2T5pFJ0aNOYWN_YqArtZT4FpYfpv-lNllqWRirCVLJE,23309 
+datasets/formatting/__init__.py,sha256=3oQaTX0DeV03KNYmzSuSUgxUfjDyrBsDt5e0iqJv4LU,5161 +datasets/formatting/formatting.py,sha256=JwSmaFHJGf4Xa1EJtppL6y59K4_dDXfWoPOxSG50HmI,25742 +datasets/formatting/jax_formatter.py,sha256=KoTbq0XSUQ1Rp3G5IzN3cU192JZ9t5HAZtHiVpHPbB4,6839 +datasets/formatting/np_formatter.py,sha256=DJBnt3oF0fHWJCqe4j6o9BOupZ0uGrw_xxFfsGBVoyk,4525 +datasets/formatting/tf_formatter.py,sha256=QRzeq8f1ALa6961PBNFRTH3RT4S-_8soqfUl9a7F89I,4657 +datasets/formatting/torch_formatter.py,sha256=qbETKRaNFh5WNddjENvX6gEOYyf11ieC9sN9E75kMIQ,4252 +datasets/info.py,sha256=vGglh2DX5D92f3dH6R7dv4qfHtpa6VNx8yrUnaumhnQ,26844 +datasets/inspect.py,sha256=JTDJhmAHT8OXVpNEe5ztHZQTMjCT3zJRAbSNtcC7NcI,23495 +datasets/io/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +datasets/io/abc.py,sha256=LwDMXYs6YkhZuz1JiMK4PDIqgNjv7I8xH3UMUELW2ys,1672 +datasets/io/csv.py,sha256=MSCfytHfuPEEgsbfHjC21BbfdwakU7lDyO2pxGcI0_I,5294 +datasets/io/generator.py,sha256=79KzzwIBNtofseIT4Ys3Oec0rB9MrDjALZVNM6RU12o,1839 +datasets/io/json.py,sha256=i0kJEl2YJYjMX-yZ33e-uCgy4cTCU7cGMjedoNuOuBM,6320 +datasets/io/parquet.py,sha256=JJ3rim_i3upHtBfjAie1dtkq5PIP-5daygTDzmtEYsw,5833 +datasets/io/spark.py,sha256=VUIODLHgIbiK0CI0UvthQ_gUO0MQDtHUozvw7Dfs8FI,1797 +datasets/io/sql.py,sha256=scwrohCDYyYwpvKZ83fhSxJZtutHHiEY0CDmYSaGR-w,4413 +datasets/io/text.py,sha256=5XboSqdtjRNBfkgi8hFjKrp1pp6hwiaiQJqwVMvGvX0,2026 +datasets/iterable_dataset.py,sha256=GHJGsGyGrT-vrIqMKSRKJtejXfcglHP_PURq1COQz0I,108763 +datasets/keyhash.py,sha256=SvEYj4Z8jnJq_2_Iwe2CSwW_02mKFA3LgcbC42NUOhQ,3826 +datasets/load.py,sha256=tDy-2YVvgKbbRdQx_7lyUgNFbhAsfMnv42Br3xAXEOs,103111 +datasets/metric.py,sha256=CK4B7UtylfXmxisk_rRd8GfaGKaKI1SZwaKpQWq4EoM,27992 +datasets/naming.py,sha256=QIjMryT3PM3A-IQq_7yVECGHSMSzWPN7wTA5g99M1Fc,3000 +datasets/packaged_modules/__init__.py,sha256=WRXAm_CFLu5VSfqsQhg434zl8RffpWrDs6H4r2vRq28,2681 +datasets/packaged_modules/arrow/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 
+datasets/packaged_modules/arrow/arrow.py,sha256=MK8wKbYUErZqeCoq9ZDVMhN5DOX94k5wGMgsM-ONstY,3317 +datasets/packaged_modules/audiofolder/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +datasets/packaged_modules/audiofolder/audiofolder.py,sha256=BXRlK57KvYdyEo-L-Qs6qtrG2tL0QUF0cmJvl6L1N-w,1633 +datasets/packaged_modules/csv/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +datasets/packaged_modules/csv/csv.py,sha256=0fF5XJqcRWwZ8FqNS16H8bOkEth6FZcsIBZokhEPMRc,8388 +datasets/packaged_modules/folder_based_builder/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +datasets/packaged_modules/folder_based_builder/folder_based_builder.py,sha256=a6uVsxHmitdhicNyX2ZZOxJlGNtJIHHrC43poFs3tzo,22126 +datasets/packaged_modules/generator/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +datasets/packaged_modules/generator/generator.py,sha256=QZKrNB3ztWPXT_H5OFOl1CBlAlAeckW48kdyySyVVKw,928 +datasets/packaged_modules/imagefolder/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +datasets/packaged_modules/imagefolder/imagefolder.py,sha256=SYu6yxe4iBZzclT7u3m0gaACa6udSi1YOfFSy7dzdwk,1975 +datasets/packaged_modules/json/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +datasets/packaged_modules/json/json.py,sha256=Jei-FP1xNb3ysUgZQ6Mf_426sXrgZrUjP8q4xuzzWWo,9331 +datasets/packaged_modules/pandas/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +datasets/packaged_modules/pandas/pandas.py,sha256=TRgC7LkwmUq0dThVuMVblX8wlyY4lvTExSIvzyUyV1w,2302 +datasets/packaged_modules/parquet/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +datasets/packaged_modules/parquet/parquet.py,sha256=sp1vOPwt64pBWEUIpim-BwbkuDCvnlLFIAxvnC64Il0,4272 +datasets/packaged_modules/spark/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +datasets/packaged_modules/spark/spark.py,sha256=7z8KuKSRVxvmdNekgAVWC5ULP3OFR-iUdXhhkLOF-kU,13916 
+datasets/packaged_modules/sql/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +datasets/packaged_modules/sql/sql.py,sha256=Fcnok2-1uX2XnQah4BrtE5SPli6O3JKb9tzMy1lachk,4482 +datasets/packaged_modules/text/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +datasets/packaged_modules/text/text.py,sha256=RXoZlE1Go08KXgo4RPX1GW0ads1a-6iz1QRi1c66OZg,6260 +datasets/parallel/__init__.py,sha256=dEhpBOLbCcsKClTXYJnJRp-ZtrfUV6jsH-CYqviXl-E,89 +datasets/parallel/parallel.py,sha256=POcwLCtYmusW6vpop_UrQYP7OInhOSY978PP0ZOVimM,4553 +datasets/search.py,sha256=1yZmjb0t6HzKJBvjxc3s7aCZPvKB9hpc0xEhYL43riY,35414 +datasets/splits.py,sha256=DjABqLPeUGm2WTFQgEChL1b8Kc0IYC60d59oT9LDdOI,23340 +datasets/streaming.py,sha256=NBgbP5Cycgut_p2uGn3dloyV3n1AXGzUkG-7l2NGskg,6145 +datasets/table.py,sha256=gf5M16_9gzcP4GNiFeSvO2YLw7EcIuofs2VsPHQ_7g8,97004 +datasets/tasks/__init__.py,sha256=Rz2GmKCOHt29M4DDks4mmz-PrVokZ4MGvMhXD29QZFU,1615 +datasets/tasks/audio_classificiation.py,sha256=fkR37qfJfJRPgCizf9iDV-dBnsGmLo2V0w8JpMwyX0M,1297 +datasets/tasks/automatic_speech_recognition.py,sha256=zbTTsLX5N-_Da5oucuk6zBZhDdhD4N5_rzsni9lT_vo,1309 +datasets/tasks/base.py,sha256=SlYEeDS87jruZNNkDRgz-U4q7EUijePL-RTN14ngwsk,1095 +datasets/tasks/image_classification.py,sha256=llF5_koN5APq7cF_WlGy5c9hAVspRlYCprXgwAa7kCc,1297 +datasets/tasks/language_modeling.py,sha256=Vdor-TdCGdiMpaIPZr0fRvgNrt5_D-1JElXKGbfQhvI,581 +datasets/tasks/question_answering.py,sha256=z8a80QRTsouUuIYVKQRDMTxOGeSK1QMycyDHxUW42zg,1105 +datasets/tasks/summarization.py,sha256=adrpmvgfAjXCyDRdZnZ52h0FKql5-EWU61Z2-v6rN-w,772 +datasets/tasks/text_classification.py,sha256=KvlddXxnnzzjCjJmyY3Z-e1G4dpTN0UXqlmZ1L0LrjU,1403 +datasets/utils/__init__.py,sha256=1nEhDcerD-WSbA_W_6fR_CIoLTCO0M0rpgkSQJr04h0,1045 +datasets/utils/beam_utils.py,sha256=DvA0ZVrx4-T9iHpB9VpduKn435p4rFaJw0Ua5cKmpeI,2029 +datasets/utils/cache.py,sha256=ouFjySURlby2H9KqJLfpRBM8H1Fwiuo3LBlfZAB-OPo,10557 
+datasets/utils/deprecation_utils.py,sha256=hTHwlzRs92NfNVudH71LMpW70sjbsP5amebrIgi3A-U,3452 +datasets/utils/doc_utils.py,sha256=HoSm0TFaQaCYGfDgNhpBJ4Xc2WQZuOD6dTxLd9D87fs,407 +datasets/utils/download_manager.py,sha256=reLDKIa72LGFuw1Xj6uFcxeIa1yFlO3-MpGXTUPp0to,60 +datasets/utils/experimental.py,sha256=hsTzoXR2lnVpOlRIsgrSTS0iiUhAJAwl7d2xN04N3hc,1096 +datasets/utils/extract.py,sha256=hPoC4kMD8iaAaCDr61ySlYeB7VMGZXZh2ka4zV0EPYM,14194 +datasets/utils/file_utils.py,sha256=JwwPIYCOU9C9ZDtLOeSqVyKwlcmBTbmubFdKeAnPp6k,26095 +datasets/utils/filelock.py,sha256=JDUXzLFPepiHOGUeVw2D6qdUC3ruUT7dijTSTtsievI,13813 +datasets/utils/hub.py,sha256=q2hpEIjPPgzRiTk1m-hTQdHITDq5PiHWjOM1cIN5wDw,452 +datasets/utils/info_utils.py,sha256=ufjCalrf3xyEGEtcTmA53dZjGfoBIRMBRRE3FsZkUBA,5008 +datasets/utils/logging.py,sha256=sBv8RTJP6LAac7R8gOQKblEgTe9x40YmMNOho3ueW0o,7004 +datasets/utils/metadata.py,sha256=L_T6ZAHE_XjHlAFuxfGYsMz1gXYgXPlWOMuL6TBHy2w,10432 +datasets/utils/patching.py,sha256=WMNz-rvovRBkPPJZNyHXRsuH-WA5H4Gg29ocMnBmqSA,4957 +datasets/utils/py_utils.py,sha256=nsWyfkF9tKND3YaKudHytxSq1SBUIu_qGppSLmZ_Ze0,56737 +datasets/utils/readme.py,sha256=JFlaLMCGrIz0nQCdnYKUZk5d9D9DErEYfjtRrX9VzIw,12627 +datasets/utils/resources/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +datasets/utils/resources/creators.json,sha256=XtIpMZefgBOdTevRrQTkFiufbgCbp_iyxseyphYQkn0,257 +datasets/utils/resources/languages.json,sha256=Z0rQNPsfje8zMi8KdvvwxF4APwwqcskJFUvhNiLAgPM,199138 +datasets/utils/resources/multilingualities.json,sha256=02Uc8RtRzfl13l98Y_alZm5HuMYwPzL78B0S5a1X-8c,205 +datasets/utils/resources/readme_structure.yaml,sha256=hNf9msoBZw5jfakQrDb0Af8T325TXdcaHsAO2MUcZvY,3877 +datasets/utils/resources/size_categories.json,sha256=_5nAP7z8R6t7_GfER81QudFO6Y1tqYu4AWrr4Aot8S8,171 +datasets/utils/sharding.py,sha256=FDi895opKH7XkpfIu-ag9PqBQo2PGx0tSO3Dg-gDAAs,4288 +datasets/utils/stratify.py,sha256=uMwuCDRbW342vy-lXDHs6IQusOr7c9nOG3PpnWyzJO4,4091 
+datasets/utils/tf_utils.py,sha256=YWmXP525b-kp1A-pnF-rGEOKqmg0Dv5j8RXJieSbkZc,25044 +datasets/utils/typing.py,sha256=LznosIqUzjXgwbRLAGCv4_7-yZo7muYY42Y3495oz5I,224 +datasets/utils/version.py,sha256=Z82cHpjTbQVJyWgnwSU8DsW2G0y-sSbSoOVeQrAds9k,3281 diff --git a/lib/python3.10/site-packages/datasets-2.14.4.dist-info/REQUESTED b/lib/python3.10/site-packages/datasets-2.14.4.dist-info/REQUESTED new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/lib/python3.10/site-packages/datasets-2.14.4.dist-info/WHEEL b/lib/python3.10/site-packages/datasets-2.14.4.dist-info/WHEEL new file mode 100644 index 0000000000000000000000000000000000000000..385faab0525ccdbfd1070a8bebcca3ac8617236e --- /dev/null +++ b/lib/python3.10/site-packages/datasets-2.14.4.dist-info/WHEEL @@ -0,0 +1,5 @@ +Wheel-Version: 1.0 +Generator: bdist_wheel (0.36.2) +Root-Is-Purelib: true +Tag: py3-none-any + diff --git a/lib/python3.10/site-packages/datasets-2.14.4.dist-info/entry_points.txt b/lib/python3.10/site-packages/datasets-2.14.4.dist-info/entry_points.txt new file mode 100644 index 0000000000000000000000000000000000000000..30133f3a2f8f4748cbf14b40e73ef94d186927e8 --- /dev/null +++ b/lib/python3.10/site-packages/datasets-2.14.4.dist-info/entry_points.txt @@ -0,0 +1,3 @@ +[console_scripts] +datasets-cli = datasets.commands.datasets_cli:main + diff --git a/lib/python3.10/site-packages/datasets-2.14.4.dist-info/top_level.txt b/lib/python3.10/site-packages/datasets-2.14.4.dist-info/top_level.txt new file mode 100644 index 0000000000000000000000000000000000000000..aee11b288aa3e6803c53bde002f7594c44497f5b --- /dev/null +++ b/lib/python3.10/site-packages/datasets-2.14.4.dist-info/top_level.txt @@ -0,0 +1 @@ +datasets diff --git a/lib/python3.10/site-packages/datasets/formatting/__init__.py b/lib/python3.10/site-packages/datasets/formatting/__init__.py new file mode 100644 index 
# Copyright 2020 The HuggingFace Datasets Authors and the TensorFlow Datasets Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

# flake8: noqa
# Lint as: python3

from typing import Dict, List, Optional, Type

from .. import config
from ..utils import logging
from .formatting import (
    ArrowFormatter,
    CustomFormatter,
    Formatter,
    PandasFormatter,
    PythonFormatter,
    TensorFormatter,
    format_table,
    query_table,
)
from .np_formatter import NumpyFormatter


logger = logging.get_logger(__name__)

# Canonical format type name (None = plain python) -> Formatter subclass.
_FORMAT_TYPES: Dict[Optional[str], Type[Formatter]] = {}
# Alias -> canonical format type name (a canonical name is also an alias of itself).
_FORMAT_TYPES_ALIASES: Dict[Optional[str], str] = {}
# Alias -> exception to raise when the formatter's backend is not installed.
_FORMAT_TYPES_ALIASES_UNAVAILABLE: Dict[Optional[str], Exception] = {}


def _register_formatter(
    formatter_cls: type,
    format_type: Optional[str],
    aliases: Optional[List[str]] = None,
):
    """
    Register a Formatter object using a name and optional aliases.
    This function must be used on a Formatter class.

    Re-registering an existing name or alias overwrites the previous entry with a warning.
    """
    aliases = aliases if aliases is not None else []
    if format_type in _FORMAT_TYPES:
        logger.warning(
            f"Overwriting format type '{format_type}' ({_FORMAT_TYPES[format_type].__name__} -> {formatter_cls.__name__})"
        )
    _FORMAT_TYPES[format_type] = formatter_cls
    # The canonical name is registered as an alias of itself so alias lookup always resolves.
    for alias in set(aliases + [format_type]):
        if alias in _FORMAT_TYPES_ALIASES:
            logger.warning(
                f"Overwriting format type alias '{alias}' ({_FORMAT_TYPES_ALIASES[alias]} -> {format_type})"
            )
        _FORMAT_TYPES_ALIASES[alias] = format_type


def _register_unavailable_formatter(
    unavailable_error: Exception, format_type: Optional[str], aliases: Optional[List[str]] = None
):
    """
    Register an unavailable Formatter object using a name and optional aliases.
    This function must be used on an Exception object that is raised when trying to get the unavailable formatter.
    """
    aliases = aliases if aliases is not None else []
    for alias in set(aliases + [format_type]):
        _FORMAT_TYPES_ALIASES_UNAVAILABLE[alias] = unavailable_error


# Here we define all the available formatting functions that can be used by `Dataset.set_format`
_register_formatter(PythonFormatter, None, aliases=["python"])
_register_formatter(ArrowFormatter, "arrow", aliases=["pa", "pyarrow"])
_register_formatter(NumpyFormatter, "numpy", aliases=["np"])
_register_formatter(PandasFormatter, "pandas", aliases=["pd"])
_register_formatter(CustomFormatter, "custom")

# Optional-backend formatters: register the real formatter when the backend is
# importable, otherwise register the error to raise on use.
if config.TORCH_AVAILABLE:
    from .torch_formatter import TorchFormatter

    _register_formatter(TorchFormatter, "torch", aliases=["pt", "pytorch"])
else:
    _torch_error = ValueError("PyTorch needs to be installed to be able to return PyTorch tensors.")
    _register_unavailable_formatter(_torch_error, "torch", aliases=["pt", "pytorch"])

if config.TF_AVAILABLE:
    from .tf_formatter import TFFormatter

    _register_formatter(TFFormatter, "tensorflow", aliases=["tf"])
else:
    _tf_error = ValueError("Tensorflow needs to be installed to be able to return Tensorflow tensors.")
    _register_unavailable_formatter(_tf_error, "tensorflow", aliases=["tf"])

if config.JAX_AVAILABLE:
    from .jax_formatter import JaxFormatter

    _register_formatter(JaxFormatter, "jax", aliases=[])
else:
    _jax_error = ValueError("JAX needs to be installed to be able to return JAX arrays.")
    _register_unavailable_formatter(_jax_error, "jax", aliases=[])


def get_format_type_from_alias(format_type: Optional[str]) -> Optional[str]:
    """If the given format type is a known alias, then return its main type name. Otherwise return the type with no change."""
    if format_type in _FORMAT_TYPES_ALIASES:
        return _FORMAT_TYPES_ALIASES[format_type]
    else:
        return format_type


def get_formatter(format_type: Optional[str], **format_kwargs) -> Formatter:
    """
    Factory function to get a Formatter given its type name and keyword arguments.
    A formatter is an object that extracts and formats data from pyarrow table.
    It defines the formatting for rows, columns and batches.
    If the formatter for a given type name doesn't exist or is not available, an error is raised.
    """
    format_type = get_format_type_from_alias(format_type)
    if format_type in _FORMAT_TYPES:
        return _FORMAT_TYPES[format_type](**format_kwargs)
    if format_type in _FORMAT_TYPES_ALIASES_UNAVAILABLE:
        raise _FORMAT_TYPES_ALIASES_UNAVAILABLE[format_type]
    else:
        # `is not None` (not `!= None`) and no shadowing of the `type` builtin;
        # the rendered message is unchanged.
        raise ValueError(
            f"Return type should be None or selected in {[fmt for fmt in _FORMAT_TYPES if fmt is not None]}, but got '{format_type}'"
        )
# Copyright 2020 The HuggingFace Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from collections.abc import Mapping, MutableMapping
from functools import partial

# Lint as: python3
from typing import Any, Callable, Dict, Generic, Iterable, List, Optional, TypeVar, Union

import numpy as np
import pandas as pd
import pyarrow as pa
from packaging import version

from .. import config
from ..features import Features
from ..features.features import _ArrayXDExtensionType, _is_zero_copy_only, decode_nested_example, pandas_types_mapper
from ..table import Table
from ..utils.py_utils import no_op_if_value_is_null


T = TypeVar("T")

# Type variables for the row/column/batch representations produced by a formatter.
RowFormat = TypeVar("RowFormat")
ColumnFormat = TypeVar("ColumnFormat")
BatchFormat = TypeVar("BatchFormat")


def _is_range_contiguous(key: range) -> bool:
    """Return True if `key` spans a contiguous, forward region (step 1 and stop >= start)."""
    return key.step == 1 and key.stop >= key.start


def _raise_bad_key_type(key: Any):
    """Raise a TypeError for a key that is not one of the supported indexing types."""
    raise TypeError(
        f"Wrong key type: '{key}' of type '{type(key)}'. Expected one of int, slice, range, str or Iterable."
    )


def _query_table_with_indices_mapping(
    table: Table, key: Union[int, slice, range, str, Iterable], indices: Table
) -> pa.Table:
    """
    Query a pyarrow Table to extract the subtable that correspond to the given key.
    The :obj:`indices` parameter corresponds to the indices mapping in case we want to take into
    account a shuffling or an indices selection for example.
    The indices table must contain one column named "indices" of type uint64.
    """
    if isinstance(key, int):
        # Resolve the requested position through the indices mapping
        # (the modulo makes negative indices wrap around).
        key = indices.fast_slice(key % indices.num_rows, 1).column(0)[0].as_py()
        return _query_table(table, key)
    if isinstance(key, slice):
        key = range(*key.indices(indices.num_rows))
    if isinstance(key, range):
        if _is_range_contiguous(key) and key.start >= 0:
            return _query_table(
                table, [i.as_py() for i in indices.fast_slice(key.start, key.stop - key.start).column(0)]
            )
        else:
            pass  # treat as an iterable instead
    if isinstance(key, str):
        table = table.select([key])
        return _query_table(table, indices.column(0).to_pylist())
    if isinstance(key, Iterable):
        return _query_table(table, [indices.fast_slice(i, 1).column(0)[0].as_py() for i in key])

    _raise_bad_key_type(key)


def _query_table(table: Table, key: Union[int, slice, range, str, Iterable]) -> pa.Table:
    """
    Query a pyarrow Table to extract the subtable that correspond to the given key.
    """
    if isinstance(key, int):
        return table.fast_slice(key % table.num_rows, 1)
    if isinstance(key, slice):
        key = range(*key.indices(table.num_rows))
    if isinstance(key, range):
        if _is_range_contiguous(key) and key.start >= 0:
            return table.fast_slice(key.start, key.stop - key.start)
        else:
            pass  # treat as an iterable instead
    if isinstance(key, str):
        return table.table.drop([column for column in table.column_names if column != key])
    if isinstance(key, Iterable):
        key = np.fromiter(key, np.int64)
        if len(key) == 0:
            return table.table.slice(0, 0)
        # don't use pyarrow.Table.take even for pyarrow >=1.0 (see https://issues.apache.org/jira/browse/ARROW-9773)
        return table.fast_gather(key % table.num_rows)

    _raise_bad_key_type(key)


def _is_array_with_nulls(pa_array: pa.Array) -> bool:
    """Return True if the array contains at least one null value."""
    return pa_array.null_count > 0


class BaseArrowExtractor(Generic[RowFormat, ColumnFormat, BatchFormat]):
    """
    Arrow extractor are used to extract data from pyarrow tables.
    It makes it possible to extract rows, columns and batches.
    These three extraction types have to be implemented.
    """

    def extract_row(self, pa_table: pa.Table) -> RowFormat:
        raise NotImplementedError

    def extract_column(self, pa_table: pa.Table) -> ColumnFormat:
        raise NotImplementedError

    def extract_batch(self, pa_table: pa.Table) -> BatchFormat:
        raise NotImplementedError
def _unnest(py_dict: Dict[str, List[T]]) -> Dict[str, T]:
    """Return the first element of a batch (dict) as a row (dict)"""
    return {key: array[0] for key, array in py_dict.items()}


class SimpleArrowExtractor(BaseArrowExtractor[pa.Table, pa.Array, pa.Table]):
    """Extractor that keeps the data as pyarrow objects (no conversion)."""

    def extract_row(self, pa_table: pa.Table) -> pa.Table:
        return pa_table

    def extract_column(self, pa_table: pa.Table) -> pa.Array:
        return pa_table.column(0)

    def extract_batch(self, pa_table: pa.Table) -> pa.Table:
        return pa_table


class PythonArrowExtractor(BaseArrowExtractor[dict, list, dict]):
    """Extractor that converts the data to plain python objects (dict / list)."""

    def extract_row(self, pa_table: pa.Table) -> dict:
        return _unnest(pa_table.to_pydict())

    def extract_column(self, pa_table: pa.Table) -> list:
        return pa_table.column(0).to_pylist()

    def extract_batch(self, pa_table: pa.Table) -> dict:
        return pa_table.to_pydict()


class NumpyArrowExtractor(BaseArrowExtractor[dict, np.ndarray, dict]):
    """Extractor that converts the data to numpy arrays.

    Extra keyword arguments are forwarded to the final `np.asarray` call via
    `np_array_kwargs` upstream conventions (kept for interface compatibility).
    """

    def __init__(self, **np_array_kwargs):
        self.np_array_kwargs = np_array_kwargs

    def extract_row(self, pa_table: pa.Table) -> dict:
        return _unnest(self.extract_batch(pa_table))

    def extract_column(self, pa_table: pa.Table) -> np.ndarray:
        return self._arrow_array_to_numpy(pa_table[pa_table.column_names[0]])

    def extract_batch(self, pa_table: pa.Table) -> dict:
        return {col: self._arrow_array_to_numpy(pa_table[col]) for col in pa_table.column_names}

    def _arrow_array_to_numpy(self, pa_array: pa.Array) -> np.ndarray:
        if isinstance(pa_array, pa.ChunkedArray):
            if isinstance(pa_array.type, _ArrayXDExtensionType):
                # don't call to_pylist() to preserve dtype of the fixed-size array
                zero_copy_only = _is_zero_copy_only(pa_array.type.storage_dtype, unnest=True)
            else:
                # zero-copy is only safe when no chunk contains nulls
                zero_copy_only = _is_zero_copy_only(pa_array.type) and all(
                    not _is_array_with_nulls(chunk) for chunk in pa_array.chunks
                )
            # Both branches build the same per-row list; only `zero_copy_only` differs.
            array: List = [
                row for chunk in pa_array.chunks for row in chunk.to_numpy(zero_copy_only=zero_copy_only)
            ]
        else:
            if isinstance(pa_array.type, _ArrayXDExtensionType):
                # don't call to_pylist() to preserve dtype of the fixed-size array
                zero_copy_only = _is_zero_copy_only(pa_array.type.storage_dtype, unnest=True)
                array: List = pa_array.to_numpy(zero_copy_only=zero_copy_only)
            else:
                zero_copy_only = _is_zero_copy_only(pa_array.type) and not _is_array_with_nulls(pa_array)
                array: List = pa_array.to_numpy(zero_copy_only=zero_copy_only).tolist()
        if len(array) > 0:
            if any(
                (isinstance(x, np.ndarray) and (x.dtype == object or x.shape != array[0].shape))
                or (isinstance(x, float) and np.isnan(x))
                for x in array
            ):
                # Ragged / mixed case: `np.asarray` replaces `np.array(..., copy=False)`,
                # which raises on NumPy >= 2.0 whenever a copy is actually required.
                return np.asarray(array, dtype=object)
        return np.asarray(array)


class PandasArrowExtractor(BaseArrowExtractor[pd.DataFrame, pd.Series, pd.DataFrame]):
    """Extractor that converts the data to pandas objects."""

    def extract_row(self, pa_table: pa.Table) -> pd.DataFrame:
        return pa_table.slice(length=1).to_pandas(types_mapper=pandas_types_mapper)

    def extract_column(self, pa_table: pa.Table) -> pd.Series:
        return pa_table.select([0]).to_pandas(types_mapper=pandas_types_mapper)[pa_table.column_names[0]]

    def extract_batch(self, pa_table: pa.Table) -> pd.DataFrame:
        return pa_table.to_pandas(types_mapper=pandas_types_mapper)


class PythonFeaturesDecoder:
    """Decode plain-python rows/columns/batches with the dataset's Features (no-op when features is None)."""

    def __init__(self, features: Optional[Features]):
        self.features = features

    def decode_row(self, row: dict) -> dict:
        return self.features.decode_example(row) if self.features else row

    def decode_column(self, column: list, column_name: str) -> list:
        return self.features.decode_column(column, column_name) if self.features else column

    def decode_batch(self, batch: dict) -> dict:
        return self.features.decode_batch(batch) if self.features else batch
class PandasFeaturesDecoder:
    """Decode pandas rows/columns/batches with the dataset's Features (no-op when features is None)."""

    def __init__(self, features: Optional[Features]):
        self.features = features

    def decode_row(self, row: pd.DataFrame) -> pd.DataFrame:
        # Map each column that requires decoding to its (null-safe) decode function.
        decode = {}
        if self.features:
            for column_name, feature in self.features.items():
                if self.features._column_requires_decoding[column_name]:
                    decode[column_name] = no_op_if_value_is_null(partial(decode_nested_example, feature))
        if decode:
            row[list(decode.keys())] = row.transform(decode)
        return row

    def decode_column(self, column: pd.Series, column_name: str) -> pd.Series:
        decode = None
        if self.features and column_name in self.features and self.features._column_requires_decoding[column_name]:
            decode = no_op_if_value_is_null(partial(decode_nested_example, self.features[column_name]))
        if decode:
            column = column.transform(decode)
        return column

    def decode_batch(self, batch: pd.DataFrame) -> pd.DataFrame:
        # A pandas batch is a DataFrame, exactly like a row.
        return self.decode_row(batch)


class LazyDict(MutableMapping):
    """A dictionary backed by Arrow data. The values are formatted on-the-fly when accessing the dictionary."""

    def __init__(self, pa_table: pa.Table, formatter: "Formatter"):
        self.pa_table = pa_table
        self.formatter = formatter

        # One entry per column; values start as None placeholders until formatted.
        self.data = dict.fromkeys(pa_table.column_names)
        # Columns still waiting to be formatted on first access.
        self.keys_to_format = set(self.data)

    def __len__(self):
        return len(self.data)

    def __getitem__(self, key):
        value = self.data[key]
        if key in self.keys_to_format:
            # Format lazily on first access, then cache the result.
            value = self.format(key)
            self.data[key] = value
            self.keys_to_format.remove(key)
        return value

    def __setitem__(self, key, value):
        # An explicitly assigned value never needs formatting.
        if key in self.keys_to_format:
            self.keys_to_format.remove(key)
        self.data[key] = value

    def __delitem__(self, key) -> None:
        if key in self.keys_to_format:
            self.keys_to_format.remove(key)
        del self.data[key]

    def __iter__(self):
        return iter(self.data)

    def __contains__(self, key):
        return key in self.data

    def __repr__(self):
        self._format_all()
        return repr(self.data)

    if config.PY_VERSION >= version.parse("3.9"):
        # merging with the union ("|") operator is supported in Python 3.9+

        def __or__(self, other):
            if isinstance(other, LazyDict):
                inst = self.copy()
                other = other.copy()
                other._format_all()
                inst.keys_to_format -= other.data.keys()
                inst.data = inst.data | other.data
                return inst
            if isinstance(other, dict):
                inst = self.copy()
                inst.keys_to_format -= other.keys()
                inst.data = inst.data | other
                return inst
            return NotImplemented

        def __ror__(self, other):
            if isinstance(other, LazyDict):
                inst = self.copy()
                other = other.copy()
                other._format_all()
                inst.keys_to_format -= other.data.keys()
                inst.data = other.data | inst.data
                return inst
            if isinstance(other, dict):
                inst = self.copy()
                inst.keys_to_format -= other.keys()
                inst.data = other | inst.data
                return inst
            return NotImplemented

        def __ior__(self, other):
            if isinstance(other, LazyDict):
                other = other.copy()
                other._format_all()
                self.keys_to_format -= other.data.keys()
                self.data |= other.data
            else:
                self.keys_to_format -= other.keys()
                self.data |= other
            return self

    def __copy__(self):
        # Identical to `UserDict.__copy__`
        inst = self.__class__.__new__(self.__class__)
        inst.__dict__.update(self.__dict__)
        # Create a copy and avoid triggering descriptors
        inst.__dict__["data"] = self.__dict__["data"].copy()
        inst.__dict__["keys_to_format"] = self.__dict__["keys_to_format"].copy()
        return inst

    def copy(self):
        import copy

        return copy.copy(self)

    @classmethod
    def fromkeys(cls, iterable, value=None):
        # A LazyDict is meaningless without a backing Arrow table.
        raise NotImplementedError

    def format(self, key):
        # Subclasses decide how a single column is materialized.
        raise NotImplementedError

    def _format_all(self):
        for key in self.keys_to_format:
            self.data[key] = self.format(key)
        self.keys_to_format.clear()


class LazyRow(LazyDict):
    """Lazy view over a single row: each column formats to a single value."""

    def format(self, key):
        return self.formatter.format_column(self.pa_table.select([key]))[0]


class LazyBatch(LazyDict):
    """Lazy view over a batch: each column formats to a whole column."""

    def format(self, key):
        return self.formatter.format_column(self.pa_table.select([key]))


class Formatter(Generic[RowFormat, ColumnFormat, BatchFormat]):
    """
    A formatter is an object that extracts and formats data from pyarrow tables.
    It defines the formatting for rows, columns and batches.
    """

    simple_arrow_extractor = SimpleArrowExtractor
    python_arrow_extractor = PythonArrowExtractor
    numpy_arrow_extractor = NumpyArrowExtractor
    pandas_arrow_extractor = PandasArrowExtractor

    def __init__(self, features: Optional[Features] = None):
        self.features = features
        self.python_features_decoder = PythonFeaturesDecoder(self.features)
        self.pandas_features_decoder = PandasFeaturesDecoder(self.features)

    def __call__(self, pa_table: pa.Table, query_type: str) -> Union[RowFormat, ColumnFormat, BatchFormat]:
        # NOTE(review): an unrecognized query_type silently returns None —
        # callers appear to only pass "row"/"column"/"batch"; confirm before tightening.
        if query_type == "row":
            return self.format_row(pa_table)
        elif query_type == "column":
            return self.format_column(pa_table)
        elif query_type == "batch":
            return self.format_batch(pa_table)

    def format_row(self, pa_table: pa.Table) -> RowFormat:
        raise NotImplementedError

    def format_column(self, pa_table: pa.Table) -> ColumnFormat:
        raise NotImplementedError

    def format_batch(self, pa_table: pa.Table) -> BatchFormat:
        raise NotImplementedError


class TensorFormatter(Formatter[RowFormat, ColumnFormat, BatchFormat]):
    """Base class for formatters that convert the data into framework tensors."""

    def recursive_tensorize(self, data_struct: dict):
        raise NotImplementedError


class ArrowFormatter(Formatter[pa.Table, pa.Array, pa.Table]):
    """Formatter that returns the data as pyarrow objects."""

    def format_row(self, pa_table: pa.Table) -> pa.Table:
        return self.simple_arrow_extractor().extract_row(pa_table)

    def format_column(self, pa_table: pa.Table) -> pa.Array:
        return self.simple_arrow_extractor().extract_column(pa_table)

    def format_batch(self, pa_table: pa.Table) -> pa.Table:
        return self.simple_arrow_extractor().extract_batch(pa_table)


class PythonFormatter(Formatter[Mapping, list, Mapping]):
    """Formatter that returns plain python objects, optionally as lazy views."""

    def __init__(self, features=None, lazy=False):
        super().__init__(features)
        self.lazy = lazy

    def format_row(self, pa_table: pa.Table) -> Mapping:
        if self.lazy:
            return LazyRow(pa_table, self)
        row = self.python_arrow_extractor().extract_row(pa_table)
        row = self.python_features_decoder.decode_row(row)
        return row

    def format_column(self, pa_table: pa.Table) -> list:
        column = self.python_arrow_extractor().extract_column(pa_table)
        column = self.python_features_decoder.decode_column(column, pa_table.column_names[0])
        return column

    def format_batch(self, pa_table: pa.Table) -> Mapping:
        if self.lazy:
            return LazyBatch(pa_table, self)
        batch = self.python_arrow_extractor().extract_batch(pa_table)
        batch = self.python_features_decoder.decode_batch(batch)
        return batch
format_column(self, pa_table: pa.Table) -> list: + column = self.python_arrow_extractor().extract_column(pa_table) + column = self.python_features_decoder.decode_column(column, pa_table.column_names[0]) + return column + + def format_batch(self, pa_table: pa.Table) -> Mapping: + if self.lazy: + return LazyBatch(pa_table, self) + batch = self.python_arrow_extractor().extract_batch(pa_table) + batch = self.python_features_decoder.decode_batch(batch) + return batch + + +class PandasFormatter(Formatter[pd.DataFrame, pd.Series, pd.DataFrame]): + def format_row(self, pa_table: pa.Table) -> pd.DataFrame: + row = self.pandas_arrow_extractor().extract_row(pa_table) + row = self.pandas_features_decoder.decode_row(row) + return row + + def format_column(self, pa_table: pa.Table) -> pd.Series: + column = self.pandas_arrow_extractor().extract_column(pa_table) + column = self.pandas_features_decoder.decode_column(column, pa_table.column_names[0]) + return column + + def format_batch(self, pa_table: pa.Table) -> pd.DataFrame: + row = self.pandas_arrow_extractor().extract_batch(pa_table) + row = self.pandas_features_decoder.decode_batch(row) + return row + + +class CustomFormatter(Formatter[dict, ColumnFormat, dict]): + """ + A user-defined custom formatter function defined by a ``transform``. + The transform must take as input a batch of data extracted for an arrow table using the python extractor, + and return a batch. + If the output batch is not a dict, then output_all_columns won't work. + If the ouput batch has several fields, then querying a single column won't work since we don't know which field + to return. 
+ """ + + def __init__(self, transform: Callable[[dict], dict], features=None, **kwargs): + super().__init__(features=features) + self.transform = transform + + def format_row(self, pa_table: pa.Table) -> dict: + formatted_batch = self.format_batch(pa_table) + try: + return _unnest(formatted_batch) + except Exception as exc: + raise TypeError( + f"Custom formatting function must return a dict of sequences to be able to pick a row, but got {formatted_batch}" + ) from exc + + def format_column(self, pa_table: pa.Table) -> ColumnFormat: + formatted_batch = self.format_batch(pa_table) + if hasattr(formatted_batch, "keys"): + if len(formatted_batch.keys()) > 1: + raise TypeError( + "Tried to query a column but the custom formatting function returns too many columns. " + f"Only one column was expected but got columns {list(formatted_batch.keys())}." + ) + else: + raise TypeError( + f"Custom formatting function must return a dict to be able to pick a row, but got {formatted_batch}" + ) + try: + return formatted_batch[pa_table.column_names[0]] + except Exception as exc: + raise TypeError( + f"Custom formatting function must return a dict to be able to pick a row, but got {formatted_batch}" + ) from exc + + def format_batch(self, pa_table: pa.Table) -> dict: + batch = self.python_arrow_extractor().extract_batch(pa_table) + batch = self.python_features_decoder.decode_batch(batch) + return self.transform(batch) + + +def _check_valid_column_key(key: str, columns: List[str]) -> None: + if key not in columns: + raise KeyError(f"Column {key} not in the dataset. 
Current columns in the dataset: {columns}") + + +def _check_valid_index_key(key: Union[int, slice, range, Iterable], size: int) -> None: + if isinstance(key, int): + if (key < 0 and key + size < 0) or (key >= size): + raise IndexError(f"Invalid key: {key} is out of bounds for size {size}") + return + elif isinstance(key, slice): + pass + elif isinstance(key, range): + if len(key) > 0: + _check_valid_index_key(max(key), size=size) + _check_valid_index_key(min(key), size=size) + elif isinstance(key, Iterable): + if len(key) > 0: + _check_valid_index_key(int(max(key)), size=size) + _check_valid_index_key(int(min(key)), size=size) + else: + _raise_bad_key_type(key) + + +def key_to_query_type(key: Union[int, slice, range, str, Iterable]) -> str: + if isinstance(key, int): + return "row" + elif isinstance(key, str): + return "column" + elif isinstance(key, (slice, range, Iterable)): + return "batch" + _raise_bad_key_type(key) + + +def query_table( + table: Table, + key: Union[int, slice, range, str, Iterable], + indices: Optional[Table] = None, +) -> pa.Table: + """ + Query a Table to extract the subtable that correspond to the given key. + + Args: + table (``datasets.table.Table``): The input Table to query from + key (``Union[int, slice, range, str, Iterable]``): The key can be of different types: + - an integer i: the subtable containing only the i-th row + - a slice [i:j:k]: the subtable containing the rows that correspond to this slice + - a range(i, j, k): the subtable containing the rows that correspond to this range + - a string c: the subtable containing all the rows but only the column c + - an iterable l: the subtable that is the concatenation of all the i-th rows for all i in the iterable + indices (Optional ``datasets.table.Table``): If not None, it is used to re-map the given key to the table rows. + The indices table must contain one column named "indices" of type uint64. + This is used in case of shuffling or rows selection. 
+ + + Returns: + ``pyarrow.Table``: the result of the query on the input table + """ + # Check if key is valid + if not isinstance(key, (int, slice, range, str, Iterable)): + _raise_bad_key_type(key) + if isinstance(key, str): + _check_valid_column_key(key, table.column_names) + else: + size = indices.num_rows if indices is not None else table.num_rows + _check_valid_index_key(key, size) + # Query the main table + if indices is None: + pa_subtable = _query_table(table, key) + else: + pa_subtable = _query_table_with_indices_mapping(table, key, indices=indices) + return pa_subtable + + +def format_table( + table: Table, + key: Union[int, slice, range, str, Iterable], + formatter: Formatter, + format_columns: Optional[list] = None, + output_all_columns=False, +): + """ + Format a Table depending on the key that was used and a Formatter object. + + Args: + table (``datasets.table.Table``): The input Table to format + key (``Union[int, slice, range, str, Iterable]``): Depending on the key that was used, the formatter formats + the table as either a row, a column or a batch. + formatter (``datasets.formatting.formatting.Formatter``): Any subclass of a Formatter such as + PythonFormatter, NumpyFormatter, etc. + format_columns (:obj:`List[str]`, optional): if not None, it defines the columns that will be formatted using the + given formatter. Other columns are discarded (unless ``output_all_columns`` is True) + output_all_columns (:obj:`bool`, defaults to False). If True, the formatted output is completed using the columns + that are not in the ``format_columns`` list. For these columns, the PythonFormatter is used. + + + Returns: + A row, column or batch formatted object defined by the Formatter: + - the PythonFormatter returns a dictionary for a row or a batch, and a list for a column. + - the NumpyFormatter returns a dictionary for a row or a batch, and a np.array for a column. 
+ - the PandasFormatter returns a pd.DataFrame for a row or a batch, and a pd.Series for a column. + - the TorchFormatter returns a dictionary for a row or a batch, and a torch.Tensor for a column. + - the TFFormatter returns a dictionary for a row or a batch, and a tf.Tensor for a column. + """ + if isinstance(table, Table): + pa_table = table.table + else: + pa_table = table + query_type = key_to_query_type(key) + python_formatter = PythonFormatter(features=None) + if format_columns is None: + return formatter(pa_table, query_type=query_type) + elif query_type == "column": + if key in format_columns: + return formatter(pa_table, query_type) + else: + return python_formatter(pa_table, query_type=query_type) + else: + pa_table_to_format = pa_table.drop(col for col in pa_table.column_names if col not in format_columns) + formatted_output = formatter(pa_table_to_format, query_type=query_type) + if output_all_columns: + if isinstance(formatted_output, MutableMapping): + pa_table_with_remaining_columns = pa_table.drop( + col for col in pa_table.column_names if col in format_columns + ) + remaining_columns_dict = python_formatter(pa_table_with_remaining_columns, query_type=query_type) + formatted_output.update(remaining_columns_dict) + else: + raise TypeError( + f"Custom formatting function must return a dict to work with output_all_columns=True, but got {formatted_output}" + ) + return formatted_output diff --git a/lib/python3.10/site-packages/datasets/formatting/jax_formatter.py b/lib/python3.10/site-packages/datasets/formatting/jax_formatter.py new file mode 100644 index 0000000000000000000000000000000000000000..8035341c5cd2794345163b388945b3a092708916 --- /dev/null +++ b/lib/python3.10/site-packages/datasets/formatting/jax_formatter.py @@ -0,0 +1,160 @@ +# Copyright 2021 The HuggingFace Authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# Lint as: python3 +import sys +from collections.abc import Mapping +from typing import TYPE_CHECKING, Dict, Optional + +import numpy as np +import pyarrow as pa + +from .. import config +from ..utils.logging import get_logger +from ..utils.py_utils import map_nested +from .formatting import TensorFormatter + + +if TYPE_CHECKING: + import jax + import jaxlib + +logger = get_logger() + +DEVICE_MAPPING: Optional[dict] = None + + +class JaxFormatter(TensorFormatter[Mapping, "jax.Array", Mapping]): + def __init__(self, features=None, device=None, **jnp_array_kwargs): + super().__init__(features=features) + import jax + from jaxlib.xla_client import Device + + if isinstance(device, Device): + raise ValueError( + f"Expected {device} to be a `str` not {type(device)}, as `jaxlib.xla_extension.Device` " + "is not serializable neither with `pickle` nor with `dill`. Instead you can surround " + "the device with `str()` to get its string identifier that will be internally mapped " + "to the actual `jaxlib.xla_extension.Device`." 
+ ) + self.device = device if isinstance(device, str) else str(jax.devices()[0]) + # using global variable since `jaxlib.xla_extension.Device` is not serializable neither + # with `pickle` nor with `dill`, so we need to use a global variable instead + global DEVICE_MAPPING + if DEVICE_MAPPING is None: + DEVICE_MAPPING = self._map_devices_to_str() + if self.device not in list(DEVICE_MAPPING.keys()): + logger.warning( + f"Device with string identifier {self.device} not listed among the available " + f"devices: {list(DEVICE_MAPPING.keys())}, so falling back to the default " + f"device: {str(jax.devices()[0])}." + ) + self.device = str(jax.devices()[0]) + self.jnp_array_kwargs = jnp_array_kwargs + + @staticmethod + def _map_devices_to_str() -> Dict[str, "jaxlib.xla_extension.Device"]: + import jax + + return {str(device): device for device in jax.devices()} + + def _consolidate(self, column): + import jax + import jax.numpy as jnp + + if isinstance(column, list) and column: + if all( + isinstance(x, jax.Array) and x.shape == column[0].shape and x.dtype == column[0].dtype for x in column + ): + return jnp.stack(column, axis=0) + return column + + def _tensorize(self, value): + import jax + import jax.numpy as jnp + + if isinstance(value, (str, bytes, type(None))): + return value + elif isinstance(value, (np.character, np.ndarray)) and np.issubdtype(value.dtype, np.character): + return value.tolist() + + default_dtype = {} + + if isinstance(value, (np.number, np.ndarray)) and np.issubdtype(value.dtype, np.integer): + # the default int precision depends on the jax config + # see https://jax.readthedocs.io/en/latest/notebooks/Common_Gotchas_in_JAX.html#double-64bit-precision + if jax.config.jax_enable_x64: + default_dtype = {"dtype": jnp.int64} + else: + default_dtype = {"dtype": jnp.int32} + elif isinstance(value, (np.number, np.ndarray)) and np.issubdtype(value.dtype, np.floating): + default_dtype = {"dtype": jnp.float32} + elif config.PIL_AVAILABLE and "PIL" in 
sys.modules: + import PIL.Image + + if isinstance(value, PIL.Image.Image): + value = np.asarray(value) + + # using global variable since `jaxlib.xla_extension.Device` is not serializable neither + # with `pickle` nor with `dill`, so we need to use a global variable instead + global DEVICE_MAPPING + if DEVICE_MAPPING is None: + DEVICE_MAPPING = self._map_devices_to_str() + + with jax.default_device(DEVICE_MAPPING[self.device]): + # calling jnp.array on a np.ndarray does copy the data + # see https://github.com/google/jax/issues/4486 + return jnp.array(value, **{**default_dtype, **self.jnp_array_kwargs}) + + def _recursive_tensorize(self, data_struct): + import jax + + # support for torch, tf, jax etc. + if config.TORCH_AVAILABLE and "torch" in sys.modules: + import torch + + if isinstance(data_struct, torch.Tensor): + return self._tensorize(data_struct.detach().cpu().numpy()[()]) + if hasattr(data_struct, "__array__") and not isinstance(data_struct, jax.Array): + data_struct = data_struct.__array__() + # support for nested types like struct of list of struct + if isinstance(data_struct, np.ndarray): + if data_struct.dtype == object: # jax arrays cannot be instantied from an array of objects + return self._consolidate([self.recursive_tensorize(substruct) for substruct in data_struct]) + elif isinstance(data_struct, (list, tuple)): + return self._consolidate([self.recursive_tensorize(substruct) for substruct in data_struct]) + return self._tensorize(data_struct) + + def recursive_tensorize(self, data_struct: dict): + return map_nested(self._recursive_tensorize, data_struct, map_list=False) + + def format_row(self, pa_table: pa.Table) -> Mapping: + row = self.numpy_arrow_extractor().extract_row(pa_table) + row = self.python_features_decoder.decode_row(row) + return self.recursive_tensorize(row) + + def format_column(self, pa_table: pa.Table) -> "jax.Array": + column = self.numpy_arrow_extractor().extract_column(pa_table) + column = 
self.python_features_decoder.decode_column(column, pa_table.column_names[0]) + column = self.recursive_tensorize(column) + column = self._consolidate(column) + return column + + def format_batch(self, pa_table: pa.Table) -> Mapping: + batch = self.numpy_arrow_extractor().extract_batch(pa_table) + batch = self.python_features_decoder.decode_batch(batch) + batch = self.recursive_tensorize(batch) + for column_name in batch: + batch[column_name] = self._consolidate(batch[column_name]) + return batch diff --git a/lib/python3.10/site-packages/datasets/formatting/np_formatter.py b/lib/python3.10/site-packages/datasets/formatting/np_formatter.py new file mode 100644 index 0000000000000000000000000000000000000000..95bcff2b51728fdd9647dad382639724df163ce2 --- /dev/null +++ b/lib/python3.10/site-packages/datasets/formatting/np_formatter.py @@ -0,0 +1,106 @@ +# Copyright 2020 The HuggingFace Authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import sys +from collections.abc import Mapping + +import numpy as np +import pyarrow as pa + +from .. 
import config +from ..utils.py_utils import map_nested +from .formatting import TensorFormatter + + +class NumpyFormatter(TensorFormatter[Mapping, np.ndarray, Mapping]): + def __init__(self, features=None, **np_array_kwargs): + super().__init__(features=features) + self.np_array_kwargs = np_array_kwargs + + def _consolidate(self, column): + if isinstance(column, list): + if column and all( + isinstance(x, np.ndarray) and x.shape == column[0].shape and x.dtype == column[0].dtype for x in column + ): + return np.stack(column) + else: + # don't use np.array(column, dtype=object) + # since it fails in certain cases + # see https://stackoverflow.com/q/51005699 + out = np.empty(len(column), dtype=object) + out[:] = column + return out + return column + + def _tensorize(self, value): + if isinstance(value, (str, bytes, type(None))): + return value + elif isinstance(value, (np.character, np.ndarray)) and np.issubdtype(value.dtype, np.character): + return value + elif isinstance(value, np.number): + return value + + default_dtype = {} + + if isinstance(value, np.ndarray) and np.issubdtype(value.dtype, np.integer): + default_dtype = {"dtype": np.int64} + elif isinstance(value, np.ndarray) and np.issubdtype(value.dtype, np.floating): + default_dtype = {"dtype": np.float32} + elif config.PIL_AVAILABLE and "PIL" in sys.modules: + import PIL.Image + + if isinstance(value, PIL.Image.Image): + return np.asarray(value, **self.np_array_kwargs) + + return np.asarray(value, **{**default_dtype, **self.np_array_kwargs}) + + def _recursive_tensorize(self, data_struct): + # support for torch, tf, jax etc. 
+ if config.TORCH_AVAILABLE and "torch" in sys.modules: + import torch + + if isinstance(data_struct, torch.Tensor): + return self._tensorize(data_struct.detach().cpu().numpy()[()]) + if hasattr(data_struct, "__array__") and not isinstance(data_struct, (np.ndarray, np.character, np.number)): + data_struct = data_struct.__array__() + # support for nested types like struct of list of struct + if isinstance(data_struct, np.ndarray): + if data_struct.dtype == object: + return self._consolidate([self.recursive_tensorize(substruct) for substruct in data_struct]) + if isinstance(data_struct, (list, tuple)): + return self._consolidate([self.recursive_tensorize(substruct) for substruct in data_struct]) + return self._tensorize(data_struct) + + def recursive_tensorize(self, data_struct: dict): + return map_nested(self._recursive_tensorize, data_struct, map_list=False) + + def format_row(self, pa_table: pa.Table) -> Mapping: + row = self.numpy_arrow_extractor().extract_row(pa_table) + row = self.python_features_decoder.decode_row(row) + return self.recursive_tensorize(row) + + def format_column(self, pa_table: pa.Table) -> np.ndarray: + column = self.numpy_arrow_extractor().extract_column(pa_table) + column = self.python_features_decoder.decode_column(column, pa_table.column_names[0]) + column = self.recursive_tensorize(column) + column = self._consolidate(column) + return column + + def format_batch(self, pa_table: pa.Table) -> Mapping: + batch = self.numpy_arrow_extractor().extract_batch(pa_table) + batch = self.python_features_decoder.decode_batch(batch) + batch = self.recursive_tensorize(batch) + for column_name in batch: + batch[column_name] = self._consolidate(batch[column_name]) + return batch diff --git a/lib/python3.10/site-packages/datasets/formatting/tf_formatter.py b/lib/python3.10/site-packages/datasets/formatting/tf_formatter.py new file mode 100644 index 0000000000000000000000000000000000000000..adb15cda3815d77fa0272562e83fda029d1babee --- /dev/null +++ 
b/lib/python3.10/site-packages/datasets/formatting/tf_formatter.py @@ -0,0 +1,115 @@ +# Copyright 2020 The HuggingFace Authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# Lint as: python3 +import sys +from collections.abc import Mapping +from typing import TYPE_CHECKING + +import numpy as np +import pyarrow as pa + +from .. import config +from ..utils.py_utils import map_nested +from .formatting import TensorFormatter + + +if TYPE_CHECKING: + import tensorflow as tf + + +class TFFormatter(TensorFormatter[Mapping, "tf.Tensor", Mapping]): + def __init__(self, features=None, **tf_tensor_kwargs): + super().__init__(features=features) + self.tf_tensor_kwargs = tf_tensor_kwargs + import tensorflow as tf # noqa: F401 - import tf at initialization + + def _consolidate(self, column): + import tensorflow as tf + + if isinstance(column, list) and column: + if all( + isinstance(x, tf.Tensor) and x.shape == column[0].shape and x.dtype == column[0].dtype for x in column + ): + return tf.stack(column) + elif all( + isinstance(x, (tf.Tensor, tf.RaggedTensor)) and x.ndim == 1 and x.dtype == column[0].dtype + for x in column + ): + # only rag 1-D tensors, otherwise some dimensions become ragged even though they were consolidated + return tf.ragged.stack(column) + + return column + + def _tensorize(self, value): + import tensorflow as tf + + if value is None: + return value + + default_dtype = {} + + if isinstance(value, (np.number, np.ndarray)) and np.issubdtype(value.dtype, 
np.integer): + default_dtype = {"dtype": tf.int64} + elif isinstance(value, (np.number, np.ndarray)) and np.issubdtype(value.dtype, np.floating): + default_dtype = {"dtype": tf.float32} + elif config.PIL_AVAILABLE and "PIL" in sys.modules: + import PIL.Image + + if isinstance(value, PIL.Image.Image): + value = np.asarray(value) + + return tf.convert_to_tensor(value, **{**default_dtype, **self.tf_tensor_kwargs}) + + def _recursive_tensorize(self, data_struct): + import tensorflow as tf + + # support for torch, tf, jax etc. + if config.TORCH_AVAILABLE and "torch" in sys.modules: + import torch + + if isinstance(data_struct, torch.Tensor): + return self._tensorize(data_struct.detach().cpu().numpy()[()]) + if hasattr(data_struct, "__array__") and not isinstance(data_struct, tf.Tensor): + data_struct = data_struct.__array__() + # support for nested types like struct of list of struct + if isinstance(data_struct, np.ndarray): + if data_struct.dtype == object: # tf tensors cannot be instantied from an array of objects + return self._consolidate([self.recursive_tensorize(substruct) for substruct in data_struct]) + elif isinstance(data_struct, (list, tuple)): + return self._consolidate([self.recursive_tensorize(substruct) for substruct in data_struct]) + return self._tensorize(data_struct) + + def recursive_tensorize(self, data_struct: dict): + return map_nested(self._recursive_tensorize, data_struct, map_list=False) + + def format_row(self, pa_table: pa.Table) -> Mapping: + row = self.numpy_arrow_extractor().extract_row(pa_table) + row = self.python_features_decoder.decode_row(row) + return self.recursive_tensorize(row) + + def format_column(self, pa_table: pa.Table) -> "tf.Tensor": + column = self.numpy_arrow_extractor().extract_column(pa_table) + column = self.python_features_decoder.decode_column(column, pa_table.column_names[0]) + column = self.recursive_tensorize(column) + column = self._consolidate(column) + return column + + def format_batch(self, pa_table: 
pa.Table) -> Mapping: + batch = self.numpy_arrow_extractor().extract_batch(pa_table) + batch = self.python_features_decoder.decode_batch(batch) + batch = self.recursive_tensorize(batch) + for column_name in batch: + batch[column_name] = self._consolidate(batch[column_name]) + return batch diff --git a/lib/python3.10/site-packages/datasets/formatting/torch_formatter.py b/lib/python3.10/site-packages/datasets/formatting/torch_formatter.py new file mode 100644 index 0000000000000000000000000000000000000000..a4e899ac43b74f868e021f8babde801c054650c0 --- /dev/null +++ b/lib/python3.10/site-packages/datasets/formatting/torch_formatter.py @@ -0,0 +1,105 @@ +# Copyright 2020 The HuggingFace Authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# Lint as: python3 +import sys +from collections.abc import Mapping +from typing import TYPE_CHECKING + +import numpy as np +import pyarrow as pa + +from .. 
class TorchFormatter(TensorFormatter[Mapping, "torch.Tensor", Mapping]):
    """Formatter that turns Arrow-extracted data into :mod:`torch` tensors."""

    def __init__(self, features=None, **torch_tensor_kwargs):
        super().__init__(features=features)
        # Extra keyword arguments forwarded verbatim to ``torch.tensor`` (e.g. dtype, device).
        self.torch_tensor_kwargs = torch_tensor_kwargs
        import torch  # noqa import torch at initialization

    def _consolidate(self, column):
        """Stack a non-empty list of same-shape/same-dtype tensors; otherwise return unchanged."""
        import torch

        if isinstance(column, list) and column:
            head = column[0]
            uniform = all(
                isinstance(item, torch.Tensor) and item.shape == head.shape and item.dtype == head.dtype
                for item in column
            )
            if uniform:
                return torch.stack(column)
        return column

    def _tensorize(self, value):
        """Convert one leaf value to a torch tensor; strings/bytes/None pass through untouched."""
        import torch

        if isinstance(value, (str, bytes, type(None))):
            return value
        elif isinstance(value, (np.character, np.ndarray)) and np.issubdtype(value.dtype, np.character):
            # numpy string scalars/arrays become plain Python strings / lists of strings.
            return value.tolist()

        default_dtype = {}

        # Map numpy integer/floating dtypes onto torch's conventional defaults.
        is_numeric = isinstance(value, (np.number, np.ndarray))
        if is_numeric and np.issubdtype(value.dtype, np.integer):
            default_dtype = {"dtype": torch.int64}
        elif is_numeric and np.issubdtype(value.dtype, np.floating):
            default_dtype = {"dtype": torch.float32}
        elif config.PIL_AVAILABLE and "PIL" in sys.modules:
            import PIL.Image

            if isinstance(value, PIL.Image.Image):
                value = np.asarray(value)
        # Caller-supplied kwargs take precedence over the inferred default dtype.
        return torch.tensor(value, **{**default_dtype, **self.torch_tensor_kwargs})

    def _recursive_tensorize(self, data_struct):
        """Recursively convert ``data_struct`` to torch tensors, consolidating nested results."""
        import torch

        if hasattr(data_struct, "__array__") and not isinstance(data_struct, torch.Tensor):
            data_struct = data_struct.__array__()
        # Nested containers (struct of list of struct): torch tensors cannot be
        # instantiated from an object-dtype array, so recurse element-wise instead.
        needs_recursion = isinstance(data_struct, (list, tuple)) or (
            isinstance(data_struct, np.ndarray) and data_struct.dtype == object
        )
        if needs_recursion:
            return self._consolidate([self.recursive_tensorize(element) for element in data_struct])
        return self._tensorize(data_struct)

    def recursive_tensorize(self, data_struct: dict):
        """Apply :meth:`_recursive_tensorize` over every leaf of ``data_struct``."""
        return map_nested(self._recursive_tensorize, data_struct, map_list=False)

    def format_row(self, pa_table: pa.Table) -> Mapping:
        """Extract, decode and tensorize a single row of ``pa_table``."""
        raw_row = self.numpy_arrow_extractor().extract_row(pa_table)
        decoded_row = self.python_features_decoder.decode_row(raw_row)
        return self.recursive_tensorize(decoded_row)

    def format_column(self, pa_table: pa.Table) -> "torch.Tensor":
        """Extract, decode, tensorize and consolidate the first column of ``pa_table``."""
        raw_column = self.numpy_arrow_extractor().extract_column(pa_table)
        decoded_column = self.python_features_decoder.decode_column(raw_column, pa_table.column_names[0])
        tensorized_column = self.recursive_tensorize(decoded_column)
        return self._consolidate(tensorized_column)

    def format_batch(self, pa_table: pa.Table) -> Mapping:
        """Extract, decode and tensorize a whole batch, consolidating each column."""
        raw_batch = self.numpy_arrow_extractor().extract_batch(pa_table)
        decoded_batch = self.python_features_decoder.decode_batch(raw_batch)
        tensorized_batch = self.recursive_tensorize(decoded_batch)
        for column_name in tensorized_batch:
            tensorized_batch[column_name] = self._consolidate(tensorized_batch[column_name])
        return tensorized_batch
mode 100644 index 0000000000000000000000000000000000000000..d9e15f0039cc27ed8abd9fdf394423a3fada2c95 --- /dev/null +++ b/lib/python3.10/site-packages/datasets/utils/resources/creators.json @@ -0,0 +1,17 @@ +{ + "language": [ + "found", + "crowdsourced", + "expert-generated", + "machine-generated", + "other" + ], + "annotations": [ + "found", + "crowdsourced", + "expert-generated", + "machine-generated", + "no-annotation", + "other" + ] +} diff --git a/lib/python3.10/site-packages/datasets/utils/resources/languages.json b/lib/python3.10/site-packages/datasets/utils/resources/languages.json new file mode 100644 index 0000000000000000000000000000000000000000..ea7686f956b898af3faf97b86be89b71d88855d4 --- /dev/null +++ b/lib/python3.10/site-packages/datasets/utils/resources/languages.json @@ -0,0 +1,8026 @@ +{ + "code": "Programming language (C++, Java, Javascript, Python, etc.)", + "aa": "Afar", + "aaa": "Ghotuo", + "aab": "Alumu-Tesu", + "aac": "Ari", + "aad": "Amal", + "aae": "Arbëreshë Albanian", + "aaf": "Aranadan", + "aag": "Ambrak", + "aah": "Abu' Arapesh", + "aai": "Arifama-Miniafia", + "aak": "Ankave", + "aal": "Afade", + "aan": "Anambé", + "aao": "Algerian Saharan Arabic", + "aap": "Pará Arára", + "aaq": "Eastern Abnaki", + "aas": "Aasáx", + "aat": "Arvanitika Albanian", + "aau": "Abau", + "aav": "Austro-Asiatic languages", + "aaw": "Solong", + "aax": "Mandobo Atas", + "aaz": "Amarasi", + "ab": "Abkhazian", + "aba": "Abé", + "abb": "Bankon", + "abc": "Ambala Ayta", + "abd": "Manide", + "abe": "Western Abnaki", + "abf": "Abai Sungai", + "abg": "Abaga", + "abh": "Tajiki Arabic", + "abi": "Abidji", + "abj": "Aka-Bea", + "abl": "Lampung Nyo", + "abm": "Abanyom", + "abn": "Abua", + "abo": "Abon", + "abp": "Abellen Ayta", + "abq": "Abaza", + "abr": "Abron", + "abs": "Ambonese Malay", + "abt": "Ambulas", + "abu": "Abure", + "abv": "Baharna Arabic", + "abw": "Pal", + "abx": "Inabaknon", + "aby": "Aneme Wake", + "abz": "Abui", + "aca": "Achagua", + "acb": "Áncá", + 
"acd": "Gikyode", + "ace": "Achinese", + "acf": "Saint Lucian Creole French", + "ach": "Acoli", + "aci": "Aka-Cari", + "ack": "Aka-Kora", + "acl": "Akar-Bale", + "acm": "Mesopotamian Arabic", + "acn": "Achang", + "acp": "Eastern Acipa", + "acq": "Ta'izzi-Adeni Arabic", + "acr": "Achi", + "acs": "Acroá", + "act": "Achterhoeks", + "acu": "Achuar-Shiwiar", + "acv": "Achumawi", + "acw": "Hijazi Arabic", + "acx": "Omani Arabic", + "acy": "Cypriot Arabic", + "acz": "Acheron", + "ada": "Adangme", + "adb": "Atauran", + "add": "Lidzonka; Dzodinka", + "ade": "Adele", + "adf": "Dhofari Arabic", + "adg": "Andegerebinha", + "adh": "Adhola", + "adi": "Adi", + "adj": "Adioukrou", + "adl": "Galo", + "adn": "Adang", + "ado": "Abu", + "adq": "Adangbe", + "adr": "Adonara", + "ads": "Adamorobe Sign Language", + "adt": "Adnyamathanha", + "adu": "Aduge", + "adw": "Amundava", + "adx": "Amdo Tibetan", + "ady": "Adyghe; Adygei", + "adz": "Adzera", + "ae": "Avestan", + "aea": "Areba", + "aeb": "Tunisian Arabic", + "aec": "Saidi Arabic", + "aed": "Argentine Sign Language", + "aee": "Northeast Pashai; Northeast Pashayi", + "aek": "Haeke", + "ael": "Ambele", + "aem": "Arem", + "aen": "Armenian Sign Language", + "aeq": "Aer", + "aer": "Eastern Arrernte", + "aes": "Alsea", + "aeu": "Akeu", + "aew": "Ambakich", + "aey": "Amele", + "aez": "Aeka", + "af": "Afrikaans", + "afa": "Afro-Asiatic languages", + "afb": "Gulf Arabic", + "afd": "Andai", + "afe": "Putukwam", + "afg": "Afghan Sign Language", + "afh": "Afrihili", + "afi": "Akrukay; Chini", + "afk": "Nanubae", + "afn": "Defaka", + "afo": "Eloyi", + "afp": "Tapei", + "afs": "Afro-Seminole Creole", + "aft": "Afitti", + "afu": "Awutu", + "afz": "Obokuitai", + "aga": "Aguano", + "agb": "Legbo", + "agc": "Agatu", + "agd": "Agarabi", + "age": "Angal", + "agf": "Arguni", + "agg": "Angor", + "agh": "Ngelima", + "agi": "Agariya", + "agj": "Argobba", + "agk": "Isarog Agta", + "agl": "Fembe", + "agm": "Angaataha", + "agn": "Agutaynen", + "ago": "Tainae", + 
"agq": "Aghem", + "agr": "Aguaruna", + "ags": "Esimbi", + "agt": "Central Cagayan Agta", + "agu": "Aguacateco", + "agv": "Remontado Dumagat", + "agw": "Kahua", + "agx": "Aghul", + "agy": "Southern Alta", + "agz": "Mt. Iriga Agta", + "aha": "Ahanta", + "ahb": "Axamb", + "ahg": "Qimant", + "ahh": "Aghu", + "ahi": "Tiagbamrin Aizi", + "ahk": "Akha", + "ahl": "Igo", + "ahm": "Mobumrin Aizi", + "ahn": "Àhàn", + "aho": "Ahom", + "ahp": "Aproumu Aizi", + "ahr": "Ahirani", + "ahs": "Ashe", + "aht": "Ahtena", + "aia": "Arosi", + "aib": "Ainu (China)", + "aic": "Ainbai", + "aid": "Alngith", + "aie": "Amara", + "aif": "Agi", + "aig": "Antigua and Barbuda Creole English", + "aih": "Ai-Cham", + "aii": "Assyrian Neo-Aramaic", + "aij": "Lishanid Noshan", + "aik": "Ake", + "ail": "Aimele", + "aim": "Aimol", + "ain": "Ainu (Japan)", + "aio": "Aiton", + "aip": "Burumakok", + "aiq": "Aimaq", + "air": "Airoran", + "ait": "Arikem", + "aiw": "Aari", + "aix": "Aighon", + "aiy": "Ali", + "aja": "Aja (South Sudan)", + "ajg": "Aja (Benin)", + "aji": "Ajië", + "ajn": "Andajin", + "ajp": "South Levantine Arabic", + "ajs": "Algerian Jewish Sign Language", + "aju": "Judeo-Moroccan Arabic", + "ajw": "Ajawa", + "ajz": "Amri Karbi", + "ak": "Akan", + "akb": "Batak Angkola", + "akc": "Mpur", + "akd": "Ukpet-Ehom", + "ake": "Akawaio", + "akf": "Akpa", + "akg": "Anakalangu", + "akh": "Angal Heneng", + "aki": "Aiome", + "akj": "Aka-Jeru", + "akk": "Akkadian", + "akl": "Aklanon", + "akm": "Aka-Bo", + "ako": "Akurio", + "akp": "Siwu", + "akq": "Ak", + "akr": "Araki", + "aks": "Akaselem", + "akt": "Akolet", + "aku": "Akum", + "akv": "Akhvakh", + "akw": "Akwa", + "akx": "Aka-Kede", + "aky": "Aka-Kol", + "akz": "Alabama", + "ala": "Alago", + "alc": "Qawasqar", + "ald": "Alladian", + "ale": "Aleut", + "alf": "Alege", + "alg": "Algonquian languages", + "alh": "Alawa", + "ali": "Amaimon", + "alj": "Alangan", + "alk": "Alak", + "all": "Allar", + "alm": "Amblong", + "aln": "Gheg Albanian", + "alo": 
"Larike-Wakasihu", + "alp": "Alune", + "alq": "Algonquin", + "alr": "Alutor", + "als": "Tosk Albanian", + "alt": "Southern Altai", + "alu": "'Are'are", + "alv": "Atlantic-Congo languages", + "alw": "Alaba-K’abeena; Wanbasana", + "alx": "Amol", + "aly": "Alyawarr", + "alz": "Alur", + "am": "Amharic", + "ama": "Amanayé", + "amb": "Ambo", + "amc": "Amahuaca", + "ame": "Yanesha'", + "amf": "Hamer-Banna", + "amg": "Amurdak", + "ami": "Amis", + "amj": "Amdang", + "amk": "Ambai", + "aml": "War-Jaintia", + "amm": "Ama (Papua New Guinea)", + "amn": "Amanab", + "amo": "Amo", + "amp": "Alamblak", + "amq": "Amahai", + "amr": "Amarakaeri", + "ams": "Southern Amami-Oshima", + "amt": "Amto", + "amu": "Guerrero Amuzgo", + "amv": "Ambelau", + "amw": "Western Neo-Aramaic", + "amx": "Anmatyerre", + "amy": "Ami", + "amz": "Atampaya", + "an": "Aragonese", + "ana": "Andaqui", + "anb": "Andoa", + "anc": "Ngas", + "and": "Ansus", + "ane": "Xârâcùù", + "anf": "Animere", + "ang": "Old English (ca. 450-1100)", + "anh": "Nend", + "ani": "Andi", + "anj": "Anor", + "ank": "Goemai", + "anl": "Anu-Hkongso Chin", + "anm": "Anal", + "ann": "Obolo", + "ano": "Andoque", + "anp": "Angika", + "anq": "Jarawa (India)", + "anr": "Andh", + "ans": "Anserma", + "ant": "Antakarinya; Antikarinya", + "anu": "Anuak", + "anv": "Denya", + "anw": "Anaang", + "anx": "Andra-Hus", + "any": "Anyin", + "anz": "Anem", + "aoa": "Angolar", + "aob": "Abom", + "aoc": "Pemon", + "aod": "Andarum", + "aoe": "Angal Enen", + "aof": "Bragat", + "aog": "Angoram", + "aoi": "Anindilyakwa", + "aoj": "Mufian", + "aok": "Arhö", + "aol": "Alor", + "aom": "Ömie", + "aon": "Bumbita Arapesh", + "aor": "Aore", + "aos": "Taikat", + "aot": "Atong (India); A'tong", + "aou": "A'ou", + "aox": "Atorada", + "aoz": "Uab Meto", + "apa": "Apache languages", + "apb": "Sa'a", + "apc": "North Levantine Arabic", + "apd": "Sudanese Arabic", + "ape": "Bukiyip", + "apf": "Pahanan Agta", + "apg": "Ampanang", + "aph": "Athpariya", + "api": "Apiaká", + "apj": 
"Jicarilla Apache", + "apk": "Kiowa Apache", + "apl": "Lipan Apache", + "apm": "Mescalero-Chiricahua Apache", + "apn": "Apinayé", + "apo": "Ambul", + "app": "Apma", + "apq": "A-Pucikwar", + "apr": "Arop-Lokep", + "aps": "Arop-Sissano", + "apt": "Apatani", + "apu": "Apurinã", + "apv": "Alapmunte", + "apw": "Western Apache", + "apx": "Aputai", + "apy": "Apalaí", + "apz": "Safeyoka", + "aqa": "Alacalufan languages", + "aqc": "Archi", + "aqd": "Ampari Dogon", + "aqg": "Arigidi", + "aqk": "Aninka", + "aql": "Algic languages", + "aqm": "Atohwaim", + "aqn": "Northern Alta", + "aqp": "Atakapa", + "aqr": "Arhâ", + "aqt": "Angaité", + "aqz": "Akuntsu", + "ar": "Arabic", + "arb": "Standard Arabic", + "arc": "Official Aramaic (700-300 BCE); Imperial Aramaic (700-300 BCE)", + "ard": "Arabana", + "are": "Western Arrarnta", + "arh": "Arhuaco", + "ari": "Arikara", + "arj": "Arapaso", + "ark": "Arikapú", + "arl": "Arabela", + "arn": "Mapudungun; Mapuche", + "aro": "Araona", + "arp": "Arapaho", + "arq": "Algerian Arabic", + "arr": "Karo (Brazil)", + "ars": "Najdi Arabic", + "art": "Artificial languages", + "aru": "Aruá (Amazonas State); Arawá", + "arv": "Arbore", + "arw": "Arawak", + "arx": "Aruá (Rodonia State)", + "ary": "Moroccan Arabic", + "arz": "Egyptian Arabic", + "as": "Assamese", + "asa": "Asu (Tanzania)", + "asb": "Assiniboine", + "asc": "Casuarina Coast Asmat", + "ase": "American Sign Language", + "asf": "Auslan; Australian Sign Language", + "asg": "Cishingini", + "ash": "Abishira", + "asi": "Buruwai", + "asj": "Sari", + "ask": "Ashkun", + "asl": "Asilulu", + "asn": "Xingú Asuriní", + "aso": "Dano", + "asp": "Algerian Sign Language", + "asq": "Austrian Sign Language", + "asr": "Asuri", + "ass": "Ipulo", + "ast": "Asturian; Asturleonese; Bable; Leonese", + "asu": "Tocantins Asurini", + "asv": "Asoa", + "asw": "Australian Aborigines Sign Language", + "asx": "Muratayak", + "asy": "Yaosakor Asmat", + "asz": "As", + "ata": "Pele-Ata", + "atb": "Zaiwa", + "atc": "Atsahuaca", + 
"atd": "Ata Manobo", + "ate": "Atemble", + "atg": "Ivbie North-Okpela-Arhe", + "ath": "Athapascan languages", + "ati": "Attié", + "atj": "Atikamekw", + "atk": "Ati", + "atl": "Mt. Iraya Agta", + "atm": "Ata", + "atn": "Ashtiani", + "ato": "Atong (Cameroon)", + "atp": "Pudtol Atta", + "atq": "Aralle-Tabulahan", + "atr": "Waimiri-Atroari", + "ats": "Gros Ventre", + "att": "Pamplona Atta", + "atu": "Reel", + "atv": "Northern Altai", + "atw": "Atsugewi", + "atx": "Arutani", + "aty": "Aneityum", + "atz": "Arta", + "aua": "Asumboa", + "aub": "Alugu", + "auc": "Waorani", + "aud": "Anuta", + "auf": "Arauan languages", + "aug": "Aguna", + "auh": "Aushi", + "aui": "Anuki", + "auj": "Awjilah", + "auk": "Heyo", + "aul": "Aulua", + "aum": "Asu (Nigeria)", + "aun": "Molmo One", + "auo": "Auyokawa", + "aup": "Makayam", + "auq": "Anus; Korur", + "aur": "Aruek", + "aus": "Australian languages", + "aut": "Austral", + "auu": "Auye", + "auw": "Awyi", + "aux": "Aurá", + "auy": "Awiyaana", + "auz": "Uzbeki Arabic", + "av": "Avaric", + "avb": "Avau", + "avd": "Alviri-Vidari", + "avi": "Avikam", + "avk": "Kotava", + "avl": "Eastern Egyptian Bedawi Arabic", + "avm": "Angkamuthi", + "avn": "Avatime", + "avo": "Agavotaguerra", + "avs": "Aushiri", + "avt": "Au", + "avu": "Avokaya", + "avv": "Avá-Canoeiro", + "awa": "Awadhi", + "awb": "Awa (Papua New Guinea)", + "awc": "Cicipu", + "awd": "Arawakan languages", + "awe": "Awetí", + "awg": "Anguthimri", + "awh": "Awbono", + "awi": "Aekyom", + "awk": "Awabakal", + "awm": "Arawum", + "awn": "Awngi", + "awo": "Awak", + "awr": "Awera", + "aws": "South Awyu", + "awt": "Araweté", + "awu": "Central Awyu", + "awv": "Jair Awyu", + "aww": "Awun", + "awx": "Awara", + "awy": "Edera Awyu", + "axb": "Abipon", + "axe": "Ayerrerenge", + "axg": "Mato Grosso Arára", + "axk": "Yaka (Central African Republic)", + "axl": "Lower Southern Aranda", + "axm": "Middle Armenian", + "axx": "Xârâgurè", + "ay": "Aymara", + "aya": "Awar", + "ayb": "Ayizo Gbe", + "ayc": "Southern 
Aymara", + "ayd": "Ayabadhu", + "aye": "Ayere", + "ayg": "Ginyanga", + "ayh": "Hadrami Arabic", + "ayi": "Leyigha", + "ayk": "Akuku", + "ayl": "Libyan Arabic", + "ayn": "Sanaani Arabic", + "ayo": "Ayoreo", + "ayp": "North Mesopotamian Arabic", + "ayq": "Ayi (Papua New Guinea)", + "ayr": "Central Aymara", + "ays": "Sorsogon Ayta", + "ayt": "Magbukun Ayta", + "ayu": "Ayu", + "ayz": "Mai Brat", + "az": "Azerbaijani", + "aza": "Azha", + "azb": "South Azerbaijani", + "azc": "Uto-Aztecan languages", + "azd": "Eastern Durango Nahuatl", + "azg": "San Pedro Amuzgos Amuzgo", + "azj": "North Azerbaijani", + "azm": "Ipalapa Amuzgo", + "azn": "Western Durango Nahuatl", + "azo": "Awing", + "azt": "Faire Atta", + "azz": "Highland Puebla Nahuatl", + "ba": "Bashkir", + "baa": "Babatana", + "bab": "Bainouk-Gunyuño", + "bac": "Badui", + "bad": "Banda languages", + "bae": "Baré", + "baf": "Nubaca", + "bag": "Tuki", + "bah": "Bahamas Creole English", + "bai": "Bamileke languages", + "baj": "Barakai", + "bal": "Baluchi", + "ban": "Balinese", + "bao": "Waimaha", + "bap": "Bantawa", + "bar": "Bavarian", + "bas": "Basa (Cameroon)", + "bat": "Baltic languages", + "bau": "Bada (Nigeria)", + "bav": "Vengo", + "baw": "Bambili-Bambui", + "bax": "Bamun", + "bay": "Batuley", + "bba": "Baatonum", + "bbb": "Barai", + "bbc": "Batak Toba", + "bbd": "Bau", + "bbe": "Bangba", + "bbf": "Baibai", + "bbg": "Barama", + "bbh": "Bugan", + "bbi": "Barombi", + "bbj": "Ghomálá'", + "bbk": "Babanki", + "bbl": "Bats", + "bbm": "Babango", + "bbn": "Uneapa", + "bbo": "Northern Bobo Madaré; Konabéré", + "bbp": "West Central Banda", + "bbq": "Bamali", + "bbr": "Girawa", + "bbs": "Bakpinka", + "bbt": "Mburku", + "bbu": "Kulung (Nigeria)", + "bbv": "Karnai", + "bbw": "Baba", + "bbx": "Bubia", + "bby": "Befang", + "bca": "Central Bai", + "bcb": "Bainouk-Samik", + "bcc": "Southern Balochi", + "bcd": "North Babar", + "bce": "Bamenyam", + "bcf": "Bamu", + "bcg": "Baga Pokur", + "bch": "Bariai", + "bci": "Baoulé", + "bcj": 
"Bardi", + "bck": "Bunuba", + "bcl": "Central Bikol", + "bcm": "Bannoni", + "bcn": "Bali (Nigeria)", + "bco": "Kaluli", + "bcp": "Bali (Democratic Republic of Congo)", + "bcq": "Bench", + "bcr": "Babine", + "bcs": "Kohumono", + "bct": "Bendi", + "bcu": "Awad Bing", + "bcv": "Shoo-Minda-Nye", + "bcw": "Bana", + "bcy": "Bacama", + "bcz": "Bainouk-Gunyaamolo", + "bda": "Bayot", + "bdb": "Basap", + "bdc": "Emberá-Baudó", + "bdd": "Bunama", + "bde": "Bade", + "bdf": "Biage", + "bdg": "Bonggi", + "bdh": "Baka (South Sudan)", + "bdi": "Burun", + "bdj": "Bai (South Sudan); Bai", + "bdk": "Budukh", + "bdl": "Indonesian Bajau", + "bdm": "Buduma", + "bdn": "Baldemu", + "bdo": "Morom", + "bdp": "Bende", + "bdq": "Bahnar", + "bdr": "West Coast Bajau", + "bds": "Burunge", + "bdt": "Bokoto", + "bdu": "Oroko", + "bdv": "Bodo Parja", + "bdw": "Baham", + "bdx": "Budong-Budong", + "bdy": "Bandjalang", + "bdz": "Badeshi", + "be": "Belarusian", + "bea": "Beaver", + "beb": "Bebele", + "bec": "Iceve-Maci", + "bed": "Bedoanas", + "bee": "Byangsi", + "bef": "Benabena", + "beg": "Belait", + "beh": "Biali", + "bei": "Bekati'", + "bej": "Beja; Bedawiyet", + "bek": "Bebeli", + "bem": "Bemba (Zambia)", + "beo": "Beami", + "bep": "Besoa", + "beq": "Beembe", + "ber": "Berber languages", + "bes": "Besme", + "bet": "Guiberoua Béte", + "beu": "Blagar", + "bev": "Daloa Bété", + "bew": "Betawi", + "bex": "Jur Modo", + "bey": "Beli (Papua New Guinea)", + "bez": "Bena (Tanzania)", + "bfa": "Bari", + "bfb": "Pauri Bareli", + "bfc": "Panyi Bai; Northern Bai", + "bfd": "Bafut", + "bfe": "Betaf; Tena", + "bff": "Bofi", + "bfg": "Busang Kayan", + "bfh": "Blafe", + "bfi": "British Sign Language", + "bfj": "Bafanji", + "bfk": "Ban Khor Sign Language", + "bfl": "Banda-Ndélé", + "bfm": "Mmen", + "bfn": "Bunak", + "bfo": "Malba Birifor", + "bfp": "Beba", + "bfq": "Badaga", + "bfr": "Bazigar", + "bfs": "Southern Bai", + "bft": "Balti", + "bfu": "Gahri", + "bfw": "Bondo", + "bfx": "Bantayanon", + "bfy": "Bagheli", 
+ "bfz": "Mahasu Pahari", + "bg": "Bulgarian", + "bga": "Gwamhi-Wuri", + "bgb": "Bobongko", + "bgc": "Haryanvi", + "bgd": "Rathwi Bareli", + "bge": "Bauria", + "bgf": "Bangandu", + "bgg": "Bugun", + "bgi": "Giangan", + "bgj": "Bangolan", + "bgk": "Bit; Buxinhua", + "bgl": "Bo (Laos)", + "bgn": "Western Balochi", + "bgo": "Baga Koga", + "bgp": "Eastern Balochi", + "bgq": "Bagri", + "bgr": "Bawm Chin", + "bgs": "Tagabawa", + "bgt": "Bughotu", + "bgu": "Mbongno", + "bgv": "Warkay-Bipim", + "bgw": "Bhatri", + "bgx": "Balkan Gagauz Turkish", + "bgy": "Benggoi", + "bgz": "Banggai", + "bh": "Bihari languages", + "bha": "Bharia", + "bhb": "Bhili", + "bhc": "Biga", + "bhd": "Bhadrawahi", + "bhe": "Bhaya", + "bhf": "Odiai", + "bhg": "Binandere", + "bhh": "Bukharic", + "bhi": "Bhilali", + "bhj": "Bahing", + "bhl": "Bimin", + "bhm": "Bathari", + "bhn": "Bohtan Neo-Aramaic", + "bho": "Bhojpuri", + "bhp": "Bima", + "bhq": "Tukang Besi South", + "bhr": "Bara Malagasy", + "bhs": "Buwal", + "bht": "Bhattiyali", + "bhu": "Bhunjia", + "bhv": "Bahau", + "bhw": "Biak", + "bhx": "Bhalay", + "bhy": "Bhele", + "bhz": "Bada (Indonesia)", + "bi": "Bislama", + "bia": "Badimaya", + "bib": "Bissa; Bisa", + "bid": "Bidiyo", + "bie": "Bepour", + "bif": "Biafada", + "big": "Biangai", + "bik": "Bikol", + "bil": "Bile", + "bim": "Bimoba", + "bin": "Bini; Edo", + "bio": "Nai", + "bip": "Bila", + "biq": "Bipi", + "bir": "Bisorio", + "bit": "Berinomo", + "biu": "Biete", + "biv": "Southern Birifor", + "biw": "Kol (Cameroon)", + "bix": "Bijori", + "biy": "Birhor", + "biz": "Baloi", + "bja": "Budza", + "bjb": "Banggarla", + "bjc": "Bariji", + "bje": "Biao-Jiao Mien", + "bjf": "Barzani Jewish Neo-Aramaic", + "bjg": "Bidyogo", + "bjh": "Bahinemo", + "bji": "Burji", + "bjj": "Kanauji", + "bjk": "Barok", + "bjl": "Bulu (Papua New Guinea)", + "bjm": "Bajelani", + "bjn": "Banjar", + "bjo": "Mid-Southern Banda", + "bjp": "Fanamaket", + "bjr": "Binumarien", + "bjs": "Bajan", + "bjt": "Balanta-Ganja", + "bju": 
"Busuu", + "bjv": "Bedjond", + "bjw": "Bakwé", + "bjx": "Banao Itneg", + "bjy": "Bayali", + "bjz": "Baruga", + "bka": "Kyak", + "bkc": "Baka (Cameroon)", + "bkd": "Binukid; Talaandig", + "bkf": "Beeke", + "bkg": "Buraka", + "bkh": "Bakoko", + "bki": "Baki", + "bkj": "Pande", + "bkk": "Brokskat", + "bkl": "Berik", + "bkm": "Kom (Cameroon)", + "bkn": "Bukitan", + "bko": "Kwa'", + "bkp": "Boko (Democratic Republic of Congo)", + "bkq": "Bakairí", + "bkr": "Bakumpai", + "bks": "Northern Sorsoganon", + "bkt": "Boloki", + "bku": "Buhid", + "bkv": "Bekwarra", + "bkw": "Bekwel", + "bkx": "Baikeno", + "bky": "Bokyi", + "bkz": "Bungku", + "bla": "Siksika", + "blb": "Bilua", + "blc": "Bella Coola", + "bld": "Bolango", + "ble": "Balanta-Kentohe", + "blf": "Buol", + "blh": "Kuwaa", + "bli": "Bolia", + "blj": "Bolongan", + "blk": "Pa'o Karen; Pa'O", + "bll": "Biloxi", + "blm": "Beli (South Sudan)", + "bln": "Southern Catanduanes Bikol", + "blo": "Anii", + "blp": "Blablanga", + "blq": "Baluan-Pam", + "blr": "Blang", + "bls": "Balaesang", + "blt": "Tai Dam", + "blv": "Kibala; Bolo", + "blw": "Balangao", + "blx": "Mag-Indi Ayta", + "bly": "Notre", + "blz": "Balantak", + "bm": "Bambara", + "bma": "Lame", + "bmb": "Bembe", + "bmc": "Biem", + "bmd": "Baga Manduri", + "bme": "Limassa", + "bmf": "Bom-Kim", + "bmg": "Bamwe", + "bmh": "Kein", + "bmi": "Bagirmi", + "bmj": "Bote-Majhi", + "bmk": "Ghayavi", + "bml": "Bomboli", + "bmm": "Northern Betsimisaraka Malagasy", + "bmn": "Bina (Papua New Guinea)", + "bmo": "Bambalang", + "bmp": "Bulgebi", + "bmq": "Bomu", + "bmr": "Muinane", + "bms": "Bilma Kanuri", + "bmt": "Biao Mon", + "bmu": "Somba-Siawari", + "bmv": "Bum", + "bmw": "Bomwali", + "bmx": "Baimak", + "bmz": "Baramu", + "bn": "Bengali; Bangla", + "bna": "Bonerate", + "bnb": "Bookan", + "bnc": "Bontok", + "bnd": "Banda (Indonesia)", + "bne": "Bintauna", + "bnf": "Masiwang", + "bng": "Benga", + "bni": "Bangi", + "bnj": "Eastern Tawbuid", + "bnk": "Bierebo", + "bnl": "Boon", + "bnm": 
"Batanga", + "bnn": "Bunun", + "bno": "Bantoanon", + "bnp": "Bola", + "bnq": "Bantik", + "bnr": "Butmas-Tur", + "bns": "Bundeli", + "bnt": "Bantu languages", + "bnu": "Bentong", + "bnv": "Bonerif; Beneraf; Edwas", + "bnw": "Bisis", + "bnx": "Bangubangu", + "bny": "Bintulu", + "bnz": "Beezen", + "bo": "Tibetan", + "boa": "Bora", + "bob": "Aweer", + "boe": "Mundabli", + "bof": "Bolon", + "bog": "Bamako Sign Language", + "boh": "Boma", + "boi": "Barbareño", + "boj": "Anjam", + "bok": "Bonjo", + "bol": "Bole", + "bom": "Berom", + "bon": "Bine", + "boo": "Tiemacèwè Bozo", + "bop": "Bonkiman", + "boq": "Bogaya", + "bor": "Borôro", + "bot": "Bongo", + "bou": "Bondei", + "bov": "Tuwuli", + "bow": "Rema", + "box": "Buamu", + "boy": "Bodo (Central African Republic)", + "boz": "Tiéyaxo Bozo", + "bpa": "Daakaka", + "bpc": "Mbuk", + "bpd": "Banda-Banda", + "bpe": "Bauni", + "bpg": "Bonggo", + "bph": "Botlikh", + "bpi": "Bagupi", + "bpj": "Binji", + "bpk": "Orowe; 'Ôrôê", + "bpl": "Broome Pearling Lugger Pidgin", + "bpm": "Biyom", + "bpn": "Dzao Min", + "bpo": "Anasi", + "bpp": "Kaure", + "bpq": "Banda Malay", + "bpr": "Koronadal Blaan", + "bps": "Sarangani Blaan", + "bpt": "Barrow Point", + "bpu": "Bongu", + "bpv": "Bian Marind", + "bpw": "Bo (Papua New Guinea)", + "bpx": "Palya Bareli", + "bpy": "Bishnupriya", + "bpz": "Bilba", + "bqa": "Tchumbuli", + "bqb": "Bagusa", + "bqc": "Boko (Benin); Boo", + "bqd": "Bung", + "bqf": "Baga Kaloum", + "bqg": "Bago-Kusuntu", + "bqh": "Baima", + "bqi": "Bakhtiari", + "bqj": "Bandial", + "bqk": "Banda-Mbrès", + "bql": "Bilakura", + "bqm": "Wumboko", + "bqn": "Bulgarian Sign Language", + "bqo": "Balo", + "bqp": "Busa", + "bqq": "Biritai", + "bqr": "Burusu", + "bqs": "Bosngun", + "bqt": "Bamukumbit", + "bqu": "Boguru", + "bqv": "Koro Wachi; Begbere-Ejar", + "bqw": "Buru (Nigeria)", + "bqx": "Baangi", + "bqy": "Bengkala Sign Language", + "bqz": "Bakaka", + "br": "Breton", + "bra": "Braj", + "brb": "Brao; Lave", + "brc": "Berbice Creole Dutch", 
+ "brd": "Baraamu", + "brf": "Bira", + "brg": "Baure", + "brh": "Brahui", + "bri": "Mokpwe", + "brj": "Bieria", + "brk": "Birked", + "brl": "Birwa", + "brm": "Barambu", + "brn": "Boruca", + "bro": "Brokkat", + "brp": "Barapasi", + "brq": "Breri", + "brr": "Birao", + "brs": "Baras", + "brt": "Bitare", + "bru": "Eastern Bru", + "brv": "Western Bru", + "brw": "Bellari", + "brx": "Bodo (India)", + "bry": "Burui", + "brz": "Bilbil", + "bs": "Bosnian", + "bsa": "Abinomn", + "bsb": "Brunei Bisaya", + "bsc": "Bassari; Oniyan", + "bse": "Wushi", + "bsf": "Bauchi", + "bsg": "Bashkardi", + "bsh": "Kati", + "bsi": "Bassossi", + "bsj": "Bangwinji", + "bsk": "Burushaski", + "bsl": "Basa-Gumna", + "bsm": "Busami", + "bsn": "Barasana-Eduria", + "bso": "Buso", + "bsp": "Baga Sitemu", + "bsq": "Bassa", + "bsr": "Bassa-Kontagora", + "bss": "Akoose", + "bst": "Basketo", + "bsu": "Bahonsuai", + "bsv": "Baga Sobané", + "bsw": "Baiso", + "bsx": "Yangkam", + "bsy": "Sabah Bisaya", + "bta": "Bata", + "btc": "Bati (Cameroon)", + "btd": "Batak Dairi", + "bte": "Gamo-Ningi", + "btf": "Birgit", + "btg": "Gagnoa Bété", + "bth": "Biatah Bidayuh", + "bti": "Burate", + "btj": "Bacanese Malay", + "btk": "Batak languages", + "btm": "Batak Mandailing", + "btn": "Ratagnon", + "bto": "Rinconada Bikol", + "btp": "Budibud", + "btq": "Batek", + "btr": "Baetora", + "bts": "Batak Simalungun", + "btt": "Bete-Bendi", + "btu": "Batu", + "btv": "Bateri", + "btw": "Butuanon", + "btx": "Batak Karo", + "bty": "Bobot", + "btz": "Batak Alas-Kluet", + "bua": "Buriat", + "bub": "Bua", + "buc": "Bushi", + "bud": "Ntcham", + "bue": "Beothuk", + "buf": "Bushoong", + "bug": "Buginese", + "buh": "Younuo Bunu", + "bui": "Bongili", + "buj": "Basa-Gurmana", + "buk": "Bugawac", + "bum": "Bulu (Cameroon)", + "bun": "Sherbro", + "buo": "Terei", + "bup": "Busoa", + "buq": "Brem", + "bus": "Bokobaru", + "but": "Bungain", + "buu": "Budu", + "buv": "Bun", + "buw": "Bubi", + "bux": "Boghom", + "buy": "Bullom So", + "buz": "Bukwen", + 
"bva": "Barein", + "bvb": "Bube", + "bvc": "Baelelea", + "bvd": "Baeggu", + "bve": "Berau Malay", + "bvf": "Boor", + "bvg": "Bonkeng", + "bvh": "Bure", + "bvi": "Belanda Viri", + "bvj": "Baan", + "bvk": "Bukat", + "bvl": "Bolivian Sign Language", + "bvm": "Bamunka", + "bvn": "Buna", + "bvo": "Bolgo", + "bvp": "Bumang", + "bvq": "Birri", + "bvr": "Burarra", + "bvt": "Bati (Indonesia)", + "bvu": "Bukit Malay", + "bvv": "Baniva", + "bvw": "Boga", + "bvx": "Dibole", + "bvy": "Baybayanon", + "bvz": "Bauzi", + "bwa": "Bwatoo", + "bwb": "Namosi-Naitasiri-Serua", + "bwc": "Bwile", + "bwd": "Bwaidoka", + "bwe": "Bwe Karen", + "bwf": "Boselewa", + "bwg": "Barwe", + "bwh": "Bishuo", + "bwi": "Baniwa", + "bwj": "Láá Láá Bwamu", + "bwk": "Bauwaki", + "bwl": "Bwela", + "bwm": "Biwat", + "bwn": "Wunai Bunu", + "bwo": "Boro (Ethiopia); Borna (Ethiopia)", + "bwp": "Mandobo Bawah", + "bwq": "Southern Bobo Madaré", + "bwr": "Bura-Pabir", + "bws": "Bomboma", + "bwt": "Bafaw-Balong", + "bwu": "Buli (Ghana)", + "bww": "Bwa", + "bwx": "Bu-Nao Bunu", + "bwy": "Cwi Bwamu", + "bwz": "Bwisi", + "bxa": "Tairaha", + "bxb": "Belanda Bor", + "bxc": "Molengue", + "bxd": "Pela", + "bxe": "Birale", + "bxf": "Bilur; Minigir", + "bxg": "Bangala", + "bxh": "Buhutu", + "bxi": "Pirlatapa", + "bxj": "Bayungu", + "bxk": "Bukusu; Lubukusu", + "bxl": "Jalkunan", + "bxm": "Mongolia Buriat", + "bxn": "Burduna", + "bxo": "Barikanchi", + "bxp": "Bebil", + "bxq": "Beele", + "bxr": "Russia Buriat", + "bxs": "Busam", + "bxu": "China Buriat", + "bxv": "Berakou", + "bxw": "Bankagooma", + "bxz": "Binahari", + "bya": "Batak", + "byb": "Bikya", + "byc": "Ubaghara", + "byd": "Benyadu'", + "bye": "Pouye", + "byf": "Bete", + "byg": "Baygo", + "byh": "Bhujel", + "byi": "Buyu", + "byj": "Bina (Nigeria)", + "byk": "Biao", + "byl": "Bayono", + "bym": "Bidjara", + "byn": "Bilin; Blin", + "byo": "Biyo", + "byp": "Bumaji", + "byq": "Basay", + "byr": "Baruya; Yipma", + "bys": "Burak", + "byt": "Berti", + "byv": "Medumba", + 
"byw": "Belhariya", + "byx": "Qaqet", + "byz": "Banaro", + "bza": "Bandi", + "bzb": "Andio", + "bzc": "Southern Betsimisaraka Malagasy", + "bzd": "Bribri", + "bze": "Jenaama Bozo", + "bzf": "Boikin", + "bzg": "Babuza", + "bzh": "Mapos Buang", + "bzi": "Bisu", + "bzj": "Belize Kriol English", + "bzk": "Nicaragua Creole English", + "bzl": "Boano (Sulawesi)", + "bzm": "Bolondo", + "bzn": "Boano (Maluku)", + "bzo": "Bozaba", + "bzp": "Kemberano", + "bzq": "Buli (Indonesia)", + "bzr": "Biri", + "bzs": "Brazilian Sign Language", + "bzt": "Brithenig", + "bzu": "Burmeso", + "bzv": "Naami", + "bzw": "Basa (Nigeria)", + "bzx": "Kɛlɛngaxo Bozo", + "bzy": "Obanliku", + "bzz": "Evant", + "ca": "Catalan; Valencian", + "caa": "Chortí", + "cab": "Garifuna", + "cac": "Chuj", + "cad": "Caddo", + "cae": "Lehar; Laalaa", + "caf": "Southern Carrier", + "cag": "Nivaclé", + "cah": "Cahuarano", + "cai": "Central American Indian languages", + "caj": "Chané", + "cak": "Kaqchikel; Cakchiquel", + "cal": "Carolinian", + "cam": "Cemuhî", + "can": "Chambri", + "cao": "Chácobo", + "cap": "Chipaya", + "caq": "Car Nicobarese", + "car": "Galibi Carib", + "cas": "Tsimané", + "cau": "Caucasian languages", + "cav": "Cavineña", + "caw": "Callawalla", + "cax": "Chiquitano", + "cay": "Cayuga", + "caz": "Canichana", + "cba": "Chibchan languages", + "cbb": "Cabiyarí", + "cbc": "Carapana", + "cbd": "Carijona", + "cbg": "Chimila", + "cbi": "Chachi", + "cbj": "Ede Cabe", + "cbk": "Chavacano", + "cbl": "Bualkhaw Chin", + "cbn": "Nyahkur", + "cbo": "Izora", + "cbq": "Tsucuba; Cuba", + "cbr": "Cashibo-Cacataibo", + "cbs": "Cashinahua", + "cbt": "Chayahuita", + "cbu": "Candoshi-Shapra", + "cbv": "Cacua", + "cbw": "Kinabalian", + "cby": "Carabayo", + "ccc": "Chamicuro", + "ccd": "Cafundo Creole", + "cce": "Chopi", + "ccg": "Samba Daka", + "cch": "Atsam", + "ccj": "Kasanga", + "ccl": "Cutchi-Swahili", + "ccm": "Malaccan Creole Malay", + "ccn": "North Caucasian languages", + "cco": "Comaltepec Chinantec", + "ccp": 
"Chakma", + "ccr": "Cacaopera", + "ccs": "South Caucasian languages", + "cda": "Choni", + "cdc": "Chadic languages", + "cdd": "Caddoan languages", + "cde": "Chenchu", + "cdf": "Chiru", + "cdh": "Chambeali", + "cdi": "Chodri", + "cdj": "Churahi", + "cdm": "Chepang", + "cdn": "Chaudangsi", + "cdo": "Min Dong Chinese", + "cdr": "Cinda-Regi-Tiyal", + "cds": "Chadian Sign Language", + "cdy": "Chadong", + "cdz": "Koda", + "ce": "Chechen", + "cea": "Lower Chehalis", + "ceb": "Cebuano", + "ceg": "Chamacoco", + "cek": "Eastern Khumi Chin", + "cel": "Celtic languages", + "cen": "Cen", + "cet": "Centúúm", + "cey": "Ekai Chin", + "cfa": "Dijim-Bwilim", + "cfd": "Cara", + "cfg": "Como Karim", + "cfm": "Falam Chin", + "cga": "Changriwa", + "cgc": "Kagayanen", + "cgg": "Chiga", + "cgk": "Chocangacakha", + "ch": "Chamorro", + "chb": "Chibcha", + "chc": "Catawba", + "chd": "Highland Oaxaca Chontal", + "chf": "Tabasco Chontal", + "chg": "Chagatai", + "chh": "Chinook", + "chj": "Ojitlán Chinantec", + "chk": "Chuukese", + "chl": "Cahuilla", + "chm": "Mari (Russia)", + "chn": "Chinook jargon", + "cho": "Choctaw", + "chp": "Chipewyan; Dene Suline", + "chq": "Quiotepec Chinantec", + "chr": "Cherokee", + "cht": "Cholón", + "chw": "Chuwabu", + "chx": "Chantyal", + "chy": "Cheyenne", + "chz": "Ozumacín Chinantec", + "cia": "Cia-Cia", + "cib": "Ci Gbe", + "cic": "Chickasaw", + "cid": "Chimariko", + "cie": "Cineni", + "cih": "Chinali", + "cik": "Chitkuli Kinnauri", + "cim": "Cimbrian", + "cin": "Cinta Larga", + "cip": "Chiapanec", + "cir": "Tiri; Haméa; Méa", + "ciw": "Chippewa", + "ciy": "Chaima", + "cja": "Western Cham", + "cje": "Chru", + "cjh": "Upper Chehalis", + "cji": "Chamalal", + "cjk": "Chokwe", + "cjm": "Eastern Cham", + "cjn": "Chenapian", + "cjo": "Ashéninka Pajonal", + "cjp": "Cabécar", + "cjs": "Shor", + "cjv": "Chuave", + "cjy": "Jinyu Chinese", + "ckb": "Central Kurdish", + "ckh": "Chak", + "ckl": "Cibak", + "ckm": "Chakavian", + "ckn": "Kaang Chin", + "cko": "Anufo", + 
"ckq": "Kajakse", + "ckr": "Kairak", + "cks": "Tayo", + "ckt": "Chukot", + "cku": "Koasati", + "ckv": "Kavalan", + "ckx": "Caka", + "cky": "Cakfem-Mushere", + "ckz": "Cakchiquel-Quiché Mixed Language", + "cla": "Ron", + "clc": "Chilcotin", + "cld": "Chaldean Neo-Aramaic", + "cle": "Lealao Chinantec", + "clh": "Chilisso", + "cli": "Chakali", + "clj": "Laitu Chin", + "clk": "Idu-Mishmi", + "cll": "Chala", + "clm": "Clallam", + "clo": "Lowland Oaxaca Chontal", + "clt": "Lautu Chin", + "clu": "Caluyanun", + "clw": "Chulym", + "cly": "Eastern Highland Chatino", + "cma": "Maa", + "cmc": "Chamic languages", + "cme": "Cerma", + "cmg": "Classical Mongolian", + "cmi": "Emberá-Chamí", + "cml": "Campalagian", + "cmm": "Michigamea", + "cmn": "Mandarin Chinese", + "cmo": "Central Mnong", + "cmr": "Mro-Khimi Chin", + "cms": "Messapic", + "cmt": "Camtho", + "cna": "Changthang", + "cnb": "Chinbon Chin", + "cnc": "Côông", + "cng": "Northern Qiang", + "cnh": "Hakha Chin; Haka Chin", + "cni": "Asháninka", + "cnk": "Khumi Chin", + "cnl": "Lalana Chinantec", + "cno": "Con", + "cnp": "Northern Ping Chinese; Northern Pinghua", + "cnq": "Chung", + "cnr": "Montenegrin", + "cns": "Central Asmat", + "cnt": "Tepetotutla Chinantec", + "cnu": "Chenoua", + "cnw": "Ngawn Chin", + "cnx": "Middle Cornish", + "co": "Corsican", + "coa": "Cocos Islands Malay", + "cob": "Chicomuceltec", + "coc": "Cocopa", + "cod": "Cocama-Cocamilla", + "coe": "Koreguaje", + "cof": "Colorado", + "cog": "Chong", + "coh": "Chonyi-Dzihana-Kauma; Chichonyi-Chidzihana-Chikauma", + "coj": "Cochimi", + "cok": "Santa Teresa Cora", + "col": "Columbia-Wenatchi", + "com": "Comanche", + "con": "Cofán", + "coo": "Comox", + "cop": "Coptic", + "coq": "Coquille", + "cot": "Caquinte", + "cou": "Wamey", + "cov": "Cao Miao", + "cow": "Cowlitz", + "cox": "Nanti", + "coz": "Chochotec", + "cpa": "Palantla Chinantec", + "cpb": "Ucayali-Yurúa Ashéninka", + "cpc": "Ajyíninka Apurucayali", + "cpe": "English-based creoles and pidgins", + "cpf": 
"French-based creoles and pidgins", + "cpg": "Cappadocian Greek", + "cpi": "Chinese Pidgin English", + "cpn": "Cherepon", + "cpo": "Kpeego", + "cpp": "Portuguese-based creoles and pidgins", + "cps": "Capiznon", + "cpu": "Pichis Ashéninka", + "cpx": "Pu-Xian Chinese", + "cpy": "South Ucayali Ashéninka", + "cqd": "Chuanqiandian Cluster Miao", + "cr": "Cree", + "cra": "Chara", + "crb": "Island Carib", + "crc": "Lonwolwol", + "crd": "Coeur d'Alene", + "crf": "Caramanta", + "crg": "Michif", + "crh": "Crimean Tatar; Crimean Turkish", + "cri": "Sãotomense", + "crj": "Southern East Cree", + "crk": "Plains Cree", + "crl": "Northern East Cree", + "crm": "Moose Cree", + "crn": "El Nayar Cora", + "cro": "Crow", + "crp": "Creoles and pidgins", + "crq": "Iyo'wujwa Chorote", + "crr": "Carolina Algonquian", + "crs": "Seselwa Creole French", + "crt": "Iyojwa'ja Chorote", + "crv": "Chaura", + "crw": "Chrau", + "crx": "Carrier", + "cry": "Cori", + "crz": "Cruzeño", + "cs": "Czech", + "csa": "Chiltepec Chinantec", + "csb": "Kashubian", + "csc": "Catalan Sign Language; Lengua de señas catalana; Llengua de Signes Catalana", + "csd": "Chiangmai Sign Language", + "cse": "Czech Sign Language", + "csf": "Cuba Sign Language", + "csg": "Chilean Sign Language", + "csh": "Asho Chin", + "csi": "Coast Miwok", + "csj": "Songlai Chin", + "csk": "Jola-Kasa", + "csl": "Chinese Sign Language", + "csm": "Central Sierra Miwok", + "csn": "Colombian Sign Language", + "cso": "Sochiapam Chinantec; Sochiapan Chinantec", + "csp": "Southern Ping Chinese; Southern Pinghua", + "csq": "Croatia Sign Language", + "csr": "Costa Rican Sign Language", + "css": "Southern Ohlone", + "cst": "Northern Ohlone", + "csu": "Central Sudanic languages", + "csv": "Sumtu Chin", + "csw": "Swampy Cree", + "csx": "Cambodian Sign Language", + "csy": "Siyin Chin", + "csz": "Coos", + "cta": "Tataltepec Chatino", + "ctc": "Chetco", + "ctd": "Tedim Chin", + "cte": "Tepinapa Chinantec", + "ctg": "Chittagonian", + "cth": "Thaiphum Chin", + 
"ctl": "Tlacoatzintepec Chinantec", + "ctm": "Chitimacha", + "ctn": "Chhintange", + "cto": "Emberá-Catío", + "ctp": "Western Highland Chatino", + "cts": "Northern Catanduanes Bikol", + "ctt": "Wayanad Chetti", + "ctu": "Chol", + "cty": "Moundadan Chetty", + "ctz": "Zacatepec Chatino", + "cu": "Church Slavic; Church Slavonic; Old Bulgarian; Old Church Slavonic; Old Slavonic", + "cua": "Cua", + "cub": "Cubeo", + "cuc": "Usila Chinantec", + "cuh": "Chuka; Gichuka", + "cui": "Cuiba", + "cuj": "Mashco Piro", + "cuk": "San Blas Kuna", + "cul": "Culina; Kulina", + "cuo": "Cumanagoto", + "cup": "Cupeño", + "cuq": "Cun", + "cur": "Chhulung", + "cus": "Cushitic languages", + "cut": "Teutila Cuicatec", + "cuu": "Tai Ya", + "cuv": "Cuvok", + "cuw": "Chukwa", + "cux": "Tepeuxila Cuicatec", + "cuy": "Cuitlatec", + "cv": "Chuvash", + "cvg": "Chug", + "cvn": "Valle Nacional Chinantec", + "cwa": "Kabwa", + "cwb": "Maindo", + "cwd": "Woods Cree", + "cwe": "Kwere", + "cwg": "Chewong; Cheq Wong", + "cwt": "Kuwaataay", + "cy": "Welsh", + "cya": "Nopala Chatino", + "cyb": "Cayubaba", + "cyo": "Cuyonon", + "czh": "Huizhou Chinese", + "czk": "Knaanic", + "czn": "Zenzontepec Chatino", + "czo": "Min Zhong Chinese", + "czt": "Zotung Chin", + "da": "Danish", + "daa": "Dangaléat", + "dac": "Dambi", + "dad": "Marik", + "dae": "Duupa", + "dag": "Dagbani", + "dah": "Gwahatike", + "dai": "Day", + "daj": "Dar Fur Daju", + "dak": "Dakota", + "dal": "Dahalo", + "dam": "Damakawa", + "dao": "Daai Chin", + "daq": "Dandami Maria", + "dar": "Dargwa", + "das": "Daho-Doo", + "dau": "Dar Sila Daju", + "dav": "Taita; Dawida", + "daw": "Davawenyo", + "dax": "Dayi", + "day": "Land Dayak languages", + "daz": "Dao", + "dba": "Bangime", + "dbb": "Deno", + "dbd": "Dadiya", + "dbe": "Dabe", + "dbf": "Edopi", + "dbg": "Dogul Dom Dogon", + "dbi": "Doka", + "dbj": "Ida'an", + "dbl": "Dyirbal", + "dbm": "Duguri", + "dbn": "Duriankere", + "dbo": "Dulbu", + "dbp": "Duwai", + "dbq": "Daba", + "dbr": "Dabarre", + "dbt": 
"Ben Tey Dogon", + "dbu": "Bondum Dom Dogon", + "dbv": "Dungu", + "dbw": "Bankan Tey Dogon", + "dby": "Dibiyaso", + "dcc": "Deccan", + "dcr": "Negerhollands", + "dda": "Dadi Dadi", + "ddd": "Dongotono", + "dde": "Doondo", + "ddg": "Fataluku", + "ddi": "West Goodenough", + "ddj": "Jaru", + "ddn": "Dendi (Benin)", + "ddo": "Dido", + "ddr": "Dhudhuroa", + "dds": "Donno So Dogon", + "ddw": "Dawera-Daweloor", + "de": "German", + "dec": "Dagik", + "ded": "Dedua", + "dee": "Dewoin", + "def": "Dezfuli", + "deg": "Degema", + "deh": "Dehwari", + "dei": "Demisa", + "dek": "Dek", + "del": "Delaware", + "dem": "Dem", + "den": "Slave (Athapascan)", + "dep": "Pidgin Delaware", + "deq": "Dendi (Central African Republic)", + "der": "Deori", + "des": "Desano", + "dev": "Domung", + "dez": "Dengese", + "dga": "Southern Dagaare", + "dgb": "Bunoge Dogon", + "dgc": "Casiguran Dumagat Agta", + "dgd": "Dagaari Dioula", + "dge": "Degenan", + "dgg": "Doga", + "dgh": "Dghwede", + "dgi": "Northern Dagara", + "dgk": "Dagba", + "dgl": "Andaandi; Dongolawi", + "dgn": "Dagoman", + "dgo": "Dogri (individual language)", + "dgr": "Dogrib; Tłı̨chǫ", + "dgs": "Dogoso", + "dgt": "Ndra'ngith", + "dgw": "Daungwurrung", + "dgx": "Doghoro", + "dgz": "Daga", + "dhd": "Dhundari", + "dhg": "Dhangu-Djangu; Dhangu; Djangu", + "dhi": "Dhimal", + "dhl": "Dhalandji", + "dhm": "Zemba", + "dhn": "Dhanki", + "dho": "Dhodia", + "dhr": "Dhargari", + "dhs": "Dhaiso", + "dhu": "Dhurga", + "dhv": "Dehu; Drehu", + "dhw": "Dhanwar (Nepal)", + "dhx": "Dhungaloo", + "dia": "Dia", + "dib": "South Central Dinka", + "dic": "Lakota Dida", + "did": "Didinga", + "dif": "Dieri; Diyari", + "dig": "Digo; Chidigo", + "dih": "Kumiai", + "dii": "Dimbong", + "dij": "Dai", + "dik": "Southwestern Dinka", + "dil": "Dilling", + "dim": "Dime", + "din": "Dinka", + "dio": "Dibo", + "dip": "Northeastern Dinka", + "diq": "Dimli (individual language)", + "dir": "Dirim", + "dis": "Dimasa", + "diu": "Diriku", + "diw": "Northwestern Dinka", + "dix": 
"Dixon Reef", + "diy": "Diuwe", + "diz": "Ding", + "dja": "Djadjawurrung", + "djb": "Djinba", + "djc": "Dar Daju Daju", + "djd": "Djamindjung; Ngaliwurru", + "dje": "Zarma", + "djf": "Djangun", + "dji": "Djinang", + "djj": "Djeebbana", + "djk": "Eastern Maroon Creole; Businenge Tongo; Nenge", + "djm": "Jamsay Dogon", + "djn": "Jawoyn; Djauan", + "djo": "Jangkang", + "djr": "Djambarrpuyngu", + "dju": "Kapriman", + "djw": "Djawi", + "dka": "Dakpakha", + "dkg": "Kadung", + "dkk": "Dakka", + "dkr": "Kuijau", + "dks": "Southeastern Dinka", + "dkx": "Mazagway", + "dlg": "Dolgan", + "dlk": "Dahalik", + "dlm": "Dalmatian", + "dln": "Darlong", + "dma": "Duma", + "dmb": "Mombo Dogon", + "dmc": "Gavak", + "dmd": "Madhi Madhi", + "dme": "Dugwor", + "dmf": "Medefaidrin", + "dmg": "Upper Kinabatangan", + "dmk": "Domaaki", + "dml": "Dameli", + "dmm": "Dama", + "dmn": "Mande languages", + "dmo": "Kemedzung", + "dmr": "East Damar", + "dms": "Dampelas", + "dmu": "Dubu; Tebi", + "dmv": "Dumpas", + "dmw": "Mudburra", + "dmx": "Dema", + "dmy": "Demta; Sowari", + "dna": "Upper Grand Valley Dani", + "dnd": "Daonda", + "dne": "Ndendeule", + "dng": "Dungan", + "dni": "Lower Grand Valley Dani", + "dnj": "Dan", + "dnk": "Dengka", + "dnn": "Dzùùngoo", + "dno": "Ndrulo; Northern Lendu", + "dnr": "Danaru", + "dnt": "Mid Grand Valley Dani", + "dnu": "Danau", + "dnv": "Danu", + "dnw": "Western Dani", + "dny": "Dení", + "doa": "Dom", + "dob": "Dobu", + "doc": "Northern Dong", + "doe": "Doe", + "dof": "Domu", + "doh": "Dong", + "doi": "Dogri (macrolanguage)", + "dok": "Dondo", + "dol": "Doso", + "don": "Toura (Papua New Guinea)", + "doo": "Dongo", + "dop": "Lukpa", + "doq": "Dominican Sign Language", + "dor": "Dori'o", + "dos": "Dogosé", + "dot": "Dass", + "dov": "Dombe", + "dow": "Doyayo", + "dox": "Bussa", + "doy": "Dompo", + "doz": "Dorze", + "dpp": "Papar", + "dra": "Dravidian languages", + "drb": "Dair", + "drc": "Minderico", + "drd": "Darmiya", + "dre": "Dolpo", + "drg": "Rungus", + "dri": 
"C'Lela", + "drl": "Paakantyi", + "drn": "West Damar", + "dro": "Daro-Matu Melanau", + "drq": "Dura", + "drs": "Gedeo", + "drt": "Drents", + "dru": "Rukai", + "dry": "Darai", + "dsb": "Lower Sorbian", + "dse": "Dutch Sign Language", + "dsh": "Daasanach", + "dsi": "Disa", + "dsl": "Danish Sign Language", + "dsn": "Dusner", + "dso": "Desiya", + "dsq": "Tadaksahak", + "dsz": "Mardin Sign Language", + "dta": "Daur", + "dtb": "Labuk-Kinabatangan Kadazan", + "dtd": "Ditidaht", + "dth": "Adithinngithigh", + "dti": "Ana Tinga Dogon", + "dtk": "Tene Kan Dogon", + "dtm": "Tomo Kan Dogon", + "dtn": "Daatsʼíin", + "dto": "Tommo So Dogon", + "dtp": "Kadazan Dusun; Central Dusun", + "dtr": "Lotud", + "dts": "Toro So Dogon", + "dtt": "Toro Tegu Dogon", + "dtu": "Tebul Ure Dogon", + "dty": "Dotyali", + "dua": "Duala", + "dub": "Dubli", + "duc": "Duna", + "due": "Umiray Dumaget Agta", + "duf": "Dumbea; Drubea", + "dug": "Duruma; Chiduruma", + "duh": "Dungra Bhil", + "dui": "Dumun", + "duk": "Uyajitaya", + "dul": "Alabat Island Agta", + "dum": "Middle Dutch (ca. 
1050-1350)", + "dun": "Dusun Deyah", + "duo": "Dupaninan Agta", + "dup": "Duano", + "duq": "Dusun Malang", + "dur": "Dii", + "dus": "Dumi", + "duu": "Drung", + "duv": "Duvle", + "duw": "Dusun Witu", + "dux": "Duungooma", + "duy": "Dicamay Agta", + "duz": "Duli-Gey", + "dv": "Dhivehi; Divehi; Maldivian", + "dva": "Duau", + "dwa": "Diri", + "dwk": "Dawik Kui", + "dwr": "Dawro", + "dws": "Dutton World Speedwords", + "dwu": "Dhuwal", + "dww": "Dawawa", + "dwy": "Dhuwaya", + "dwz": "Dewas Rai", + "dya": "Dyan", + "dyb": "Dyaberdyaber", + "dyd": "Dyugun", + "dyg": "Villa Viciosa Agta", + "dyi": "Djimini Senoufo", + "dym": "Yanda Dom Dogon", + "dyn": "Dyangadi; Dhanggatti", + "dyo": "Jola-Fonyi", + "dyu": "Dyula", + "dyy": "Djabugay; Dyaabugay", + "dz": "Dzongkha", + "dza": "Tunzu", + "dze": "Djiwarli", + "dzg": "Dazaga", + "dzl": "Dzalakha", + "dzn": "Dzando", + "eaa": "Karenggapa", + "ebc": "Beginci", + "ebg": "Ebughu", + "ebk": "Eastern Bontok", + "ebo": "Teke-Ebo", + "ebr": "Ebrié", + "ebu": "Embu; Kiembu", + "ecr": "Eteocretan", + "ecs": "Ecuadorian Sign Language", + "ecy": "Eteocypriot", + "ee": "Ewe", + "eee": "E", + "efa": "Efai", + "efe": "Efe", + "efi": "Efik", + "ega": "Ega", + "egl": "Emilian", + "egm": "Benamanga", + "ego": "Eggon", + "egx": "Egyptian languages", + "egy": "Egyptian (Ancient)", + "ehs": "Miyakubo Sign Language", + "ehu": "Ehueun", + "eip": "Eipomek", + "eit": "Eitiep", + "eiv": "Askopan", + "eja": "Ejamat", + "eka": "Ekajuk", + "eke": "Ekit", + "ekg": "Ekari", + "eki": "Eki", + "ekk": "Standard Estonian", + "ekl": "Kol (Bangladesh); Kol", + "ekm": "Elip", + "eko": "Koti", + "ekp": "Ekpeye", + "ekr": "Yace", + "eky": "Eastern Kayah", + "el": "Modern Greek (1453-)", + "ele": "Elepi", + "elh": "El Hugeirat", + "eli": "Nding", + "elk": "Elkei", + "elm": "Eleme", + "elo": "El Molo", + "elu": "Elu", + "elx": "Elamite", + "ema": "Emai-Iuleha-Ora", + "emb": "Embaloh", + "eme": "Emerillon", + "emg": "Eastern Meohang", + "emi": "Mussau-Emira", + "emk": 
"Eastern Maninkakan", + "emm": "Mamulique", + "emn": "Eman", + "emp": "Northern Emberá", + "emq": "Eastern Minyag", + "ems": "Pacific Gulf Yupik", + "emu": "Eastern Muria", + "emw": "Emplawas", + "emx": "Erromintxela", + "emy": "Epigraphic Mayan", + "emz": "Mbessa", + "en": "English", + "ena": "Apali", + "enb": "Markweeta", + "enc": "En", + "end": "Ende", + "enf": "Forest Enets", + "enh": "Tundra Enets", + "enl": "Enlhet", + "enm": "Middle English (1100-1500)", + "enn": "Engenni", + "eno": "Enggano", + "enq": "Enga", + "enr": "Emumu; Emem", + "enu": "Enu", + "env": "Enwan (Edo State)", + "enw": "Enwan (Akwa Ibom State)", + "enx": "Enxet", + "eo": "Esperanto", + "eot": "Beti (Côte d'Ivoire)", + "epi": "Epie", + "era": "Eravallan", + "erg": "Sie", + "erh": "Eruwa", + "eri": "Ogea", + "erk": "South Efate", + "ero": "Horpa", + "err": "Erre", + "ers": "Ersu", + "ert": "Eritai", + "erw": "Erokwanas", + "es": "Spanish; Castilian", + "ese": "Ese Ejja", + "esg": "Aheri Gondi", + "esh": "Eshtehardi", + "esi": "North Alaskan Inupiatun", + "esk": "Northwest Alaska Inupiatun", + "esl": "Egypt Sign Language", + "esm": "Esuma", + "esn": "Salvadoran Sign Language", + "eso": "Estonian Sign Language", + "esq": "Esselen", + "ess": "Central Siberian Yupik", + "esu": "Central Yupik", + "esx": "Eskimo-Aleut languages", + "esy": "Eskayan", + "et": "Estonian", + "etb": "Etebi", + "etc": "Etchemin", + "eth": "Ethiopian Sign Language", + "etn": "Eton (Vanuatu)", + "eto": "Eton (Cameroon)", + "etr": "Edolo", + "ets": "Yekhee", + "ett": "Etruscan", + "etu": "Ejagham", + "etx": "Eten", + "etz": "Semimi", + "eu": "Basque", + "euq": "Basque (family)", + "eve": "Even", + "evh": "Uvbie", + "evn": "Evenki", + "ewo": "Ewondo", + "ext": "Extremaduran", + "eya": "Eyak", + "eyo": "Keiyo", + "eza": "Ezaa", + "eze": "Uzekwe", + "fa": "Persian", + "faa": "Fasu", + "fab": "Fa d'Ambu", + "fad": "Wagi", + "faf": "Fagani", + "fag": "Finongan", + "fah": "Baissa Fali", + "fai": "Faiwol", + "faj": "Faita", + 
"fak": "Fang (Cameroon)", + "fal": "South Fali", + "fam": "Fam", + "fan": "Fang (Equatorial Guinea)", + "fap": "Paloor", + "far": "Fataleka", + "fat": "Fanti", + "fau": "Fayu", + "fax": "Fala", + "fay": "Southwestern Fars", + "faz": "Northwestern Fars", + "fbl": "West Albay Bikol", + "fcs": "Quebec Sign Language", + "fer": "Feroge", + "ff": "Fulah", + "ffi": "Foia Foia", + "ffm": "Maasina Fulfulde", + "fgr": "Fongoro", + "fi": "Finnish", + "fia": "Nobiin", + "fie": "Fyer", + "fif": "Faifi", + "fil": "Filipino; Pilipino", + "fip": "Fipa", + "fir": "Firan", + "fit": "Tornedalen Finnish; Meänkieli", + "fiu": "Finno-Ugrian languages", + "fiw": "Fiwaga", + "fj": "Fijian", + "fkk": "Kirya-Konzəl", + "fkv": "Kven Finnish", + "fla": "Kalispel-Pend d'Oreille", + "flh": "Foau", + "fli": "Fali", + "fll": "North Fali", + "fln": "Flinders Island", + "flr": "Fuliiru", + "fly": "Flaaitaal; Tsotsitaal", + "fmp": "Fe'fe'", + "fmu": "Far Western Muria", + "fnb": "Fanbak", + "fng": "Fanagalo", + "fni": "Fania", + "fo": "Faroese", + "fod": "Foodo", + "foi": "Foi", + "fom": "Foma", + "fon": "Fon", + "for": "Fore", + "fos": "Siraya", + "fox": "Formosan languages", + "fpe": "Fernando Po Creole English", + "fqs": "Fas", + "fr": "French", + "frc": "Cajun French", + "frd": "Fordata", + "frk": "Frankish", + "frm": "Middle French (ca. 1400-1600)", + "fro": "Old French (842-ca. 
1400)", + "frp": "Arpitan; Francoprovençal", + "frq": "Forak", + "frr": "Northern Frisian", + "frs": "Eastern Frisian", + "frt": "Fortsenal", + "fse": "Finnish Sign Language", + "fsl": "French Sign Language", + "fss": "Finland-Swedish Sign Language; finlandssvenskt teckenspråk; suomenruotsalainen viittomakieli", + "fub": "Adamawa Fulfulde", + "fuc": "Pulaar", + "fud": "East Futuna", + "fue": "Borgu Fulfulde", + "fuf": "Pular", + "fuh": "Western Niger Fulfulde", + "fui": "Bagirmi Fulfulde", + "fuj": "Ko", + "fum": "Fum", + "fun": "Fulniô", + "fuq": "Central-Eastern Niger Fulfulde", + "fur": "Friulian", + "fut": "Futuna-Aniwa", + "fuu": "Furu", + "fuv": "Nigerian Fulfulde", + "fuy": "Fuyug", + "fvr": "Fur", + "fwa": "Fwâi", + "fwe": "Fwe", + "fy": "Western Frisian", + "ga": "Irish", + "gaa": "Ga", + "gab": "Gabri", + "gac": "Mixed Great Andamanese", + "gad": "Gaddang", + "gae": "Guarequena", + "gaf": "Gende", + "gag": "Gagauz", + "gah": "Alekano", + "gai": "Borei", + "gaj": "Gadsup", + "gak": "Gamkonora", + "gal": "Galolen", + "gam": "Kandawo", + "gan": "Gan Chinese", + "gao": "Gants", + "gap": "Gal", + "gaq": "Gata'", + "gar": "Galeya", + "gas": "Adiwasi Garasia", + "gat": "Kenati", + "gau": "Mudhili Gadaba", + "gaw": "Nobonob", + "gax": "Borana-Arsi-Guji Oromo", + "gay": "Gayo", + "gaz": "West Central Oromo", + "gba": "Gbaya (Central African Republic)", + "gbb": "Kaytetye", + "gbd": "Karajarri", + "gbe": "Niksek", + "gbf": "Gaikundi", + "gbg": "Gbanziri", + "gbh": "Defi Gbe", + "gbi": "Galela", + "gbj": "Bodo Gadaba", + "gbk": "Gaddi", + "gbl": "Gamit", + "gbm": "Garhwali", + "gbn": "Mo'da", + "gbo": "Northern Grebo", + "gbp": "Gbaya-Bossangoa", + "gbq": "Gbaya-Bozoum", + "gbr": "Gbagyi", + "gbs": "Gbesi Gbe", + "gbu": "Gagadu", + "gbv": "Gbanu", + "gbw": "Gabi-Gabi", + "gbx": "Eastern Xwla Gbe", + "gby": "Gbari", + "gbz": "Zoroastrian Dari", + "gcc": "Mali", + "gcd": "Ganggalida", + "gce": "Galice", + "gcf": "Guadeloupean Creole French", + "gcl": "Grenadian Creole 
English", + "gcn": "Gaina", + "gcr": "Guianese Creole French", + "gct": "Colonia Tovar German", + "gd": "Scottish Gaelic; Gaelic", + "gda": "Gade Lohar", + "gdb": "Pottangi Ollar Gadaba", + "gdc": "Gugu Badhun", + "gdd": "Gedaged", + "gde": "Gude", + "gdf": "Guduf-Gava", + "gdg": "Ga'dang", + "gdh": "Gadjerawang; Gajirrabeng", + "gdi": "Gundi", + "gdj": "Gurdjar", + "gdk": "Gadang", + "gdl": "Dirasha", + "gdm": "Laal", + "gdn": "Umanakaina", + "gdo": "Ghodoberi", + "gdq": "Mehri", + "gdr": "Wipi", + "gds": "Ghandruk Sign Language", + "gdt": "Kungardutyi", + "gdu": "Gudu", + "gdx": "Godwari", + "gea": "Geruma", + "geb": "Kire", + "gec": "Gboloo Grebo", + "ged": "Gade", + "gef": "Gerai", + "geg": "Gengle", + "geh": "Hutterite German; Hutterisch", + "gei": "Gebe", + "gej": "Gen", + "gek": "Ywom", + "gel": "ut-Ma'in", + "gem": "Germanic languages", + "geq": "Geme", + "ges": "Geser-Gorom", + "gev": "Eviya", + "gew": "Gera", + "gex": "Garre", + "gey": "Enya", + "gez": "Geez", + "gfk": "Patpatar", + "gft": "Gafat", + "gga": "Gao", + "ggb": "Gbii", + "ggd": "Gugadj", + "gge": "Gurr-goni", + "ggg": "Gurgula", + "ggk": "Kungarakany", + "ggl": "Ganglau", + "ggt": "Gitua", + "ggu": "Gagu; Gban", + "ggw": "Gogodala", + "gha": "Ghadamès", + "ghc": "Hiberno-Scottish Gaelic", + "ghe": "Southern Ghale", + "ghh": "Northern Ghale", + "ghk": "Geko Karen", + "ghl": "Ghulfan", + "ghn": "Ghanongga", + "gho": "Ghomara", + "ghr": "Ghera", + "ghs": "Guhu-Samane", + "ght": "Kuke; Kutang Ghale", + "gia": "Kija", + "gib": "Gibanawa", + "gic": "Gail", + "gid": "Gidar", + "gie": "Gaɓogbo; Guébie", + "gig": "Goaria", + "gih": "Githabul", + "gii": "Girirra", + "gil": "Gilbertese", + "gim": "Gimi (Eastern Highlands)", + "gin": "Hinukh", + "gip": "Gimi (West New Britain)", + "giq": "Green Gelao", + "gir": "Red Gelao", + "gis": "North Giziga", + "git": "Gitxsan", + "giu": "Mulao", + "giw": "White Gelao", + "gix": "Gilima", + "giy": "Giyug", + "giz": "South Giziga", + "gjk": "Kachi Koli", + "gjm": 
"Gunditjmara", + "gjn": "Gonja", + "gjr": "Gurindji Kriol", + "gju": "Gujari", + "gka": "Guya", + "gkd": "Magɨ (Madang Province)", + "gke": "Ndai", + "gkn": "Gokana", + "gko": "Kok-Nar", + "gkp": "Guinea Kpelle", + "gku": "ǂUngkue", + "gl": "Galician", + "glb": "Belning", + "glc": "Bon Gula", + "gld": "Nanai", + "glh": "Northwest Pashai; Northwest Pashayi", + "glj": "Gula Iro", + "glk": "Gilaki", + "gll": "Garlali", + "glo": "Galambu", + "glr": "Glaro-Twabo", + "glu": "Gula (Chad)", + "glw": "Glavda", + "gly": "Gule", + "gma": "Gambera", + "gmb": "Gula'alaa", + "gmd": "Mághdì", + "gme": "East Germanic languages", + "gmg": "Magɨyi", + "gmh": "Middle High German (ca. 1050-1500)", + "gml": "Middle Low German", + "gmm": "Gbaya-Mbodomo", + "gmn": "Gimnime", + "gmq": "North Germanic languages", + "gmr": "Mirning; Mirniny", + "gmu": "Gumalu", + "gmv": "Gamo", + "gmw": "West Germanic languages", + "gmx": "Magoma", + "gmy": "Mycenaean Greek", + "gmz": "Mgbolizhia", + "gn": "Guarani", + "gna": "Kaansa", + "gnb": "Gangte", + "gnc": "Guanche", + "gnd": "Zulgo-Gemzek", + "gne": "Ganang", + "gng": "Ngangam", + "gnh": "Lere", + "gni": "Gooniyandi", + "gnj": "Ngen", + "gnk": "ǁGana", + "gnl": "Gangulu", + "gnm": "Ginuman", + "gnn": "Gumatj", + "gno": "Northern Gondi", + "gnq": "Gana", + "gnr": "Gureng Gureng", + "gnt": "Guntai", + "gnu": "Gnau", + "gnw": "Western Bolivian Guaraní", + "gnz": "Ganzi", + "goa": "Guro", + "gob": "Playero", + "goc": "Gorakor", + "god": "Godié", + "goe": "Gongduk", + "gof": "Gofa", + "gog": "Gogo", + "goh": "Old High German (ca. 
750-1050)", + "goi": "Gobasi", + "goj": "Gowlan", + "gok": "Gowli", + "gol": "Gola", + "gom": "Goan Konkani", + "gon": "Gondi", + "goo": "Gone Dau", + "gop": "Yeretuar", + "goq": "Gorap", + "gor": "Gorontalo", + "gos": "Gronings", + "got": "Gothic", + "gou": "Gavar", + "gov": "Goo", + "gow": "Gorowa", + "gox": "Gobu", + "goy": "Goundo", + "goz": "Gozarkhani", + "gpa": "Gupa-Abawa", + "gpe": "Ghanaian Pidgin English", + "gpn": "Taiap", + "gqa": "Ga'anda", + "gqi": "Guiqiong", + "gqn": "Guana (Brazil)", + "gqr": "Gor", + "gqu": "Qau", + "gra": "Rajput Garasia", + "grb": "Grebo", + "grc": "Ancient Greek (to 1453)", + "grd": "Guruntum-Mbaaru", + "grg": "Madi", + "grh": "Gbiri-Niragu", + "gri": "Ghari", + "grj": "Southern Grebo", + "grk": "Greek languages", + "grm": "Kota Marudu Talantang", + "gro": "Groma", + "grq": "Gorovu", + "grr": "Taznatit", + "grs": "Gresi", + "grt": "Garo", + "gru": "Kistane", + "grv": "Central Grebo", + "grw": "Gweda", + "grx": "Guriaso", + "gry": "Barclayville Grebo", + "grz": "Guramalum", + "gse": "Ghanaian Sign Language", + "gsg": "German Sign Language", + "gsl": "Gusilay", + "gsm": "Guatemalan Sign Language", + "gsn": "Nema; Gusan", + "gso": "Southwest Gbaya", + "gsp": "Wasembo", + "gss": "Greek Sign Language", + "gsw": "Swiss German; Alemannic; Alsatian", + "gta": "Guató", + "gtu": "Aghu-Tharnggala", + "gu": "Gujarati", + "gua": "Shiki", + "gub": "Guajajára", + "guc": "Wayuu", + "gud": "Yocoboué Dida", + "gue": "Gurindji", + "guf": "Gupapuyngu", + "gug": "Paraguayan Guaraní", + "guh": "Guahibo", + "gui": "Eastern Bolivian Guaraní", + "guk": "Gumuz", + "gul": "Sea Island Creole English", + "gum": "Guambiano", + "gun": "Mbyá Guaraní", + "guo": "Guayabero", + "gup": "Gunwinggu", + "guq": "Aché", + "gur": "Farefare", + "gus": "Guinean Sign Language", + "gut": "Maléku Jaíka", + "guu": "Yanomamö", + "guw": "Gun", + "gux": "Gourmanchéma", + "guz": "Gusii; Ekegusii", + "gv": "Manx", + "gva": "Guana (Paraguay)", + "gvc": "Guanano", + "gve": 
"Duwet", + "gvf": "Golin", + "gvj": "Guajá", + "gvl": "Gulay", + "gvm": "Gurmana", + "gvn": "Kuku-Yalanji", + "gvo": "Gavião Do Jiparaná", + "gvp": "Pará Gavião", + "gvr": "Gurung", + "gvs": "Gumawana", + "gvy": "Guyani", + "gwa": "Mbato", + "gwb": "Gwa", + "gwc": "Gawri; Kalami", + "gwd": "Gawwada", + "gwe": "Gweno", + "gwf": "Gowro", + "gwg": "Moo", + "gwi": "Gwichʼin", + "gwj": "ǀGwi", + "gwm": "Awngthim", + "gwn": "Gwandara", + "gwr": "Gwere", + "gwt": "Gawar-Bati", + "gwu": "Guwamu", + "gww": "Kwini", + "gwx": "Gua", + "gxx": "Wè Southern", + "gya": "Northwest Gbaya", + "gyb": "Garus", + "gyd": "Kayardild", + "gye": "Gyem", + "gyf": "Gungabula", + "gyg": "Gbayi", + "gyi": "Gyele", + "gyl": "Gayil", + "gym": "Ngäbere", + "gyn": "Guyanese Creole English", + "gyo": "Gyalsumdo", + "gyr": "Guarayu", + "gyy": "Gunya", + "gyz": "Geji; Gyaazi", + "gza": "Ganza", + "gzi": "Gazi", + "gzn": "Gane", + "ha": "Hausa", + "haa": "Han", + "hab": "Hanoi Sign Language", + "hac": "Gurani", + "had": "Hatam", + "hae": "Eastern Oromo", + "haf": "Haiphong Sign Language", + "hag": "Hanga", + "hah": "Hahon", + "hai": "Haida", + "haj": "Hajong", + "hak": "Hakka Chinese", + "hal": "Halang", + "ham": "Hewa", + "han": "Hangaza", + "hao": "Hakö", + "hap": "Hupla", + "haq": "Ha", + "har": "Harari", + "has": "Haisla", + "hav": "Havu", + "haw": "Hawaiian", + "hax": "Southern Haida", + "hay": "Haya", + "haz": "Hazaragi", + "hba": "Hamba", + "hbb": "Huba", + "hbn": "Heiban", + "hbo": "Ancient Hebrew", + "hbu": "Habu", + "hca": "Andaman Creole Hindi", + "hch": "Huichol", + "hdn": "Northern Haida", + "hds": "Honduras Sign Language", + "hdy": "Hadiyya", + "he": "Hebrew", + "hea": "Northern Qiandong Miao", + "hed": "Herdé", + "heg": "Helong", + "heh": "Hehe", + "hei": "Heiltsuk", + "hem": "Hemba", + "hgm": "Haiǁom", + "hgw": "Haigwai", + "hhi": "Hoia Hoia", + "hhr": "Kerak", + "hhy": "Hoyahoya", + "hi": "Hindi", + "hia": "Lamang", + "hib": "Hibito", + "hid": "Hidatsa", + "hif": "Fiji Hindi", + 
"hig": "Kamwe", + "hih": "Pamosu", + "hii": "Hinduri", + "hij": "Hijuk", + "hik": "Seit-Kaitetu", + "hil": "Hiligaynon", + "him": "Himachali languages; Western Pahari languages", + "hio": "Tsoa", + "hir": "Himarimã", + "hit": "Hittite", + "hiw": "Hiw", + "hix": "Hixkaryána", + "hji": "Haji", + "hka": "Kahe", + "hke": "Hunde", + "hkh": "Khah; Poguli", + "hkk": "Hunjara-Kaina Ke", + "hkn": "Mel-Khaonh", + "hks": "Hong Kong Sign Language; Heung Kong Sau Yue", + "hla": "Halia", + "hlb": "Halbi", + "hld": "Halang Doan", + "hle": "Hlersu", + "hlt": "Matu Chin", + "hlu": "Hieroglyphic Luwian", + "hma": "Southern Mashan Hmong; Southern Mashan Miao", + "hmb": "Humburi Senni Songhay", + "hmc": "Central Huishui Hmong; Central Huishui Miao", + "hmd": "Large Flowery Miao; A-hmaos; Da-Hua Miao", + "hme": "Eastern Huishui Hmong; Eastern Huishui Miao", + "hmf": "Hmong Don", + "hmg": "Southwestern Guiyang Hmong", + "hmh": "Southwestern Huishui Hmong; Southwestern Huishui Miao", + "hmi": "Northern Huishui Hmong; Northern Huishui Miao", + "hmj": "Ge; Gejia", + "hmk": "Maek", + "hml": "Luopohe Hmong; Luopohe Miao", + "hmm": "Central Mashan Hmong; Central Mashan Miao", + "hmn": "Hmong; Mong", + "hmp": "Northern Mashan Hmong; Northern Mashan Miao", + "hmq": "Eastern Qiandong Miao", + "hmr": "Hmar", + "hms": "Southern Qiandong Miao", + "hmt": "Hamtai", + "hmu": "Hamap", + "hmv": "Hmong Dô", + "hmw": "Western Mashan Hmong; Western Mashan Miao", + "hmx": "Hmong-Mien languages", + "hmy": "Southern Guiyang Hmong; Southern Guiyang Miao", + "hmz": "Hmong Shua; Sinicized Miao", + "hna": "Mina (Cameroon)", + "hnd": "Southern Hindko", + "hne": "Chhattisgarhi", + "hng": "Hungu", + "hnh": "ǁAni", + "hni": "Hani", + "hnj": "Hmong Njua; Mong Leng; Mong Njua", + "hnn": "Hanunoo", + "hno": "Northern Hindko", + "hns": "Caribbean Hindustani", + "hnu": "Hung", + "ho": "Hiri Motu", + "hoa": "Hoava", + "hob": "Mari (Madang Province)", + "hoc": "Ho", + "hod": "Holma", + "hoe": "Horom", + "hoh": "Hobyót", + 
"hoi": "Holikachuk", + "hoj": "Hadothi; Haroti", + "hok": "Hokan languages", + "hol": "Holu", + "hom": "Homa", + "hoo": "Holoholo", + "hop": "Hopi", + "hor": "Horo", + "hos": "Ho Chi Minh City Sign Language", + "hot": "Hote; Malê", + "hov": "Hovongan", + "how": "Honi", + "hoy": "Holiya", + "hoz": "Hozo", + "hpo": "Hpon", + "hps": "Hawai'i Sign Language (HSL); Hawai'i Pidgin Sign Language", + "hr": "Croatian", + "hra": "Hrangkhol", + "hrc": "Niwer Mil", + "hre": "Hre", + "hrk": "Haruku", + "hrm": "Horned Miao", + "hro": "Haroi", + "hrp": "Nhirrpi", + "hrt": "Hértevin", + "hru": "Hruso", + "hrw": "Warwar Feni", + "hrx": "Hunsrik", + "hrz": "Harzani", + "hsb": "Upper Sorbian", + "hsh": "Hungarian Sign Language", + "hsl": "Hausa Sign Language", + "hsn": "Xiang Chinese", + "hss": "Harsusi", + "ht": "Haitian; Haitian Creole", + "hti": "Hoti", + "hto": "Minica Huitoto", + "hts": "Hadza", + "htu": "Hitu", + "htx": "Middle Hittite", + "hu": "Hungarian", + "hub": "Huambisa", + "huc": "ǂHua; ǂʼAmkhoe", + "hud": "Huaulu", + "hue": "San Francisco Del Mar Huave", + "huf": "Humene", + "hug": "Huachipaeri", + "huh": "Huilliche", + "hui": "Huli", + "huj": "Northern Guiyang Hmong; Northern Guiyang Miao", + "huk": "Hulung", + "hul": "Hula", + "hum": "Hungana", + "huo": "Hu", + "hup": "Hupa", + "huq": "Tsat", + "hur": "Halkomelem", + "hus": "Huastec", + "hut": "Humla", + "huu": "Murui Huitoto", + "huv": "San Mateo Del Mar Huave", + "huw": "Hukumina", + "hux": "Nüpode Huitoto", + "huy": "Hulaulá", + "huz": "Hunzib", + "hvc": "Haitian Vodoun Culture Language", + "hve": "San Dionisio Del Mar Huave", + "hvk": "Haveke", + "hvn": "Sabu", + "hvv": "Santa María Del Mar Huave", + "hwa": "Wané", + "hwc": "Hawai'i Creole English; Hawai'i Pidgin", + "hwo": "Hwana", + "hy": "Armenian", + "hya": "Hya", + "hyw": "Western Armenian", + "hyx": "Armenian (family)", + "hz": "Herero", + "ia": "Interlingua (International Auxiliary Language Association)", + "iai": "Iaai", + "ian": "Iatmul", + "iar": 
"Purari", + "iba": "Iban", + "ibb": "Ibibio", + "ibd": "Iwaidja", + "ibe": "Akpes", + "ibg": "Ibanag", + "ibh": "Bih", + "ibl": "Ibaloi", + "ibm": "Agoi", + "ibn": "Ibino", + "ibr": "Ibuoro", + "ibu": "Ibu", + "iby": "Ibani", + "ica": "Ede Ica", + "ich": "Etkywan", + "icl": "Icelandic Sign Language", + "icr": "Islander Creole English", + "id": "Indonesian", + "ida": "Idakho-Isukha-Tiriki; Luidakho-Luisukha-Lutirichi", + "idb": "Indo-Portuguese", + "idc": "Idon; Ajiya", + "idd": "Ede Idaca", + "ide": "Idere", + "idi": "Idi", + "idr": "Indri", + "ids": "Idesa", + "idt": "Idaté", + "idu": "Idoma", + "ie": "Interlingue; Occidental", + "ifa": "Amganad Ifugao", + "ifb": "Batad Ifugao; Ayangan Ifugao", + "ife": "Ifè", + "iff": "Ifo", + "ifk": "Tuwali Ifugao", + "ifm": "Teke-Fuumu", + "ifu": "Mayoyao Ifugao", + "ify": "Keley-I Kallahan", + "ig": "Igbo", + "igb": "Ebira", + "ige": "Igede", + "igg": "Igana", + "igl": "Igala", + "igm": "Kanggape", + "ign": "Ignaciano", + "igo": "Isebe", + "igs": "Interglossa", + "igw": "Igwe", + "ihb": "Iha Based Pidgin", + "ihi": "Ihievbe", + "ihp": "Iha", + "ihw": "Bidhawal", + "ii": "Sichuan Yi; Nuosu", + "iin": "Thiin", + "iir": "Indo-Iranian languages", + "ijc": "Izon", + "ije": "Biseni", + "ijj": "Ede Ije", + "ijn": "Kalabari", + "ijo": "Ijo languages", + "ijs": "Southeast Ijo", + "ik": "Inupiaq", + "ike": "Eastern Canadian Inuktitut", + "iki": "Iko", + "ikk": "Ika", + "ikl": "Ikulu", + "iko": "Olulumo-Ikom", + "ikp": "Ikpeshi", + "ikr": "Ikaranggal", + "iks": "Inuit Sign Language", + "ikt": "Inuinnaqtun; Western Canadian Inuktitut", + "ikv": "Iku-Gora-Ankwa", + "ikw": "Ikwere", + "ikx": "Ik", + "ikz": "Ikizu", + "ila": "Ile Ape", + "ilb": "Ila", + "ilg": "Garig-Ilgar", + "ili": "Ili Turki", + "ilk": "Ilongot", + "ilm": "Iranun (Malaysia)", + "ilo": "Iloko", + "ilp": "Iranun (Philippines)", + "ils": "International Sign", + "ilu": "Ili'uun", + "ilv": "Ilue", + "ima": "Mala Malasar", + "imi": "Anamgura", + "iml": "Miluk", + "imn": 
"Imonda", + "imo": "Imbongu", + "imr": "Imroing", + "ims": "Marsian", + "imt": "Imotong", + "imy": "Milyan", + "inb": "Inga", + "inc": "Indic languages", + "ine": "Indo-European languages", + "ing": "Degexit'an", + "inh": "Ingush", + "inj": "Jungle Inga", + "inl": "Indonesian Sign Language", + "inm": "Minaean", + "inn": "Isinai", + "ino": "Inoke-Yate", + "inp": "Iñapari", + "ins": "Indian Sign Language", + "int": "Intha", + "inz": "Ineseño", + "io": "Ido", + "ior": "Inor", + "iou": "Tuma-Irumu", + "iow": "Iowa-Oto", + "ipi": "Ipili", + "ipo": "Ipiko", + "iqu": "Iquito", + "iqw": "Ikwo", + "ira": "Iranian languages", + "ire": "Iresim", + "irh": "Irarutu", + "iri": "Rigwe; Irigwe", + "irk": "Iraqw", + "irn": "Irántxe", + "iro": "Iroquoian languages", + "irr": "Ir", + "iru": "Irula", + "irx": "Kamberau", + "iry": "Iraya", + "is": "Icelandic", + "isa": "Isabi", + "isc": "Isconahua", + "isd": "Isnag", + "ise": "Italian Sign Language", + "isg": "Irish Sign Language", + "ish": "Esan", + "isi": "Nkem-Nkum", + "isk": "Ishkashimi", + "ism": "Masimasi", + "isn": "Isanzu", + "iso": "Isoko", + "isr": "Israeli Sign Language", + "ist": "Istriot", + "isu": "Isu (Menchum Division)", + "it": "Italian", + "itb": "Binongan Itneg", + "itc": "Italic languages", + "itd": "Southern Tidung", + "ite": "Itene", + "iti": "Inlaod Itneg", + "itk": "Judeo-Italian", + "itl": "Itelmen", + "itm": "Itu Mbon Uzo", + "ito": "Itonama", + "itr": "Iteri", + "its": "Isekiri", + "itt": "Maeng Itneg", + "itv": "Itawit", + "itw": "Ito", + "itx": "Itik", + "ity": "Moyadan Itneg", + "itz": "Itzá", + "iu": "Inuktitut", + "ium": "Iu Mien", + "ivb": "Ibatan", + "ivv": "Ivatan", + "iwk": "I-Wak", + "iwm": "Iwam", + "iwo": "Iwur", + "iws": "Sepik Iwam", + "ixc": "Ixcatec", + "ixl": "Ixil", + "iya": "Iyayu", + "iyo": "Mesaka", + "iyx": "Yaka (Congo)", + "izh": "Ingrian", + "izr": "Izere", + "izz": "Izii", + "ja": "Japanese", + "jaa": "Jamamadí", + "jab": "Hyam", + "jac": "Popti'; Jakalteko", + "jad": "Jahanka", + 
"jae": "Yabem", + "jaf": "Jara", + "jah": "Jah Hut", + "jaj": "Zazao", + "jak": "Jakun", + "jal": "Yalahatan", + "jam": "Jamaican Creole English", + "jan": "Jandai", + "jao": "Yanyuwa", + "jaq": "Yaqay", + "jas": "New Caledonian Javanese", + "jat": "Jakati", + "jau": "Yaur", + "jax": "Jambi Malay", + "jay": "Yan-nhangu; Nhangu", + "jaz": "Jawe", + "jbe": "Judeo-Berber", + "jbi": "Badjiri", + "jbj": "Arandai", + "jbk": "Barikewa", + "jbm": "Bijim", + "jbn": "Nafusi", + "jbo": "Lojban", + "jbr": "Jofotek-Bromnya", + "jbt": "Jabutí", + "jbu": "Jukun Takum", + "jbw": "Yawijibaya", + "jcs": "Jamaican Country Sign Language", + "jct": "Krymchak", + "jda": "Jad", + "jdg": "Jadgali", + "jdt": "Judeo-Tat", + "jeb": "Jebero", + "jee": "Jerung", + "jeh": "Jeh", + "jei": "Yei", + "jek": "Jeri Kuo", + "jel": "Yelmek", + "jen": "Dza", + "jer": "Jere", + "jet": "Manem", + "jeu": "Jonkor Bourmataguil", + "jgb": "Ngbee", + "jge": "Judeo-Georgian", + "jgk": "Gwak", + "jgo": "Ngomba", + "jhi": "Jehai", + "jhs": "Jhankot Sign Language", + "jia": "Jina", + "jib": "Jibu", + "jic": "Tol", + "jid": "Bu (Kaduna State)", + "jie": "Jilbe", + "jig": "Jingulu; Djingili", + "jih": "sTodsde; Shangzhai", + "jii": "Jiiddu", + "jil": "Jilim", + "jim": "Jimi (Cameroon)", + "jio": "Jiamao", + "jiq": "Guanyinqiao; Lavrung", + "jit": "Jita", + "jiu": "Youle Jinuo", + "jiv": "Shuar", + "jiy": "Buyuan Jinuo", + "jje": "Jejueo", + "jjr": "Bankal", + "jka": "Kaera", + "jkm": "Mobwa Karen", + "jko": "Kubo", + "jkp": "Paku Karen", + "jkr": "Koro (India)", + "jks": "Amami Koniya Sign Language", + "jku": "Labir", + "jle": "Ngile", + "jls": "Jamaican Sign Language", + "jma": "Dima", + "jmb": "Zumbun", + "jmc": "Machame", + "jmd": "Yamdena", + "jmi": "Jimi (Nigeria)", + "jml": "Jumli", + "jmn": "Makuri Naga", + "jmr": "Kamara", + "jms": "Mashi (Nigeria)", + "jmw": "Mouwase", + "jmx": "Western Juxtlahuaca Mixtec", + "jna": "Jangshung", + "jnd": "Jandavra", + "jng": "Yangman", + "jni": "Janji", + "jnj": "Yemsa", + 
"jnl": "Rawat", + "jns": "Jaunsari", + "job": "Joba", + "jod": "Wojenaka", + "jog": "Jogi", + "jor": "Jorá", + "jos": "Jordanian Sign Language", + "jow": "Jowulu", + "jpa": "Jewish Palestinian Aramaic", + "jpr": "Judeo-Persian", + "jpx": "Japanese (family)", + "jqr": "Jaqaru", + "jra": "Jarai", + "jrb": "Judeo-Arabic", + "jrr": "Jiru", + "jrt": "Jakattoe", + "jru": "Japrería", + "jsl": "Japanese Sign Language", + "jua": "Júma", + "jub": "Wannu", + "juc": "Jurchen", + "jud": "Worodougou", + "juh": "Hõne", + "jui": "Ngadjuri", + "juk": "Wapan", + "jul": "Jirel", + "jum": "Jumjum", + "jun": "Juang", + "juo": "Jiba", + "jup": "Hupdë", + "jur": "Jurúna", + "jus": "Jumla Sign Language", + "jut": "Jutish", + "juu": "Ju", + "juw": "Wãpha", + "juy": "Juray", + "jv": "Javanese", + "jvd": "Javindo", + "jvn": "Caribbean Javanese", + "jwi": "Jwira-Pepesa", + "jya": "Jiarong", + "jye": "Judeo-Yemeni Arabic", + "jyy": "Jaya", + "ka": "Georgian", + "kaa": "Kara-Kalpak; Karakalpak", + "kab": "Kabyle", + "kac": "Kachin; Jingpho", + "kad": "Adara", + "kae": "Ketangalan", + "kaf": "Katso", + "kag": "Kajaman", + "kah": "Kara (Central African Republic)", + "kai": "Karekare", + "kaj": "Jju", + "kak": "Kalanguya; Kayapa Kallahan", + "kam": "Kamba (Kenya)", + "kao": "Xaasongaxango", + "kap": "Bezhta", + "kaq": "Capanahua", + "kar": "Karen languages", + "kav": "Katukína", + "kaw": "Kawi", + "kax": "Kao", + "kay": "Kamayurá", + "kba": "Kalarko", + "kbb": "Kaxuiâna", + "kbc": "Kadiwéu", + "kbd": "Kabardian", + "kbe": "Kanju", + "kbg": "Khamba", + "kbh": "Camsá", + "kbi": "Kaptiau", + "kbj": "Kari", + "kbk": "Grass Koiari", + "kbl": "Kanembu", + "kbm": "Iwal", + "kbn": "Kare (Central African Republic)", + "kbo": "Keliko", + "kbp": "Kabiyè", + "kbq": "Kamano", + "kbr": "Kafa", + "kbs": "Kande", + "kbt": "Abadi", + "kbu": "Kabutra", + "kbv": "Dera (Indonesia)", + "kbw": "Kaiep", + "kbx": "Ap Ma", + "kby": "Manga Kanuri", + "kbz": "Duhwa", + "kca": "Khanty", + "kcb": "Kawacha", + "kcc": "Lubila", 
+ "kcd": "Ngkâlmpw Kanum", + "kce": "Kaivi", + "kcf": "Ukaan", + "kcg": "Tyap", + "kch": "Vono", + "kci": "Kamantan", + "kcj": "Kobiana", + "kck": "Kalanga", + "kcl": "Kela (Papua New Guinea); Kala", + "kcm": "Gula (Central African Republic)", + "kcn": "Nubi", + "kco": "Kinalakna", + "kcp": "Kanga", + "kcq": "Kamo", + "kcr": "Katla", + "kcs": "Koenoem", + "kct": "Kaian", + "kcu": "Kami (Tanzania)", + "kcv": "Kete", + "kcw": "Kabwari", + "kcx": "Kachama-Ganjule", + "kcy": "Korandje", + "kcz": "Konongo", + "kda": "Worimi", + "kdc": "Kutu", + "kdd": "Yankunytjatjara", + "kde": "Makonde", + "kdf": "Mamusi", + "kdg": "Seba", + "kdh": "Tem", + "kdi": "Kumam", + "kdj": "Karamojong", + "kdk": "Numèè; Kwényi", + "kdl": "Tsikimba", + "kdm": "Kagoma", + "kdn": "Kunda", + "kdo": "Kordofanian languages", + "kdp": "Kaningdon-Nindem", + "kdq": "Koch", + "kdr": "Karaim", + "kdt": "Kuy", + "kdu": "Kadaru", + "kdw": "Koneraw", + "kdx": "Kam", + "kdy": "Keder; Keijar", + "kdz": "Kwaja", + "kea": "Kabuverdianu", + "keb": "Kélé", + "kec": "Keiga", + "ked": "Kerewe", + "kee": "Eastern Keres", + "kef": "Kpessi", + "keg": "Tese", + "keh": "Keak", + "kei": "Kei", + "kej": "Kadar", + "kek": "Kekchí", + "kel": "Kela (Democratic Republic of Congo)", + "kem": "Kemak", + "ken": "Kenyang", + "keo": "Kakwa", + "kep": "Kaikadi", + "keq": "Kamar", + "ker": "Kera", + "kes": "Kugbo", + "ket": "Ket", + "keu": "Akebu", + "kev": "Kanikkaran", + "kew": "West Kewa", + "kex": "Kukna", + "key": "Kupia", + "kez": "Kukele", + "kfa": "Kodava", + "kfb": "Northwestern Kolami", + "kfc": "Konda-Dora", + "kfd": "Korra Koraga", + "kfe": "Kota (India)", + "kff": "Koya", + "kfg": "Kudiya", + "kfh": "Kurichiya", + "kfi": "Kannada Kurumba", + "kfj": "Kemiehua", + "kfk": "Kinnauri", + "kfl": "Kung", + "kfm": "Khunsari", + "kfn": "Kuk", + "kfo": "Koro (Côte d'Ivoire)", + "kfp": "Korwa", + "kfq": "Korku", + "kfr": "Kachhi; Kutchi", + "kfs": "Bilaspuri", + "kft": "Kanjari", + "kfu": "Katkari", + "kfv": "Kurmukar", + "kfw": 
"Kharam Naga", + "kfx": "Kullu Pahari", + "kfy": "Kumaoni", + "kfz": "Koromfé", + "kg": "Kongo", + "kga": "Koyaga", + "kgb": "Kawe", + "kge": "Komering", + "kgf": "Kube", + "kgg": "Kusunda", + "kgi": "Selangor Sign Language", + "kgj": "Gamale Kham", + "kgk": "Kaiwá", + "kgl": "Kunggari", + "kgm": "Karipúna", + "kgn": "Karingani", + "kgo": "Krongo", + "kgp": "Kaingang", + "kgq": "Kamoro", + "kgr": "Abun", + "kgs": "Kumbainggar", + "kgt": "Somyev", + "kgu": "Kobol", + "kgv": "Karas", + "kgw": "Karon Dori", + "kgx": "Kamaru", + "kgy": "Kyerung", + "kha": "Khasi", + "khb": "Lü", + "khc": "Tukang Besi North", + "khd": "Bädi Kanum", + "khe": "Korowai", + "khf": "Khuen", + "khg": "Khams Tibetan", + "khh": "Kehu", + "khi": "Khoisan languages", + "khj": "Kuturmi", + "khk": "Halh Mongolian", + "khl": "Lusi", + "khn": "Khandesi", + "kho": "Khotanese; Sakan", + "khp": "Kapori; Kapauri", + "khq": "Koyra Chiini Songhay", + "khr": "Kharia", + "khs": "Kasua", + "kht": "Khamti", + "khu": "Nkhumbi", + "khv": "Khvarshi", + "khw": "Khowar", + "khx": "Kanu", + "khy": "Kele (Democratic Republic of Congo)", + "khz": "Keapara", + "ki": "Kikuyu; Gikuyu", + "kia": "Kim", + "kib": "Koalib", + "kic": "Kickapoo", + "kid": "Koshin", + "kie": "Kibet", + "kif": "Eastern Parbate Kham", + "kig": "Kimaama; Kimaghima", + "kih": "Kilmeri", + "kii": "Kitsai", + "kij": "Kilivila", + "kil": "Kariya", + "kim": "Karagas", + "kio": "Kiowa", + "kip": "Sheshi Kham", + "kiq": "Kosadle; Kosare", + "kis": "Kis", + "kit": "Agob", + "kiu": "Kirmanjki (individual language)", + "kiv": "Kimbu", + "kiw": "Northeast Kiwai", + "kix": "Khiamniungan Naga", + "kiy": "Kirikiri", + "kiz": "Kisi", + "kj": "Kuanyama; Kwanyama", + "kja": "Mlap", + "kjb": "Q'anjob'al; Kanjobal", + "kjc": "Coastal Konjo", + "kjd": "Southern Kiwai", + "kje": "Kisar", + "kjg": "Khmu", + "kjh": "Khakas", + "kji": "Zabana", + "kjj": "Khinalugh", + "kjk": "Highland Konjo", + "kjl": "Western Parbate Kham", + "kjm": "Kháng", + "kjn": "Kunjen", + "kjo": 
"Harijan Kinnauri", + "kjp": "Pwo Eastern Karen", + "kjq": "Western Keres", + "kjr": "Kurudu", + "kjs": "East Kewa", + "kjt": "Phrae Pwo Karen", + "kju": "Kashaya", + "kjv": "Kaikavian Literary Language", + "kjx": "Ramopa", + "kjy": "Erave", + "kjz": "Bumthangkha", + "kk": "Kazakh", + "kka": "Kakanda", + "kkb": "Kwerisa", + "kkc": "Odoodee", + "kkd": "Kinuku", + "kke": "Kakabe", + "kkf": "Kalaktang Monpa", + "kkg": "Mabaka Valley Kalinga", + "kkh": "Khün", + "kki": "Kagulu", + "kkj": "Kako", + "kkk": "Kokota", + "kkl": "Kosarek Yale", + "kkm": "Kiong", + "kkn": "Kon Keu", + "kko": "Karko", + "kkp": "Gugubera; Koko-Bera", + "kkq": "Kaeku", + "kkr": "Kir-Balar", + "kks": "Giiwo", + "kkt": "Koi", + "kku": "Tumi", + "kkv": "Kangean", + "kkw": "Teke-Kukuya", + "kkx": "Kohin", + "kky": "Guugu Yimidhirr; Guguyimidjir", + "kkz": "Kaska", + "kl": "Kalaallisut; Greenlandic", + "kla": "Klamath-Modoc", + "klb": "Kiliwa", + "klc": "Kolbila", + "kld": "Gamilaraay", + "kle": "Kulung (Nepal)", + "klf": "Kendeje", + "klg": "Tagakaulo", + "klh": "Weliki", + "kli": "Kalumpang", + "klj": "Khalaj", + "klk": "Kono (Nigeria)", + "kll": "Kagan Kalagan", + "klm": "Migum", + "kln": "Kalenjin", + "klo": "Kapya", + "klp": "Kamasa", + "klq": "Rumu", + "klr": "Khaling", + "kls": "Kalasha", + "klt": "Nukna", + "klu": "Klao", + "klv": "Maskelynes", + "klw": "Tado; Lindu", + "klx": "Koluwawa", + "kly": "Kalao", + "klz": "Kabola", + "km": "Khmer; Central Khmer", + "kma": "Konni", + "kmb": "Kimbundu", + "kmc": "Southern Dong", + "kmd": "Majukayang Kalinga", + "kme": "Bakole", + "kmf": "Kare (Papua New Guinea)", + "kmg": "Kâte", + "kmh": "Kalam", + "kmi": "Kami (Nigeria)", + "kmj": "Kumarbhag Paharia", + "kmk": "Limos Kalinga", + "kml": "Tanudan Kalinga", + "kmm": "Kom (India)", + "kmn": "Awtuw", + "kmo": "Kwoma", + "kmp": "Gimme", + "kmq": "Kwama", + "kmr": "Northern Kurdish", + "kms": "Kamasau", + "kmt": "Kemtuik", + "kmu": "Kanite", + "kmv": "Karipúna Creole French", + "kmw": "Komo (Democratic 
Republic of Congo)", + "kmx": "Waboda", + "kmy": "Koma", + "kmz": "Khorasani Turkish", + "kn": "Kannada", + "kna": "Dera (Nigeria)", + "knb": "Lubuagan Kalinga", + "knc": "Central Kanuri", + "knd": "Konda", + "kne": "Kankanaey", + "knf": "Mankanya", + "kng": "Koongo", + "kni": "Kanufi", + "knj": "Western Kanjobal", + "knk": "Kuranko", + "knl": "Keninjal", + "knm": "Kanamarí", + "knn": "Konkani (individual language)", + "kno": "Kono (Sierra Leone)", + "knp": "Kwanja", + "knq": "Kintaq", + "knr": "Kaningra", + "kns": "Kensiu", + "knt": "Panoan Katukína", + "knu": "Kono (Guinea)", + "knv": "Tabo", + "knw": "Kung-Ekoka", + "knx": "Kendayan; Salako", + "kny": "Kanyok", + "knz": "Kalamsé", + "ko": "Korean", + "koa": "Konomala", + "koc": "Kpati", + "kod": "Kodi", + "koe": "Kacipo-Bale Suri", + "kof": "Kubi", + "kog": "Cogui; Kogi", + "koh": "Koyo", + "koi": "Komi-Permyak", + "kok": "Konkani (macrolanguage)", + "kol": "Kol (Papua New Guinea)", + "koo": "Konzo", + "kop": "Waube", + "koq": "Kota (Gabon)", + "kos": "Kosraean", + "kot": "Lagwan", + "kou": "Koke", + "kov": "Kudu-Camo", + "kow": "Kugama", + "koy": "Koyukon", + "koz": "Korak", + "kpa": "Kutto", + "kpb": "Mullu Kurumba", + "kpc": "Curripaco", + "kpd": "Koba", + "kpe": "Kpelle", + "kpf": "Komba", + "kpg": "Kapingamarangi", + "kph": "Kplang", + "kpi": "Kofei", + "kpj": "Karajá", + "kpk": "Kpan", + "kpl": "Kpala", + "kpm": "Koho", + "kpn": "Kepkiriwát", + "kpo": "Ikposo", + "kpq": "Korupun-Sela", + "kpr": "Korafe-Yegha", + "kps": "Tehit", + "kpt": "Karata", + "kpu": "Kafoa", + "kpv": "Komi-Zyrian", + "kpw": "Kobon", + "kpx": "Mountain Koiali", + "kpy": "Koryak", + "kpz": "Kupsabiny", + "kqa": "Mum", + "kqb": "Kovai", + "kqc": "Doromu-Koki", + "kqd": "Koy Sanjaq Surat", + "kqe": "Kalagan", + "kqf": "Kakabai", + "kqg": "Khe", + "kqh": "Kisankasa", + "kqi": "Koitabu", + "kqj": "Koromira", + "kqk": "Kotafon Gbe", + "kql": "Kyenele", + "kqm": "Khisa", + "kqn": "Kaonde", + "kqo": "Eastern Krahn", + "kqp": "Kimré", + "kqq": 
"Krenak", + "kqr": "Kimaragang", + "kqs": "Northern Kissi", + "kqt": "Klias River Kadazan", + "kqu": "Seroa", + "kqv": "Okolod", + "kqw": "Kandas", + "kqx": "Mser", + "kqy": "Koorete", + "kqz": "Korana", + "kr": "Kanuri", + "kra": "Kumhali", + "krb": "Karkin", + "krc": "Karachay-Balkar", + "krd": "Kairui-Midiki", + "kre": "Panará", + "krf": "Koro (Vanuatu)", + "krh": "Kurama", + "kri": "Krio", + "krj": "Kinaray-A", + "krk": "Kerek", + "krl": "Karelian", + "krn": "Sapo", + "kro": "Kru languages", + "krp": "Korop", + "krr": "Krung", + "krs": "Gbaya (Sudan)", + "krt": "Tumari Kanuri", + "kru": "Kurukh", + "krv": "Kavet", + "krw": "Western Krahn", + "krx": "Karon", + "kry": "Kryts", + "krz": "Sota Kanum", + "ks": "Kashmiri", + "ksa": "Shuwa-Zamani", + "ksb": "Shambala", + "ksc": "Southern Kalinga", + "ksd": "Kuanua", + "kse": "Kuni", + "ksf": "Bafia", + "ksg": "Kusaghe", + "ksh": "Kölsch", + "ksi": "Krisa; I'saka", + "ksj": "Uare", + "ksk": "Kansa", + "ksl": "Kumalu", + "ksm": "Kumba", + "ksn": "Kasiguranin", + "kso": "Kofa", + "ksp": "Kaba", + "ksq": "Kwaami", + "ksr": "Borong", + "kss": "Southern Kisi", + "kst": "Winyé", + "ksu": "Khamyang", + "ksv": "Kusu", + "ksw": "S'gaw Karen", + "ksx": "Kedang", + "ksy": "Kharia Thar", + "ksz": "Kodaku", + "kta": "Katua", + "ktb": "Kambaata", + "ktc": "Kholok", + "ktd": "Kokata; Kukatha", + "kte": "Nubri", + "ktf": "Kwami", + "ktg": "Kalkutung", + "kth": "Karanga", + "kti": "North Muyu", + "ktj": "Plapo Krumen", + "ktk": "Kaniet", + "ktl": "Koroshi", + "ktm": "Kurti", + "ktn": "Karitiâna", + "kto": "Kuot", + "ktp": "Kaduo", + "ktq": "Katabaga", + "kts": "South Muyu", + "ktt": "Ketum", + "ktu": "Kituba (Democratic Republic of Congo)", + "ktv": "Eastern Katu", + "ktw": "Kato", + "ktx": "Kaxararí", + "kty": "Kango (Bas-Uélé District)", + "ktz": "Juǀʼhoan; Juǀʼhoansi", + "ku": "Kurdish", + "kub": "Kutep", + "kuc": "Kwinsu", + "kud": "'Auhelawa", + "kue": "Kuman (Papua New Guinea)", + "kuf": "Western Katu", + "kug": "Kupa", + "kuh": 
"Kushi", + "kui": "Kuikúro-Kalapálo; Kalapalo", + "kuj": "Kuria", + "kuk": "Kepo'", + "kul": "Kulere", + "kum": "Kumyk", + "kun": "Kunama", + "kuo": "Kumukio", + "kup": "Kunimaipa", + "kuq": "Karipuna", + "kus": "Kusaal", + "kut": "Kutenai", + "kuu": "Upper Kuskokwim", + "kuv": "Kur", + "kuw": "Kpagua", + "kux": "Kukatja", + "kuy": "Kuuku-Ya'u", + "kuz": "Kunza", + "kv": "Komi", + "kva": "Bagvalal", + "kvb": "Kubu", + "kvc": "Kove", + "kvd": "Kui (Indonesia)", + "kve": "Kalabakan", + "kvf": "Kabalai", + "kvg": "Kuni-Boazi", + "kvh": "Komodo", + "kvi": "Kwang", + "kvj": "Psikye", + "kvk": "Korean Sign Language", + "kvl": "Kayaw", + "kvm": "Kendem", + "kvn": "Border Kuna", + "kvo": "Dobel", + "kvp": "Kompane", + "kvq": "Geba Karen", + "kvr": "Kerinci", + "kvt": "Lahta Karen; Lahta", + "kvu": "Yinbaw Karen", + "kvv": "Kola", + "kvw": "Wersing", + "kvx": "Parkari Koli", + "kvy": "Yintale Karen; Yintale", + "kvz": "Tsakwambo; Tsaukambo", + "kw": "Cornish", + "kwa": "Dâw", + "kwb": "Kwa", + "kwc": "Likwala", + "kwd": "Kwaio", + "kwe": "Kwerba", + "kwf": "Kwara'ae", + "kwg": "Sara Kaba Deme", + "kwh": "Kowiai", + "kwi": "Awa-Cuaiquer", + "kwj": "Kwanga", + "kwk": "Kwakiutl", + "kwl": "Kofyar", + "kwm": "Kwambi", + "kwn": "Kwangali", + "kwo": "Kwomtari", + "kwp": "Kodia", + "kwr": "Kwer", + "kws": "Kwese", + "kwt": "Kwesten", + "kwu": "Kwakum", + "kwv": "Sara Kaba Náà", + "kww": "Kwinti", + "kwx": "Khirwar", + "kwy": "San Salvador Kongo", + "kwz": "Kwadi", + "kxa": "Kairiru", + "kxb": "Krobu", + "kxc": "Konso; Khonso", + "kxd": "Brunei", + "kxf": "Manumanaw Karen; Manumanaw", + "kxh": "Karo (Ethiopia)", + "kxi": "Keningau Murut", + "kxj": "Kulfa", + "kxk": "Zayein Karen", + "kxm": "Northern Khmer", + "kxn": "Kanowit-Tanjong Melanau", + "kxo": "Kanoé", + "kxp": "Wadiyara Koli", + "kxq": "Smärky Kanum", + "kxr": "Koro (Papua New Guinea)", + "kxs": "Kangjia", + "kxt": "Koiwat", + "kxv": "Kuvi", + "kxw": "Konai", + "kxx": "Likuba", + "kxy": "Kayong", + "kxz": "Kerewo", + "ky": 
"Kirghiz; Kyrgyz", + "kya": "Kwaya", + "kyb": "Butbut Kalinga", + "kyc": "Kyaka", + "kyd": "Karey", + "kye": "Krache", + "kyf": "Kouya", + "kyg": "Keyagana", + "kyh": "Karok", + "kyi": "Kiput", + "kyj": "Karao", + "kyk": "Kamayo", + "kyl": "Kalapuya", + "kym": "Kpatili", + "kyn": "Northern Binukidnon", + "kyo": "Kelon", + "kyp": "Kang", + "kyq": "Kenga", + "kyr": "Kuruáya", + "kys": "Baram Kayan", + "kyt": "Kayagar", + "kyu": "Western Kayah", + "kyv": "Kayort", + "kyw": "Kudmali", + "kyx": "Rapoisi", + "kyy": "Kambaira", + "kyz": "Kayabí", + "kza": "Western Karaboro", + "kzb": "Kaibobo", + "kzc": "Bondoukou Kulango", + "kzd": "Kadai", + "kze": "Kosena", + "kzf": "Da'a Kaili", + "kzg": "Kikai", + "kzi": "Kelabit", + "kzk": "Kazukuru", + "kzl": "Kayeli", + "kzm": "Kais", + "kzn": "Kokola", + "kzo": "Kaningi", + "kzp": "Kaidipang", + "kzq": "Kaike", + "kzr": "Karang", + "kzs": "Sugut Dusun", + "kzu": "Kayupulau", + "kzv": "Komyandaret", + "kzw": "Karirí-Xocó", + "kzx": "Kamarian", + "kzy": "Kango (Tshopo District)", + "kzz": "Kalabra", + "la": "Latin", + "laa": "Southern Subanen", + "lab": "Linear A", + "lac": "Lacandon", + "lad": "Ladino", + "lae": "Pattani", + "laf": "Lafofa", + "lag": "Langi", + "lah": "Lahnda", + "lai": "Lambya", + "laj": "Lango (Uganda)", + "lal": "Lalia", + "lam": "Lamba", + "lan": "Laru", + "lap": "Laka (Chad)", + "laq": "Qabiao", + "lar": "Larteh", + "las": "Lama (Togo)", + "lau": "Laba", + "law": "Lauje", + "lax": "Tiwa", + "lay": "Lama Bai", + "laz": "Aribwatsa", + "lb": "Luxembourgish; Letzeburgesch", + "lbb": "Label", + "lbc": "Lakkia", + "lbe": "Lak", + "lbf": "Tinani", + "lbg": "Laopang", + "lbi": "La'bi", + "lbj": "Ladakhi", + "lbk": "Central Bontok", + "lbl": "Libon Bikol", + "lbm": "Lodhi", + "lbn": "Rmeet", + "lbo": "Laven", + "lbq": "Wampar", + "lbr": "Lohorung", + "lbs": "Libyan Sign Language", + "lbt": "Lachi", + "lbu": "Labu", + "lbv": "Lavatbura-Lamusong", + "lbw": "Tolaki", + "lbx": "Lawangan", + "lby": "Lamalama; Lamu-Lamu", + 
"lbz": "Lardil", + "lcc": "Legenyem", + "lcd": "Lola", + "lce": "Loncong; Sekak", + "lcf": "Lubu", + "lch": "Luchazi", + "lcl": "Lisela", + "lcm": "Tungag", + "lcp": "Western Lawa", + "lcq": "Luhu", + "lcs": "Lisabata-Nuniali", + "lda": "Kla-Dan", + "ldb": "Dũya", + "ldd": "Luri", + "ldg": "Lenyima", + "ldh": "Lamja-Dengsa-Tola", + "ldi": "Laari", + "ldj": "Lemoro", + "ldk": "Leelau", + "ldl": "Kaan", + "ldm": "Landoma", + "ldn": "Láadan", + "ldo": "Loo", + "ldp": "Tso", + "ldq": "Lufu", + "lea": "Lega-Shabunda", + "leb": "Lala-Bisa", + "lec": "Leco", + "led": "Lendu", + "lee": "Lyélé", + "lef": "Lelemi", + "leh": "Lenje", + "lei": "Lemio", + "lej": "Lengola", + "lek": "Leipon", + "lel": "Lele (Democratic Republic of Congo)", + "lem": "Nomaande", + "len": "Lenca", + "leo": "Leti (Cameroon)", + "lep": "Lepcha", + "leq": "Lembena", + "ler": "Lenkau", + "les": "Lese", + "let": "Lesing-Gelimi; Amio-Gelimi", + "leu": "Kara (Papua New Guinea)", + "lev": "Lamma", + "lew": "Ledo Kaili", + "lex": "Luang", + "ley": "Lemolang", + "lez": "Lezghian", + "lfa": "Lefa", + "lfn": "Lingua Franca Nova", + "lg": "Ganda; Luganda", + "lga": "Lungga", + "lgb": "Laghu", + "lgg": "Lugbara", + "lgh": "Laghuu", + "lgi": "Lengilu", + "lgk": "Lingarak; Neverver", + "lgl": "Wala", + "lgm": "Lega-Mwenga", + "lgn": "T'apo; Opuuo", + "lgo": "Lango (South Sudan)", + "lgq": "Logba", + "lgr": "Lengo", + "lgt": "Pahi", + "lgu": "Longgu", + "lgz": "Ligenza", + "lha": "Laha (Viet Nam)", + "lhh": "Laha (Indonesia)", + "lhi": "Lahu Shi", + "lhl": "Lahul Lohar", + "lhm": "Lhomi", + "lhn": "Lahanan", + "lhp": "Lhokpu", + "lhs": "Mlahsö", + "lht": "Lo-Toga", + "lhu": "Lahu", + "li": "Limburgan; Limburger; Limburgish", + "lia": "West-Central Limba", + "lib": "Likum", + "lic": "Hlai", + "lid": "Nyindrou", + "lie": "Likila", + "lif": "Limbu", + "lig": "Ligbi", + "lih": "Lihir", + "lij": "Ligurian", + "lik": "Lika", + "lil": "Lillooet", + "lio": "Liki", + "lip": "Sekpele", + "liq": "Libido", + "lir": "Liberian 
English", + "lis": "Lisu", + "liu": "Logorik", + "liv": "Liv", + "liw": "Col", + "lix": "Liabuku", + "liy": "Banda-Bambari", + "liz": "Libinza", + "lja": "Golpa", + "lje": "Rampi", + "lji": "Laiyolo", + "ljl": "Li'o", + "ljp": "Lampung Api", + "ljw": "Yirandali", + "ljx": "Yuru", + "lka": "Lakalei", + "lkb": "Kabras; Lukabaras", + "lkc": "Kucong", + "lkd": "Lakondê", + "lke": "Kenyi", + "lkh": "Lakha", + "lki": "Laki", + "lkj": "Remun", + "lkl": "Laeko-Libuat", + "lkm": "Kalaamaya", + "lkn": "Lakon; Vure", + "lko": "Khayo; Olukhayo", + "lkr": "Päri", + "lks": "Kisa; Olushisa", + "lkt": "Lakota", + "lku": "Kungkari", + "lky": "Lokoya", + "lla": "Lala-Roba", + "llb": "Lolo", + "llc": "Lele (Guinea)", + "lld": "Ladin", + "lle": "Lele (Papua New Guinea)", + "llf": "Hermit", + "llg": "Lole", + "llh": "Lamu", + "lli": "Teke-Laali", + "llj": "Ladji Ladji", + "llk": "Lelak", + "lll": "Lilau", + "llm": "Lasalimu", + "lln": "Lele (Chad)", + "llp": "North Efate", + "llq": "Lolak", + "lls": "Lithuanian Sign Language", + "llu": "Lau", + "llx": "Lauan", + "lma": "East Limba", + "lmb": "Merei", + "lmc": "Limilngan", + "lmd": "Lumun", + "lme": "Pévé", + "lmf": "South Lembata", + "lmg": "Lamogai", + "lmh": "Lambichhong", + "lmi": "Lombi", + "lmj": "West Lembata", + "lmk": "Lamkang", + "lml": "Hano", + "lmn": "Lambadi", + "lmo": "Lombard", + "lmp": "Limbum", + "lmq": "Lamatuka", + "lmr": "Lamalera", + "lmu": "Lamenu", + "lmv": "Lomaiviti", + "lmw": "Lake Miwok", + "lmx": "Laimbue", + "lmy": "Lamboya", + "ln": "Lingala", + "lna": "Langbashe", + "lnb": "Mbalanhu", + "lnd": "Lundayeh; Lun Bawang", + "lng": "Langobardic", + "lnh": "Lanoh", + "lni": "Daantanai'", + "lnj": "Leningitij", + "lnl": "South Central Banda", + "lnm": "Langam", + "lnn": "Lorediakarkar", + "lns": "Lamnso'", + "lnu": "Longuda", + "lnw": "Lanima", + "lnz": "Lonzo", + "lo": "Lao", + "loa": "Loloda", + "lob": "Lobi", + "loc": "Inonhan", + "loe": "Saluan", + "lof": "Logol", + "log": "Logo", + "loh": "Narim", + "loi": 
"Loma (Côte d'Ivoire)", + "loj": "Lou", + "lok": "Loko", + "lol": "Mongo", + "lom": "Loma (Liberia)", + "lon": "Malawi Lomwe", + "loo": "Lombo", + "lop": "Lopa", + "loq": "Lobala", + "lor": "Téén", + "los": "Loniu", + "lot": "Otuho", + "lou": "Louisiana Creole", + "lov": "Lopi", + "low": "Tampias Lobu", + "lox": "Loun", + "loy": "Loke", + "loz": "Lozi", + "lpa": "Lelepa", + "lpe": "Lepki", + "lpn": "Long Phuri Naga", + "lpo": "Lipo", + "lpx": "Lopit", + "lqr": "Logir", + "lra": "Rara Bakati'", + "lrc": "Northern Luri", + "lre": "Laurentian", + "lrg": "Laragia", + "lri": "Marachi; Olumarachi", + "lrk": "Loarki", + "lrl": "Lari", + "lrm": "Marama; Olumarama", + "lrn": "Lorang", + "lro": "Laro", + "lrr": "Southern Yamphu", + "lrt": "Larantuka Malay", + "lrv": "Larevat", + "lrz": "Lemerig", + "lsa": "Lasgerdi", + "lsb": "Burundian Sign Language; Langue des Signes Burundaise", + "lsc": "Albarradas Sign Language; Lengua de señas Albarradas", + "lsd": "Lishana Deni", + "lse": "Lusengo", + "lsh": "Lish", + "lsi": "Lashi", + "lsl": "Latvian Sign Language", + "lsm": "Saamia; Olusamia", + "lsn": "Tibetan Sign Language", + "lso": "Laos Sign Language", + "lsp": "Panamanian Sign Language; Lengua de Señas Panameñas", + "lsr": "Aruop", + "lss": "Lasi", + "lst": "Trinidad and Tobago Sign Language", + "lsv": "Sivia Sign Language", + "lsw": "Seychelles Sign Language; Lalang Siny Seselwa; Langue des Signes Seychelloise", + "lsy": "Mauritian Sign Language", + "lt": "Lithuanian", + "ltc": "Late Middle Chinese", + "ltg": "Latgalian", + "lth": "Thur", + "lti": "Leti (Indonesia)", + "ltn": "Latundê", + "lto": "Tsotso; Olutsotso", + "lts": "Tachoni; Lutachoni", + "ltu": "Latu", + "lu": "Luba-Katanga", + "lua": "Luba-Lulua", + "luc": "Aringa", + "lud": "Ludian", + "lue": "Luvale", + "luf": "Laua", + "lui": "Luiseno", + "luj": "Luna", + "luk": "Lunanakha", + "lul": "Olu'bo", + "lum": "Luimbi", + "lun": "Lunda", + "luo": "Luo (Kenya and Tanzania); Dholuo", + "lup": "Lumbu", + "luq": "Lucumi", 
+ "lur": "Laura", + "lus": "Lushai", + "lut": "Lushootseed", + "luu": "Lumba-Yakkha", + "luv": "Luwati", + "luw": "Luo (Cameroon)", + "luy": "Luyia; Oluluyia", + "luz": "Southern Luri", + "lv": "Latvian", + "lva": "Maku'a", + "lvi": "Lavi", + "lvk": "Lavukaleve", + "lvs": "Standard Latvian", + "lvu": "Levuka", + "lwa": "Lwalu", + "lwe": "Lewo Eleng", + "lwg": "Wanga; Oluwanga", + "lwh": "White Lachi", + "lwl": "Eastern Lawa", + "lwm": "Laomian", + "lwo": "Luwo", + "lws": "Malawian Sign Language", + "lwt": "Lewotobi", + "lwu": "Lawu", + "lww": "Lewo", + "lxm": "Lakurumau", + "lya": "Layakha", + "lyg": "Lyngngam", + "lyn": "Luyana", + "lzh": "Literary Chinese", + "lzl": "Litzlitz", + "lzn": "Leinong Naga", + "lzz": "Laz", + "maa": "San Jerónimo Tecóatl Mazatec", + "mab": "Yutanduchi Mixtec", + "mad": "Madurese", + "mae": "Bo-Rukul", + "maf": "Mafa", + "mag": "Magahi", + "mai": "Maithili", + "maj": "Jalapa De Díaz Mazatec", + "mak": "Makasar", + "mam": "Mam", + "man": "Mandingo; Manding", + "map": "Austronesian languages", + "maq": "Chiquihuitlán Mazatec", + "mas": "Masai", + "mat": "San Francisco Matlatzinca", + "mau": "Huautla Mazatec", + "mav": "Sateré-Mawé", + "maw": "Mampruli", + "max": "North Moluccan Malay", + "maz": "Central Mazahua", + "mba": "Higaonon", + "mbb": "Western Bukidnon Manobo", + "mbc": "Macushi", + "mbd": "Dibabawon Manobo", + "mbe": "Molale", + "mbf": "Baba Malay", + "mbh": "Mangseng", + "mbi": "Ilianen Manobo", + "mbj": "Nadëb", + "mbk": "Malol", + "mbl": "Maxakalí", + "mbm": "Ombamba", + "mbn": "Macaguán", + "mbo": "Mbo (Cameroon)", + "mbp": "Malayo", + "mbq": "Maisin", + "mbr": "Nukak Makú", + "mbs": "Sarangani Manobo", + "mbt": "Matigsalug Manobo", + "mbu": "Mbula-Bwazza", + "mbv": "Mbulungish", + "mbw": "Maring", + "mbx": "Mari (East Sepik Province)", + "mby": "Memoni", + "mbz": "Amoltepec Mixtec", + "mca": "Maca", + "mcb": "Machiguenga", + "mcc": "Bitur", + "mcd": "Sharanahua", + "mce": "Itundujia Mixtec", + "mcf": "Matsés", + "mcg": 
"Mapoyo", + "mch": "Maquiritari", + "mci": "Mese", + "mcj": "Mvanip", + "mck": "Mbunda", + "mcl": "Macaguaje", + "mcm": "Malaccan Creole Portuguese", + "mcn": "Masana", + "mco": "Coatlán Mixe", + "mcp": "Makaa", + "mcq": "Ese", + "mcr": "Menya", + "mcs": "Mambai", + "mct": "Mengisa", + "mcu": "Cameroon Mambila", + "mcv": "Minanibai", + "mcw": "Mawa (Chad)", + "mcx": "Mpiemo", + "mcy": "South Watut", + "mcz": "Mawan", + "mda": "Mada (Nigeria)", + "mdb": "Morigi", + "mdc": "Male (Papua New Guinea)", + "mdd": "Mbum", + "mde": "Maba (Chad)", + "mdf": "Moksha", + "mdg": "Massalat", + "mdh": "Maguindanaon", + "mdi": "Mamvu", + "mdj": "Mangbetu", + "mdk": "Mangbutu", + "mdl": "Maltese Sign Language", + "mdm": "Mayogo", + "mdn": "Mbati", + "mdp": "Mbala", + "mdq": "Mbole", + "mdr": "Mandar", + "mds": "Maria (Papua New Guinea)", + "mdt": "Mbere", + "mdu": "Mboko", + "mdv": "Santa Lucía Monteverde Mixtec", + "mdw": "Mbosi", + "mdx": "Dizin", + "mdy": "Male (Ethiopia)", + "mdz": "Suruí Do Pará", + "mea": "Menka", + "meb": "Ikobi", + "mec": "Marra", + "med": "Melpa", + "mee": "Mengen", + "mef": "Megam", + "meh": "Southwestern Tlaxiaco Mixtec", + "mei": "Midob", + "mej": "Meyah", + "mek": "Mekeo", + "mel": "Central Melanau", + "mem": "Mangala", + "men": "Mende (Sierra Leone)", + "meo": "Kedah Malay", + "mep": "Miriwoong", + "meq": "Merey", + "mer": "Meru", + "mes": "Masmaje", + "met": "Mato", + "meu": "Motu", + "mev": "Mano", + "mew": "Maaka", + "mey": "Hassaniyya", + "mez": "Menominee", + "mfa": "Pattani Malay", + "mfb": "Bangka", + "mfc": "Mba", + "mfd": "Mendankwe-Nkwen", + "mfe": "Morisyen", + "mff": "Naki", + "mfg": "Mogofin", + "mfh": "Matal", + "mfi": "Wandala", + "mfj": "Mefele", + "mfk": "North Mofu", + "mfl": "Putai", + "mfm": "Marghi South", + "mfn": "Cross River Mbembe", + "mfo": "Mbe", + "mfp": "Makassar Malay", + "mfq": "Moba", + "mfr": "Marrithiyel", + "mfs": "Mexican Sign Language", + "mft": "Mokerang", + "mfu": "Mbwela", + "mfv": "Mandjak", + "mfw": "Mulaha", + 
"mfx": "Melo", + "mfy": "Mayo", + "mfz": "Mabaan", + "mg": "Malagasy", + "mga": "Middle Irish (900-1200)", + "mgb": "Mararit", + "mgc": "Morokodo", + "mgd": "Moru", + "mge": "Mango", + "mgf": "Maklew", + "mgg": "Mpumpong", + "mgh": "Makhuwa-Meetto", + "mgi": "Lijili", + "mgj": "Abureni", + "mgk": "Mawes", + "mgl": "Maleu-Kilenge", + "mgm": "Mambae", + "mgn": "Mbangi", + "mgo": "Meta'", + "mgp": "Eastern Magar", + "mgq": "Malila", + "mgr": "Mambwe-Lungu", + "mgs": "Manda (Tanzania)", + "mgt": "Mongol", + "mgu": "Mailu", + "mgv": "Matengo", + "mgw": "Matumbi", + "mgy": "Mbunga", + "mgz": "Mbugwe", + "mh": "Marshallese", + "mha": "Manda (India)", + "mhb": "Mahongwe", + "mhc": "Mocho", + "mhd": "Mbugu", + "mhe": "Besisi; Mah Meri", + "mhf": "Mamaa", + "mhg": "Margu", + "mhi": "Ma'di", + "mhj": "Mogholi", + "mhk": "Mungaka", + "mhl": "Mauwake", + "mhm": "Makhuwa-Moniga", + "mhn": "Mócheno", + "mho": "Mashi (Zambia)", + "mhp": "Balinese Malay", + "mhq": "Mandan", + "mhr": "Eastern Mari", + "mhs": "Buru (Indonesia)", + "mht": "Mandahuaca", + "mhu": "Digaro-Mishmi; Darang Deng", + "mhw": "Mbukushu", + "mhx": "Maru; Lhaovo", + "mhy": "Ma'anyan", + "mhz": "Mor (Mor Islands)", + "mi": "Maori", + "mia": "Miami", + "mib": "Atatláhuca Mixtec", + "mic": "Mi'kmaq; Micmac", + "mid": "Mandaic", + "mie": "Ocotepec Mixtec", + "mif": "Mofu-Gudur", + "mig": "San Miguel El Grande Mixtec", + "mih": "Chayuco Mixtec", + "mii": "Chigmecatitlán Mixtec", + "mij": "Abar; Mungbam", + "mik": "Mikasuki", + "mil": "Peñoles Mixtec", + "mim": "Alacatlatzala Mixtec", + "min": "Minangkabau", + "mio": "Pinotepa Nacional Mixtec", + "mip": "Apasco-Apoala Mixtec", + "miq": "Mískito", + "mir": "Isthmus Mixe", + "mit": "Southern Puebla Mixtec", + "miu": "Cacaloxtepec Mixtec", + "miw": "Akoye", + "mix": "Mixtepec Mixtec", + "miy": "Ayutla Mixtec", + "miz": "Coatzospan Mixtec", + "mjb": "Makalero", + "mjc": "San Juan Colorado Mixtec", + "mjd": "Northwest Maidu", + "mje": "Muskum", + "mjg": "Tu", + "mjh": 
"Mwera (Nyasa)", + "mji": "Kim Mun", + "mjj": "Mawak", + "mjk": "Matukar", + "mjl": "Mandeali", + "mjm": "Medebur", + "mjn": "Ma (Papua New Guinea)", + "mjo": "Malankuravan", + "mjp": "Malapandaram", + "mjq": "Malaryan", + "mjr": "Malavedan", + "mjs": "Miship", + "mjt": "Sauria Paharia", + "mju": "Manna-Dora", + "mjv": "Mannan", + "mjw": "Karbi", + "mjx": "Mahali", + "mjy": "Mahican", + "mjz": "Majhi", + "mk": "Macedonian", + "mka": "Mbre", + "mkb": "Mal Paharia", + "mkc": "Siliput", + "mke": "Mawchi", + "mkf": "Miya", + "mkg": "Mak (China)", + "mkh": "Mon-Khmer languages", + "mki": "Dhatki", + "mkj": "Mokilese", + "mkk": "Byep", + "mkl": "Mokole", + "mkm": "Moklen", + "mkn": "Kupang Malay", + "mko": "Mingang Doso", + "mkp": "Moikodi", + "mkq": "Bay Miwok", + "mkr": "Malas", + "mks": "Silacayoapan Mixtec", + "mkt": "Vamale", + "mku": "Konyanka Maninka", + "mkv": "Mafea", + "mkw": "Kituba (Congo)", + "mkx": "Kinamiging Manobo", + "mky": "East Makian", + "mkz": "Makasae", + "ml": "Malayalam", + "mla": "Malo", + "mlb": "Mbule", + "mlc": "Cao Lan", + "mle": "Manambu", + "mlf": "Mal", + "mlh": "Mape", + "mli": "Malimpung", + "mlj": "Miltu", + "mlk": "Ilwana; Kiwilwana", + "mll": "Malua Bay", + "mlm": "Mulam", + "mln": "Malango", + "mlo": "Mlomp", + "mlp": "Bargam", + "mlq": "Western Maninkakan", + "mlr": "Vame", + "mls": "Masalit", + "mlu": "To'abaita", + "mlv": "Motlav; Mwotlap", + "mlw": "Moloko", + "mlx": "Malfaxal; Naha'ai", + "mlz": "Malaynon", + "mma": "Mama", + "mmb": "Momina", + "mmc": "Michoacán Mazahua", + "mmd": "Maonan", + "mme": "Mae", + "mmf": "Mundat", + "mmg": "North Ambrym", + "mmh": "Mehináku", + "mmi": "Musar", + "mmj": "Majhwar", + "mmk": "Mukha-Dora", + "mml": "Man Met", + "mmm": "Maii", + "mmn": "Mamanwa", + "mmo": "Mangga Buang", + "mmp": "Siawi", + "mmq": "Musak", + "mmr": "Western Xiangxi Miao", + "mmt": "Malalamai", + "mmu": "Mmaala", + "mmv": "Miriti", + "mmw": "Emae", + "mmx": "Madak", + "mmy": "Migaama", + "mmz": "Mabaale", + "mn": 
"Mongolian", + "mna": "Mbula", + "mnb": "Muna", + "mnc": "Manchu", + "mnd": "Mondé", + "mne": "Naba", + "mnf": "Mundani", + "mng": "Eastern Mnong", + "mnh": "Mono (Democratic Republic of Congo)", + "mni": "Manipuri", + "mnj": "Munji", + "mnk": "Mandinka", + "mnl": "Tiale", + "mnm": "Mapena", + "mnn": "Southern Mnong", + "mno": "Manobo languages", + "mnp": "Min Bei Chinese", + "mnq": "Minriq", + "mnr": "Mono (USA)", + "mns": "Mansi", + "mnu": "Mer", + "mnv": "Rennell-Bellona", + "mnw": "Mon", + "mnx": "Manikion", + "mny": "Manyawa", + "mnz": "Moni", + "moa": "Mwan", + "moc": "Mocoví", + "mod": "Mobilian", + "moe": "Innu; Montagnais", + "mog": "Mongondow", + "moh": "Mohawk", + "moi": "Mboi", + "moj": "Monzombo", + "mok": "Morori", + "mom": "Mangue", + "moo": "Monom", + "mop": "Mopán Maya", + "moq": "Mor (Bomberai Peninsula)", + "mor": "Moro", + "mos": "Mossi", + "mot": "Barí", + "mou": "Mogum", + "mov": "Mohave", + "mow": "Moi (Congo)", + "mox": "Molima", + "moy": "Shekkacho", + "moz": "Mukulu; Gergiko", + "mpa": "Mpoto", + "mpb": "Malak Malak; Mullukmulluk", + "mpc": "Mangarrayi", + "mpd": "Machinere", + "mpe": "Majang", + "mpg": "Marba", + "mph": "Maung", + "mpi": "Mpade", + "mpj": "Martu Wangka; Wangkajunga", + "mpk": "Mbara (Chad)", + "mpl": "Middle Watut", + "mpm": "Yosondúa Mixtec", + "mpn": "Mindiri", + "mpo": "Miu", + "mpp": "Migabac", + "mpq": "Matís", + "mpr": "Vangunu", + "mps": "Dadibi", + "mpt": "Mian", + "mpu": "Makuráp", + "mpv": "Mungkip", + "mpw": "Mapidian", + "mpx": "Misima-Panaeati", + "mpy": "Mapia", + "mpz": "Mpi", + "mqa": "Maba (Indonesia)", + "mqb": "Mbuko", + "mqc": "Mangole", + "mqe": "Matepi", + "mqf": "Momuna", + "mqg": "Kota Bangun Kutai Malay", + "mqh": "Tlazoyaltepec Mixtec", + "mqi": "Mariri", + "mqj": "Mamasa", + "mqk": "Rajah Kabunsuwan Manobo", + "mql": "Mbelime", + "mqm": "South Marquesan", + "mqn": "Moronene", + "mqo": "Modole", + "mqp": "Manipa", + "mqq": "Minokok", + "mqr": "Mander", + "mqs": "West Makian", + "mqt": "Mok", + 
"mqu": "Mandari", + "mqv": "Mosimo", + "mqw": "Murupi", + "mqx": "Mamuju", + "mqy": "Manggarai", + "mqz": "Pano", + "mr": "Marathi", + "mra": "Mlabri", + "mrb": "Marino", + "mrc": "Maricopa", + "mrd": "Western Magar", + "mre": "Martha's Vineyard Sign Language", + "mrf": "Elseng", + "mrg": "Mising", + "mrh": "Mara Chin", + "mrj": "Western Mari", + "mrk": "Hmwaveke", + "mrl": "Mortlockese", + "mrm": "Merlav; Mwerlap", + "mrn": "Cheke Holo", + "mro": "Mru", + "mrp": "Morouas", + "mrq": "North Marquesan", + "mrr": "Maria (India)", + "mrs": "Maragus", + "mrt": "Marghi Central", + "mru": "Mono (Cameroon)", + "mrv": "Mangareva", + "mrw": "Maranao", + "mrx": "Maremgi; Dineor", + "mry": "Mandaya", + "mrz": "Marind", + "ms": "Malay (macrolanguage)", + "msb": "Masbatenyo", + "msc": "Sankaran Maninka", + "msd": "Yucatec Maya Sign Language", + "mse": "Musey", + "msf": "Mekwei", + "msg": "Moraid", + "msh": "Masikoro Malagasy", + "msi": "Sabah Malay", + "msj": "Ma (Democratic Republic of Congo)", + "msk": "Mansaka", + "msl": "Molof; Poule", + "msm": "Agusan Manobo", + "msn": "Vurës", + "mso": "Mombum", + "msp": "Maritsauá", + "msq": "Caac", + "msr": "Mongolian Sign Language", + "mss": "West Masela", + "msu": "Musom", + "msv": "Maslam", + "msw": "Mansoanka", + "msx": "Moresada", + "msy": "Aruamu", + "msz": "Momare", + "mt": "Maltese", + "mta": "Cotabato Manobo", + "mtb": "Anyin Morofo", + "mtc": "Munit", + "mtd": "Mualang", + "mte": "Mono (Solomon Islands)", + "mtf": "Murik (Papua New Guinea)", + "mtg": "Una", + "mth": "Munggui", + "mti": "Maiwa (Papua New Guinea)", + "mtj": "Moskona", + "mtk": "Mbe'", + "mtl": "Montol", + "mtm": "Mator", + "mtn": "Matagalpa", + "mto": "Totontepec Mixe", + "mtp": "Wichí Lhamtés Nocten", + "mtq": "Muong", + "mtr": "Mewari", + "mts": "Yora", + "mtt": "Mota", + "mtu": "Tututepec Mixtec", + "mtv": "Asaro'o", + "mtw": "Southern Binukidnon", + "mtx": "Tidaá Mixtec", + "mty": "Nabi", + "mua": "Mundang", + "mub": "Mubi", + "muc": "Ajumbu", + "mud": 
"Mednyj Aleut", + "mue": "Media Lengua", + "mug": "Musgu", + "muh": "Mündü", + "mui": "Musi", + "muj": "Mabire", + "muk": "Mugom", + "mum": "Maiwala", + "mun": "Munda languages", + "muo": "Nyong", + "mup": "Malvi", + "muq": "Eastern Xiangxi Miao", + "mur": "Murle", + "mus": "Creek", + "mut": "Western Muria", + "muu": "Yaaku", + "muv": "Muthuvan", + "mux": "Bo-Ung", + "muy": "Muyang", + "muz": "Mursi", + "mva": "Manam", + "mvb": "Mattole", + "mvd": "Mamboru", + "mve": "Marwari (Pakistan)", + "mvf": "Peripheral Mongolian", + "mvg": "Yucuañe Mixtec", + "mvh": "Mulgi", + "mvi": "Miyako", + "mvk": "Mekmek", + "mvl": "Mbara (Australia)", + "mvn": "Minaveha", + "mvo": "Marovo", + "mvp": "Duri", + "mvq": "Moere", + "mvr": "Marau", + "mvs": "Massep", + "mvt": "Mpotovoro", + "mvu": "Marfa", + "mvv": "Tagal Murut", + "mvw": "Machinga", + "mvx": "Meoswar", + "mvy": "Indus Kohistani", + "mvz": "Mesqan", + "mwa": "Mwatebu", + "mwb": "Juwal", + "mwc": "Are", + "mwe": "Mwera (Chimwera)", + "mwf": "Murrinh-Patha", + "mwg": "Aiklep", + "mwh": "Mouk-Aria", + "mwi": "Labo; Ninde", + "mwk": "Kita Maninkakan", + "mwl": "Mirandese", + "mwm": "Sar", + "mwn": "Nyamwanga", + "mwo": "Central Maewo", + "mwp": "Kala Lagaw Ya", + "mwq": "Mün Chin", + "mwr": "Marwari", + "mws": "Mwimbi-Muthambi", + "mwt": "Moken", + "mwu": "Mittu", + "mwv": "Mentawai", + "mww": "Hmong Daw", + "mwz": "Moingi", + "mxa": "Northwest Oaxaca Mixtec", + "mxb": "Tezoatlán Mixtec", + "mxc": "Manyika", + "mxd": "Modang", + "mxe": "Mele-Fila", + "mxf": "Malgbe", + "mxg": "Mbangala", + "mxh": "Mvuba", + "mxi": "Mozarabic", + "mxj": "Miju-Mishmi; Geman Deng", + "mxk": "Monumbo", + "mxl": "Maxi Gbe", + "mxm": "Meramera", + "mxn": "Moi (Indonesia)", + "mxo": "Mbowe", + "mxp": "Tlahuitoltepec Mixe", + "mxq": "Juquila Mixe", + "mxr": "Murik (Malaysia)", + "mxs": "Huitepec Mixtec", + "mxt": "Jamiltepec Mixtec", + "mxu": "Mada (Cameroon)", + "mxv": "Metlatónoc Mixtec", + "mxw": "Namo", + "mxx": "Mahou; Mawukakan", + "mxy": 
"Southeastern Nochixtlán Mixtec", + "mxz": "Central Masela", + "my": "Burmese", + "myb": "Mbay", + "myc": "Mayeka", + "mye": "Myene", + "myf": "Bambassi", + "myg": "Manta", + "myh": "Makah", + "myj": "Mangayat", + "myk": "Mamara Senoufo", + "myl": "Moma", + "mym": "Me'en", + "myn": "Mayan languages", + "myo": "Anfillo", + "myp": "Pirahã", + "myr": "Muniche", + "mys": "Mesmes", + "myu": "Mundurukú", + "myv": "Erzya", + "myw": "Muyuw", + "myx": "Masaaba", + "myy": "Macuna", + "myz": "Classical Mandaic", + "mza": "Santa María Zacatepec Mixtec", + "mzb": "Tumzabt", + "mzc": "Madagascar Sign Language", + "mzd": "Malimba", + "mze": "Morawa", + "mzg": "Monastic Sign Language", + "mzh": "Wichí Lhamtés Güisnay", + "mzi": "Ixcatlán Mazatec", + "mzj": "Manya", + "mzk": "Nigeria Mambila", + "mzl": "Mazatlán Mixe", + "mzm": "Mumuye", + "mzn": "Mazanderani", + "mzo": "Matipuhy", + "mzp": "Movima", + "mzq": "Mori Atas", + "mzr": "Marúbo", + "mzs": "Macanese", + "mzt": "Mintil", + "mzu": "Inapang", + "mzv": "Manza", + "mzw": "Deg", + "mzx": "Mawayana", + "mzy": "Mozambican Sign Language", + "mzz": "Maiadomu", + "na": "Nauru", + "naa": "Namla", + "nab": "Southern Nambikuára", + "nac": "Narak", + "nae": "Naka'ela", + "naf": "Nabak", + "nag": "Naga Pidgin", + "nah": "Nahuatl languages", + "nai": "North American Indian languages", + "naj": "Nalu", + "nak": "Nakanai", + "nal": "Nalik", + "nam": "Ngan'gityemerri", + "nan": "Min Nan Chinese", + "nao": "Naaba", + "nap": "Neapolitan", + "naq": "Khoekhoe; Nama (Namibia)", + "nar": "Iguta", + "nas": "Naasioi", + "nat": "Ca̱hungwa̱rya̱; Hungworo", + "naw": "Nawuri", + "nax": "Nakwi", + "nay": "Ngarrindjeri", + "naz": "Coatepec Nahuatl", + "nb": "Norwegian Bokmål", + "nba": "Nyemba", + "nbb": "Ndoe", + "nbc": "Chang Naga", + "nbd": "Ngbinda", + "nbe": "Konyak Naga", + "nbg": "Nagarchal", + "nbh": "Ngamo", + "nbi": "Mao Naga", + "nbj": "Ngarinyman", + "nbk": "Nake", + "nbm": "Ngbaka Ma'bo", + "nbn": "Kuri", + "nbo": "Nkukoli", + "nbp": "Nnam", 
+ "nbq": "Nggem", + "nbr": "Numana", + "nbs": "Namibian Sign Language", + "nbt": "Na", + "nbu": "Rongmei Naga", + "nbv": "Ngamambo", + "nbw": "Southern Ngbandi", + "nby": "Ningera", + "nca": "Iyo", + "ncb": "Central Nicobarese", + "ncc": "Ponam", + "ncd": "Nachering", + "nce": "Yale", + "ncf": "Notsi", + "ncg": "Nisga'a", + "nch": "Central Huasteca Nahuatl", + "nci": "Classical Nahuatl", + "ncj": "Northern Puebla Nahuatl", + "nck": "Na-kara", + "ncl": "Michoacán Nahuatl", + "ncm": "Nambo", + "ncn": "Nauna", + "nco": "Sibe", + "ncq": "Northern Katang", + "ncr": "Ncane", + "ncs": "Nicaraguan Sign Language", + "nct": "Chothe Naga", + "ncu": "Chumburung", + "ncx": "Central Puebla Nahuatl", + "ncz": "Natchez", + "nd": "North Ndebele", + "nda": "Ndasa", + "ndb": "Kenswei Nsei", + "ndc": "Ndau", + "ndd": "Nde-Nsele-Nta", + "ndf": "Nadruvian", + "ndg": "Ndengereko", + "ndh": "Ndali", + "ndi": "Samba Leko", + "ndj": "Ndamba", + "ndk": "Ndaka", + "ndl": "Ndolo", + "ndm": "Ndam", + "ndn": "Ngundi", + "ndp": "Ndo", + "ndq": "Ndombe", + "ndr": "Ndoola", + "nds": "Low German; Low Saxon", + "ndt": "Ndunga", + "ndu": "Dugun", + "ndv": "Ndut", + "ndw": "Ndobo", + "ndx": "Nduga", + "ndy": "Lutos", + "ndz": "Ndogo", + "ne": "Nepali (macrolanguage)", + "nea": "Eastern Ngad'a", + "neb": "Toura (Côte d'Ivoire)", + "nec": "Nedebang", + "ned": "Nde-Gbite", + "nee": "Nêlêmwa-Nixumwak", + "nef": "Nefamese", + "neg": "Negidal", + "neh": "Nyenkha", + "nei": "Neo-Hittite", + "nej": "Neko", + "nek": "Neku", + "nem": "Nemi", + "nen": "Nengone", + "neo": "Ná-Meo", + "neq": "North Central Mixe", + "ner": "Yahadian", + "nes": "Bhoti Kinnauri", + "net": "Nete", + "neu": "Neo", + "nev": "Nyaheun", + "new": "Newari; Nepal Bhasa", + "nex": "Neme", + "ney": "Neyo", + "nez": "Nez Perce", + "nfa": "Dhao", + "nfd": "Ahwai", + "nfl": "Ayiwo; Äiwoo", + "nfr": "Nafaanra", + "nfu": "Mfumte", + "ng": "Ndonga", + "nga": "Ngbaka", + "ngb": "Northern Ngbandi", + "ngc": "Ngombe (Democratic Republic of Congo)", + 
"ngd": "Ngando (Central African Republic)", + "nge": "Ngemba", + "ngf": "Trans-New Guinea languages", + "ngg": "Ngbaka Manza", + "ngh": "Nǁng", + "ngi": "Ngizim", + "ngj": "Ngie", + "ngk": "Dalabon", + "ngl": "Lomwe", + "ngm": "Ngatik Men's Creole", + "ngn": "Ngwo", + "ngp": "Ngulu", + "ngq": "Ngurimi; Ngoreme", + "ngr": "Engdewu", + "ngs": "Gvoko", + "ngt": "Kriang; Ngeq", + "ngu": "Guerrero Nahuatl", + "ngv": "Nagumi", + "ngw": "Ngwaba", + "ngx": "Nggwahyi", + "ngy": "Tibea", + "ngz": "Ngungwel", + "nha": "Nhanda", + "nhb": "Beng", + "nhc": "Tabasco Nahuatl", + "nhd": "Chiripá; Ava Guaraní", + "nhe": "Eastern Huasteca Nahuatl", + "nhf": "Nhuwala", + "nhg": "Tetelcingo Nahuatl", + "nhh": "Nahari", + "nhi": "Zacatlán-Ahuacatlán-Tepetzintla Nahuatl", + "nhk": "Isthmus-Cosoleacaque Nahuatl", + "nhm": "Morelos Nahuatl", + "nhn": "Central Nahuatl", + "nho": "Takuu", + "nhp": "Isthmus-Pajapan Nahuatl", + "nhq": "Huaxcaleca Nahuatl", + "nhr": "Naro", + "nht": "Ometepec Nahuatl", + "nhu": "Noone", + "nhv": "Temascaltepec Nahuatl", + "nhw": "Western Huasteca Nahuatl", + "nhx": "Isthmus-Mecayapan Nahuatl", + "nhy": "Northern Oaxaca Nahuatl", + "nhz": "Santa María La Alta Nahuatl", + "nia": "Nias", + "nib": "Nakame", + "nic": "Niger-Kordofanian languages", + "nid": "Ngandi", + "nie": "Niellim", + "nif": "Nek", + "nig": "Ngalakgan", + "nih": "Nyiha (Tanzania)", + "nii": "Nii", + "nij": "Ngaju", + "nik": "Southern Nicobarese", + "nil": "Nila", + "nim": "Nilamba", + "nin": "Ninzo", + "nio": "Nganasan", + "niq": "Nandi", + "nir": "Nimboran", + "nis": "Nimi", + "nit": "Southeastern Kolami", + "niu": "Niuean", + "niv": "Gilyak", + "niw": "Nimo", + "nix": "Hema", + "niy": "Ngiti", + "niz": "Ningil", + "nja": "Nzanyi", + "njb": "Nocte Naga", + "njd": "Ndonde Hamba", + "njh": "Lotha Naga", + "nji": "Gudanji", + "njj": "Njen", + "njl": "Njalgulgule", + "njm": "Angami Naga", + "njn": "Liangmai Naga", + "njo": "Ao Naga", + "njr": "Njerep", + "njs": "Nisa", + "njt": "Ndyuka-Trio Pidgin", 
+ "nju": "Ngadjunmaya", + "njx": "Kunyi", + "njy": "Njyem", + "njz": "Nyishi", + "nka": "Nkoya", + "nkb": "Khoibu Naga", + "nkc": "Nkongho", + "nkd": "Koireng", + "nke": "Duke", + "nkf": "Inpui Naga", + "nkg": "Nekgini", + "nkh": "Khezha Naga", + "nki": "Thangal Naga", + "nkj": "Nakai", + "nkk": "Nokuku", + "nkm": "Namat", + "nkn": "Nkangala", + "nko": "Nkonya", + "nkp": "Niuatoputapu", + "nkq": "Nkami", + "nkr": "Nukuoro", + "nks": "North Asmat", + "nkt": "Nyika (Tanzania)", + "nku": "Bouna Kulango", + "nkv": "Nyika (Malawi and Zambia)", + "nkw": "Nkutu", + "nkx": "Nkoroo", + "nkz": "Nkari", + "nl": "Dutch; Flemish", + "nla": "Ngombale", + "nlc": "Nalca", + "nle": "East Nyala", + "nlg": "Gela", + "nli": "Grangali", + "nlj": "Nyali", + "nlk": "Ninia Yali", + "nll": "Nihali", + "nlm": "Mankiyali", + "nlo": "Ngul", + "nlq": "Lao Naga", + "nlu": "Nchumbulu", + "nlv": "Orizaba Nahuatl", + "nlw": "Walangama", + "nlx": "Nahali", + "nly": "Nyamal", + "nlz": "Nalögo", + "nma": "Maram Naga", + "nmb": "Big Nambas; V'ënen Taut", + "nmc": "Ngam", + "nmd": "Ndumu", + "nme": "Mzieme Naga", + "nmf": "Tangkhul Naga (India)", + "nmg": "Kwasio", + "nmh": "Monsang Naga", + "nmi": "Nyam", + "nmj": "Ngombe (Central African Republic)", + "nmk": "Namakura", + "nml": "Ndemli", + "nmm": "Manangba", + "nmn": "ǃXóõ", + "nmo": "Moyon Naga", + "nmp": "Nimanbur", + "nmq": "Nambya", + "nmr": "Nimbari", + "nms": "Letemboi", + "nmt": "Namonuito", + "nmu": "Northeast Maidu", + "nmv": "Ngamini", + "nmw": "Nimoa; Rifao", + "nmx": "Nama (Papua New Guinea)", + "nmy": "Namuyi", + "nmz": "Nawdm", + "nn": "Norwegian Nynorsk", + "nna": "Nyangumarta", + "nnb": "Nande", + "nnc": "Nancere", + "nnd": "West Ambae", + "nne": "Ngandyera", + "nnf": "Ngaing", + "nng": "Maring Naga", + "nnh": "Ngiemboon", + "nni": "North Nuaulu", + "nnj": "Nyangatom", + "nnk": "Nankina", + "nnl": "Northern Rengma Naga", + "nnm": "Namia", + "nnn": "Ngete", + "nnp": "Wancho Naga", + "nnq": "Ngindo", + "nnr": "Narungga", + "nnt": 
"Nanticoke", + "nnu": "Dwang", + "nnv": "Nugunu (Australia)", + "nnw": "Southern Nuni", + "nny": "Nyangga", + "nnz": "Nda'nda'", + "no": "Norwegian", + "noa": "Woun Meu", + "noc": "Nuk", + "nod": "Northern Thai", + "noe": "Nimadi", + "nof": "Nomane", + "nog": "Nogai", + "noh": "Nomu", + "noi": "Noiri", + "noj": "Nonuya", + "nok": "Nooksack", + "nol": "Nomlaki", + "nom": "Nocamán", + "non": "Old Norse", + "nop": "Numanggang", + "noq": "Ngongo", + "nos": "Eastern Nisu", + "not": "Nomatsiguenga", + "nou": "Ewage-Notu", + "nov": "Novial", + "now": "Nyambo", + "noy": "Noy", + "noz": "Nayi", + "npa": "Nar Phu", + "npb": "Nupbikha", + "npg": "Ponyo-Gongwang Naga", + "nph": "Phom Naga", + "npi": "Nepali (individual language)", + "npl": "Southeastern Puebla Nahuatl", + "npn": "Mondropolon", + "npo": "Pochuri Naga", + "nps": "Nipsan", + "npu": "Puimei Naga", + "npx": "Noipx", + "npy": "Napu", + "nqg": "Southern Nago", + "nqk": "Kura Ede Nago", + "nql": "Ngendelengo", + "nqm": "Ndom", + "nqn": "Nen", + "nqo": "N'Ko; N’Ko", + "nqq": "Kyan-Karyaw Naga", + "nqt": "Nteng", + "nqy": "Akyaung Ari Naga", + "nr": "South Ndebele", + "nra": "Ngom", + "nrb": "Nara", + "nrc": "Noric", + "nre": "Southern Rengma Naga", + "nrf": "Jèrriais; Guernésiais", + "nrg": "Narango", + "nri": "Chokri Naga", + "nrk": "Ngarla", + "nrl": "Ngarluma", + "nrm": "Narom", + "nrn": "Norn", + "nrp": "North Picene", + "nrr": "Norra; Nora", + "nrt": "Northern Kalapuya", + "nru": "Narua", + "nrx": "Ngurmbur", + "nrz": "Lala", + "nsa": "Sangtam Naga", + "nsb": "Lower Nossob", + "nsc": "Nshi", + "nsd": "Southern Nisu", + "nse": "Nsenga", + "nsf": "Northwestern Nisu", + "nsg": "Ngasa", + "nsh": "Ngoshie", + "nsi": "Nigerian Sign Language", + "nsk": "Naskapi", + "nsl": "Norwegian Sign Language", + "nsm": "Sumi Naga", + "nsn": "Nehan", + "nso": "Pedi; Northern Sotho; Sepedi", + "nsp": "Nepalese Sign Language", + "nsq": "Northern Sierra Miwok", + "nsr": "Maritime Sign Language", + "nss": "Nali", + "nst": "Tase Naga", + 
"nsu": "Sierra Negra Nahuatl", + "nsv": "Southwestern Nisu", + "nsw": "Navut", + "nsx": "Nsongo", + "nsy": "Nasal", + "nsz": "Nisenan", + "ntd": "Northern Tidung", + "nte": "Nathembo", + "ntg": "Ngantangarra", + "nti": "Natioro", + "ntj": "Ngaanyatjarra", + "ntk": "Ikoma-Nata-Isenye", + "ntm": "Nateni", + "nto": "Ntomba", + "ntp": "Northern Tepehuan", + "ntr": "Delo", + "ntu": "Natügu", + "ntw": "Nottoway", + "ntx": "Tangkhul Naga (Myanmar)", + "nty": "Mantsi", + "ntz": "Natanzi", + "nua": "Yuanga", + "nub": "Nubian languages", + "nuc": "Nukuini", + "nud": "Ngala", + "nue": "Ngundu", + "nuf": "Nusu", + "nug": "Nungali", + "nuh": "Ndunda", + "nui": "Ngumbi", + "nuj": "Nyole", + "nuk": "Nuu-chah-nulth; Nuuchahnulth", + "nul": "Nusa Laut", + "num": "Niuafo'ou", + "nun": "Anong", + "nuo": "Nguôn", + "nup": "Nupe-Nupe-Tako", + "nuq": "Nukumanu", + "nur": "Nukuria", + "nus": "Nuer", + "nut": "Nung (Viet Nam)", + "nuu": "Ngbundu", + "nuv": "Northern Nuni", + "nuw": "Nguluwan", + "nux": "Mehek", + "nuy": "Nunggubuyu", + "nuz": "Tlamacazapa Nahuatl", + "nv": "Navajo; Navaho", + "nvh": "Nasarian", + "nvm": "Namiae", + "nvo": "Nyokon", + "nwa": "Nawathinehena", + "nwb": "Nyabwa", + "nwc": "Classical Newari; Classical Nepal Bhasa; Old Newari", + "nwe": "Ngwe", + "nwg": "Ngayawung", + "nwi": "Southwest Tanna", + "nwm": "Nyamusa-Molo", + "nwo": "Nauo", + "nwr": "Nawaru", + "nww": "Ndwewe", + "nwx": "Middle Newar", + "nwy": "Nottoway-Meherrin", + "nxa": "Nauete", + "nxd": "Ngando (Democratic Republic of Congo)", + "nxe": "Nage", + "nxg": "Ngad'a", + "nxi": "Nindi", + "nxk": "Koki Naga", + "nxl": "South Nuaulu", + "nxm": "Numidian", + "nxn": "Ngawun", + "nxo": "Ndambomo", + "nxq": "Naxi", + "nxr": "Ninggerum", + "nxx": "Nafri", + "ny": "Nyanja; Chewa; Chichewa", + "nyb": "Nyangbo", + "nyc": "Nyanga-li", + "nyd": "Nyore; Olunyole", + "nye": "Nyengo", + "nyf": "Giryama; Kigiryama", + "nyg": "Nyindu", + "nyh": "Nyikina", + "nyi": "Ama (Sudan)", + "nyj": "Nyanga", + "nyk": "Nyaneka", 
+ "nyl": "Nyeu", + "nym": "Nyamwezi", + "nyn": "Nyankole", + "nyo": "Nyoro", + "nyp": "Nyang'i", + "nyq": "Nayini", + "nyr": "Nyiha (Malawi)", + "nys": "Nyungar", + "nyt": "Nyawaygi", + "nyu": "Nyungwe", + "nyv": "Nyulnyul", + "nyw": "Nyaw", + "nyx": "Nganyaywana", + "nyy": "Nyakyusa-Ngonde", + "nza": "Tigon Mbembe", + "nzb": "Njebi", + "nzd": "Nzadi", + "nzi": "Nzima", + "nzk": "Nzakara", + "nzm": "Zeme Naga", + "nzs": "New Zealand Sign Language", + "nzu": "Teke-Nzikou", + "nzy": "Nzakambay", + "nzz": "Nanga Dama Dogon", + "oaa": "Orok", + "oac": "Oroch", + "oar": "Old Aramaic (up to 700 BCE); Ancient Aramaic (up to 700 BCE)", + "oav": "Old Avar", + "obi": "Obispeño", + "obk": "Southern Bontok", + "obl": "Oblo", + "obm": "Moabite", + "obo": "Obo Manobo", + "obr": "Old Burmese", + "obt": "Old Breton", + "obu": "Obulom", + "oc": "Occitan (post 1500)", + "oca": "Ocaina", + "och": "Old Chinese", + "ocm": "Old Cham", + "oco": "Old Cornish", + "ocu": "Atzingo Matlatzinca", + "oda": "Odut", + "odk": "Od", + "odt": "Old Dutch", + "odu": "Odual", + "ofo": "Ofo", + "ofs": "Old Frisian", + "ofu": "Efutop", + "ogb": "Ogbia", + "ogc": "Ogbah", + "oge": "Old Georgian", + "ogg": "Ogbogolo", + "ogo": "Khana", + "ogu": "Ogbronuagum", + "oht": "Old Hittite", + "ohu": "Old Hungarian", + "oia": "Oirata", + "oie": "Okolie", + "oin": "Inebu One", + "oj": "Ojibwa", + "ojb": "Northwestern Ojibwa", + "ojc": "Central Ojibwa", + "ojg": "Eastern Ojibwa", + "ojp": "Old Japanese", + "ojs": "Severn Ojibwa", + "ojv": "Ontong Java", + "ojw": "Western Ojibwa", + "oka": "Okanagan", + "okb": "Okobo", + "okc": "Kobo", + "okd": "Okodia", + "oke": "Okpe (Southwestern Edo)", + "okg": "Koko Babangk", + "okh": "Koresh-e Rostam", + "oki": "Okiek", + "okj": "Oko-Juwoi", + "okk": "Kwamtim One", + "okl": "Old Kentish Sign Language", + "okm": "Middle Korean (10th-16th cent.)", + "okn": "Oki-No-Erabu", + "oko": "Old Korean (3rd-9th cent.)", + "okr": "Kirike", + "oks": "Oko-Eni-Osayen", + "oku": "Oku", + "okv": 
"Orokaiva", + "okx": "Okpe (Northwestern Edo)", + "okz": "Old Khmer", + "ola": "Walungge", + "old": "Mochi", + "ole": "Olekha", + "olk": "Olkol", + "olm": "Oloma", + "olo": "Livvi", + "olr": "Olrat", + "olt": "Old Lithuanian", + "olu": "Kuvale", + "om": "Oromo", + "oma": "Omaha-Ponca", + "omb": "East Ambae", + "omc": "Mochica", + "omg": "Omagua", + "omi": "Omi", + "omk": "Omok", + "oml": "Ombo", + "omn": "Minoan", + "omo": "Utarmbung", + "omp": "Old Manipuri", + "omq": "Oto-Manguean languages", + "omr": "Old Marathi", + "omt": "Omotik", + "omu": "Omurano", + "omv": "Omotic languages", + "omw": "South Tairora", + "omx": "Old Mon", + "omy": "Old Malay", + "ona": "Ona", + "onb": "Lingao", + "one": "Oneida", + "ong": "Olo", + "oni": "Onin", + "onj": "Onjob", + "onk": "Kabore One", + "onn": "Onobasulu", + "ono": "Onondaga", + "onp": "Sartang", + "onr": "Northern One", + "ons": "Ono", + "ont": "Ontenu", + "onu": "Unua", + "onw": "Old Nubian", + "onx": "Onin Based Pidgin", + "ood": "Tohono O'odham", + "oog": "Ong", + "oon": "Önge", + "oor": "Oorlams", + "oos": "Old Ossetic", + "opa": "Okpamheri", + "opk": "Kopkaka", + "opm": "Oksapmin", + "opo": "Opao", + "opt": "Opata", + "opy": "Ofayé", + "or": "Oriya (macrolanguage); Odia (macrolanguage)", + "ora": "Oroha", + "orc": "Orma", + "ore": "Orejón", + "org": "Oring", + "orh": "Oroqen", + "orn": "Orang Kanaq", + "oro": "Orokolo", + "orr": "Oruma", + "ors": "Orang Seletar", + "ort": "Adivasi Oriya", + "oru": "Ormuri", + "orv": "Old Russian", + "orw": "Oro Win", + "orx": "Oro", + "ory": "Odia (individual language); Oriya (individual language)", + "orz": "Ormu", + "os": "Ossetian; Ossetic", + "osa": "Osage", + "osc": "Oscan", + "osi": "Osing", + "osn": "Old Sundanese", + "oso": "Ososo", + "osp": "Old Spanish", + "ost": "Osatu", + "osu": "Southern One", + "osx": "Old Saxon", + "ota": "Ottoman Turkish (1500-1928)", + "otb": "Old Tibetan", + "otd": "Ot Danum", + "ote": "Mezquital Otomi", + "oti": "Oti", + "otk": "Old Turkish", + 
"otl": "Tilapa Otomi", + "otm": "Eastern Highland Otomi", + "otn": "Tenango Otomi", + "oto": "Otomian languages", + "otq": "Querétaro Otomi", + "otr": "Otoro", + "ots": "Estado de México Otomi", + "ott": "Temoaya Otomi", + "otu": "Otuke", + "otw": "Ottawa", + "otx": "Texcatepec Otomi", + "oty": "Old Tamil", + "otz": "Ixtenco Otomi", + "oua": "Tagargrent", + "oub": "Glio-Oubi", + "oue": "Oune", + "oui": "Old Uighur", + "oum": "Ouma", + "ovd": "Elfdalian; Övdalian", + "owi": "Owiniga", + "owl": "Old Welsh", + "oyb": "Oy", + "oyd": "Oyda", + "oym": "Wayampi", + "oyy": "Oya'oya", + "ozm": "Koonzime", + "pa": "Panjabi; Punjabi", + "paa": "Papuan languages", + "pab": "Parecís", + "pac": "Pacoh", + "pad": "Paumarí", + "pae": "Pagibete", + "paf": "Paranawát", + "pag": "Pangasinan", + "pah": "Tenharim", + "pai": "Pe", + "pak": "Parakanã", + "pal": "Pahlavi", + "pam": "Pampanga; Kapampangan", + "pao": "Northern Paiute", + "pap": "Papiamento", + "paq": "Parya", + "par": "Panamint; Timbisha", + "pas": "Papasena", + "pau": "Palauan", + "pav": "Pakaásnovos", + "paw": "Pawnee", + "pax": "Pankararé", + "pay": "Pech", + "paz": "Pankararú", + "pbb": "Páez", + "pbc": "Patamona", + "pbe": "Mezontla Popoloca", + "pbf": "Coyotepec Popoloca", + "pbg": "Paraujano", + "pbh": "E'ñapa Woromaipu", + "pbi": "Parkwa", + "pbl": "Mak (Nigeria)", + "pbm": "Puebla Mazatec", + "pbn": "Kpasam", + "pbo": "Papel", + "pbp": "Badyara", + "pbr": "Pangwa", + "pbs": "Central Pame", + "pbt": "Southern Pashto", + "pbu": "Northern Pashto", + "pbv": "Pnar", + "pby": "Pyu (Papua New Guinea)", + "pca": "Santa Inés Ahuatempan Popoloca", + "pcb": "Pear", + "pcc": "Bouyei", + "pcd": "Picard", + "pce": "Ruching Palaung", + "pcf": "Paliyan", + "pcg": "Paniya", + "pch": "Pardhan", + "pci": "Duruwa", + "pcj": "Parenga", + "pck": "Paite Chin", + "pcl": "Pardhi", + "pcm": "Nigerian Pidgin", + "pcn": "Piti", + "pcp": "Pacahuara", + "pcw": "Pyapun", + "pda": "Anam", + "pdc": "Pennsylvania German", + "pdi": "Pa Di", + "pdn": 
"Podena; Fedan", + "pdo": "Padoe", + "pdt": "Plautdietsch", + "pdu": "Kayan", + "pea": "Peranakan Indonesian", + "peb": "Eastern Pomo", + "ped": "Mala (Papua New Guinea)", + "pee": "Taje", + "pef": "Northeastern Pomo", + "peg": "Pengo", + "peh": "Bonan", + "pei": "Chichimeca-Jonaz", + "pej": "Northern Pomo", + "pek": "Penchal", + "pel": "Pekal", + "pem": "Phende", + "peo": "Old Persian (ca. 600-400 B.C.)", + "pep": "Kunja", + "peq": "Southern Pomo", + "pes": "Iranian Persian", + "pev": "Pémono", + "pex": "Petats", + "pey": "Petjo", + "pez": "Eastern Penan", + "pfa": "Pááfang", + "pfe": "Pere", + "pfl": "Pfaelzisch", + "pga": "Sudanese Creole Arabic", + "pgd": "Gāndhārī", + "pgg": "Pangwali", + "pgi": "Pagi", + "pgk": "Rerep", + "pgl": "Primitive Irish", + "pgn": "Paelignian", + "pgs": "Pangseng", + "pgu": "Pagu", + "pgz": "Papua New Guinean Sign Language", + "pha": "Pa-Hng", + "phd": "Phudagi", + "phg": "Phuong", + "phh": "Phukha", + "phi": "Philippine languages", + "phj": "Pahari", + "phk": "Phake", + "phl": "Phalura; Palula", + "phm": "Phimbi", + "phn": "Phoenician", + "pho": "Phunoi", + "phq": "Phana'", + "phr": "Pahari-Potwari", + "pht": "Phu Thai", + "phu": "Phuan", + "phv": "Pahlavani", + "phw": "Phangduwali", + "pi": "Pali", + "pia": "Pima Bajo", + "pib": "Yine", + "pic": "Pinji", + "pid": "Piaroa", + "pie": "Piro", + "pif": "Pingelapese", + "pig": "Pisabo", + "pih": "Pitcairn-Norfolk", + "pij": "Pijao", + "pil": "Yom", + "pim": "Powhatan", + "pin": "Piame", + "pio": "Piapoco", + "pip": "Pero", + "pir": "Piratapuyo", + "pis": "Pijin", + "pit": "Pitta Pitta", + "piu": "Pintupi-Luritja", + "piv": "Pileni; Vaeakau-Taumako", + "piw": "Pimbwe", + "pix": "Piu", + "piy": "Piya-Kwonci", + "piz": "Pije", + "pjt": "Pitjantjatjara", + "pka": "Ardhamāgadhī Prākrit", + "pkb": "Pokomo; Kipfokomo", + "pkc": "Paekche", + "pkg": "Pak-Tong", + "pkh": "Pankhu", + "pkn": "Pakanha", + "pko": "Pökoot", + "pkp": "Pukapuka", + "pkr": "Attapady Kurumba", + "pks": "Pakistan Sign 
Language", + "pkt": "Maleng", + "pku": "Paku", + "pl": "Polish", + "pla": "Miani", + "plb": "Polonombauk", + "plc": "Central Palawano", + "pld": "Polari", + "ple": "Palu'e", + "plf": "Central Malayo-Polynesian languages", + "plg": "Pilagá", + "plh": "Paulohi", + "plj": "Polci", + "plk": "Kohistani Shina", + "pll": "Shwe Palaung", + "pln": "Palenquero", + "plo": "Oluta Popoluca", + "plq": "Palaic", + "plr": "Palaka Senoufo", + "pls": "San Marcos Tlacoyalco Popoloca; San Marcos Tlalcoyalco Popoloca", + "plt": "Plateau Malagasy", + "plu": "Palikúr", + "plv": "Southwest Palawano", + "plw": "Brooke's Point Palawano", + "ply": "Bolyu", + "plz": "Paluan", + "pma": "Paama", + "pmb": "Pambia", + "pmd": "Pallanganmiddang", + "pme": "Pwaamei", + "pmf": "Pamona", + "pmh": "Māhārāṣṭri Prākrit", + "pmi": "Northern Pumi", + "pmj": "Southern Pumi", + "pmk": "Pamlico", + "pml": "Lingua Franca", + "pmm": "Pomo", + "pmn": "Pam", + "pmo": "Pom", + "pmq": "Northern Pame", + "pmr": "Paynamar", + "pms": "Piemontese", + "pmt": "Tuamotuan", + "pmw": "Plains Miwok", + "pmx": "Poumei Naga", + "pmy": "Papuan Malay", + "pmz": "Southern Pame", + "pna": "Punan Bah-Biau", + "pnb": "Western Panjabi", + "pnc": "Pannei", + "pnd": "Mpinda", + "pne": "Western Penan", + "png": "Pangu; Pongu", + "pnh": "Penrhyn", + "pni": "Aoheng", + "pnj": "Pinjarup", + "pnk": "Paunaka", + "pnl": "Paleni", + "pnm": "Punan Batu 1", + "pnn": "Pinai-Hagahai", + "pno": "Panobo", + "pnp": "Pancana", + "pnq": "Pana (Burkina Faso)", + "pnr": "Panim", + "pns": "Ponosakan", + "pnt": "Pontic", + "pnu": "Jiongnai Bunu", + "pnv": "Pinigura", + "pnw": "Banyjima; Panytyima", + "pnx": "Phong-Kniang", + "pny": "Pinyin", + "pnz": "Pana (Central African Republic)", + "poc": "Poqomam", + "poe": "San Juan Atzingo Popoloca", + "pof": "Poke", + "pog": "Potiguára", + "poh": "Poqomchi'", + "poi": "Highland Popoluca", + "pok": "Pokangá", + "pom": "Southeastern Pomo", + "pon": "Pohnpeian", + "poo": "Central Pomo", + "pop": "Pwapwâ", + "poq": 
"Texistepec Popoluca", + "pos": "Sayula Popoluca", + "pot": "Potawatomi", + "pov": "Upper Guinea Crioulo", + "pow": "San Felipe Otlaltepec Popoloca", + "pox": "Polabian", + "poy": "Pogolo", + "poz": "Malayo-Polynesian languages", + "ppe": "Papi", + "ppi": "Paipai", + "ppk": "Uma", + "ppl": "Pipil; Nicarao", + "ppm": "Papuma", + "ppn": "Papapana", + "ppo": "Folopa", + "ppp": "Pelende", + "ppq": "Pei", + "pps": "San Luís Temalacayuca Popoloca", + "ppt": "Pare", + "ppu": "Papora", + "pqa": "Pa'a", + "pqe": "Eastern Malayo-Polynesian languages", + "pqm": "Malecite-Passamaquoddy", + "pqw": "Western Malayo-Polynesian languages", + "pra": "Prakrit languages", + "prc": "Parachi", + "prd": "Parsi-Dari", + "pre": "Principense", + "prf": "Paranan", + "prg": "Prussian", + "prh": "Porohanon", + "pri": "Paicî", + "prk": "Parauk", + "prl": "Peruvian Sign Language", + "prm": "Kibiri", + "prn": "Prasuni", + "pro": "Old Provençal (to 1500); Old Occitan (to 1500)", + "prp": "Parsi", + "prq": "Ashéninka Perené", + "prr": "Puri", + "prs": "Dari; Afghan Persian", + "prt": "Phai", + "pru": "Puragi", + "prw": "Parawen", + "prx": "Purik", + "prz": "Providencia Sign Language", + "ps": "Pushto; Pashto", + "psa": "Asue Awyu", + "psc": "Iranian Sign Language; Persian Sign Language", + "psd": "Plains Indian Sign Language", + "pse": "Central Malay", + "psg": "Penang Sign Language", + "psh": "Southwest Pashai; Southwest Pashayi", + "psi": "Southeast Pashai; Southeast Pashayi", + "psl": "Puerto Rican Sign Language", + "psm": "Pauserna", + "psn": "Panasuan", + "pso": "Polish Sign Language", + "psp": "Philippine Sign Language", + "psq": "Pasi", + "psr": "Portuguese Sign Language", + "pss": "Kaulong", + "pst": "Central Pashto", + "psu": "Sauraseni Prākrit", + "psw": "Port Sandwich", + "psy": "Piscataway", + "pt": "Portuguese", + "pta": "Pai Tavytera", + "pth": "Pataxó Hã-Ha-Hãe", + "pti": "Pindiini; Wangkatha", + "ptn": "Patani", + "pto": "Zo'é", + "ptp": "Patep", + "ptq": "Pattapu", + "ptr": 
"Piamatsina", + "ptt": "Enrekang", + "ptu": "Bambam", + "ptv": "Port Vato", + "ptw": "Pentlatch", + "pty": "Pathiya", + "pua": "Western Highland Purepecha", + "pub": "Purum", + "puc": "Punan Merap", + "pud": "Punan Aput", + "pue": "Puelche", + "puf": "Punan Merah", + "pug": "Phuie", + "pui": "Puinave", + "puj": "Punan Tubu", + "pum": "Puma", + "puo": "Puoc", + "pup": "Pulabu", + "puq": "Puquina", + "pur": "Puruborá", + "put": "Putoh", + "puu": "Punu", + "puw": "Puluwatese", + "pux": "Puare", + "puy": "Purisimeño", + "pwa": "Pawaia", + "pwb": "Panawa", + "pwg": "Gapapaiwa", + "pwi": "Patwin", + "pwm": "Molbog", + "pwn": "Paiwan", + "pwo": "Pwo Western Karen", + "pwr": "Powari", + "pww": "Pwo Northern Karen", + "pxm": "Quetzaltepec Mixe", + "pye": "Pye Krumen", + "pym": "Fyam", + "pyn": "Poyanáwa", + "pys": "Paraguayan Sign Language; Lengua de Señas del Paraguay", + "pyu": "Puyuma", + "pyx": "Pyu (Myanmar)", + "pyy": "Pyen", + "pzh": "Pazeh", + "pzn": "Jejara Naga; Para Naga", + "qu": "Quechua", + "qua": "Quapaw", + "qub": "Huallaga Huánuco Quechua", + "quc": "K'iche'; Quiché", + "qud": "Calderón Highland Quichua", + "quf": "Lambayeque Quechua", + "qug": "Chimborazo Highland Quichua", + "quh": "South Bolivian Quechua", + "qui": "Quileute", + "quk": "Chachapoyas Quechua", + "qul": "North Bolivian Quechua", + "qum": "Sipacapense", + "qun": "Quinault", + "qup": "Southern Pastaza Quechua", + "quq": "Quinqui", + "qur": "Yanahuanca Pasco Quechua", + "qus": "Santiago del Estero Quichua", + "quv": "Sacapulteco", + "quw": "Tena Lowland Quichua", + "qux": "Yauyos Quechua", + "quy": "Ayacucho Quechua", + "quz": "Cusco Quechua", + "qva": "Ambo-Pasco Quechua", + "qvc": "Cajamarca Quechua", + "qve": "Eastern Apurímac Quechua", + "qvh": "Huamalíes-Dos de Mayo Huánuco Quechua", + "qvi": "Imbabura Highland Quichua", + "qvj": "Loja Highland Quichua", + "qvl": "Cajatambo North Lima Quechua", + "qvm": "Margos-Yarowilca-Lauricocha Quechua", + "qvn": "North Junín Quechua", + "qvo": "Napo 
Lowland Quechua", + "qvp": "Pacaraos Quechua", + "qvs": "San Martín Quechua", + "qvw": "Huaylla Wanca Quechua", + "qvy": "Queyu", + "qvz": "Northern Pastaza Quichua", + "qwa": "Corongo Ancash Quechua", + "qwc": "Classical Quechua", + "qwe": "Quechuan (family)", + "qwh": "Huaylas Ancash Quechua", + "qwm": "Kuman (Russia)", + "qws": "Sihuas Ancash Quechua", + "qwt": "Kwalhioqua-Tlatskanai", + "qxa": "Chiquián Ancash Quechua", + "qxc": "Chincha Quechua", + "qxh": "Panao Huánuco Quechua", + "qxl": "Salasaca Highland Quichua", + "qxn": "Northern Conchucos Ancash Quechua", + "qxo": "Southern Conchucos Ancash Quechua", + "qxp": "Puno Quechua", + "qxq": "Qashqa'i", + "qxr": "Cañar Highland Quichua", + "qxs": "Southern Qiang", + "qxt": "Santa Ana de Tusi Pasco Quechua", + "qxu": "Arequipa-La Unión Quechua", + "qxw": "Jauja Wanca Quechua", + "qya": "Quenya", + "qyp": "Quiripi", + "raa": "Dungmali", + "rab": "Camling", + "rac": "Rasawa", + "rad": "Rade", + "raf": "Western Meohang", + "rag": "Logooli; Lulogooli", + "rah": "Rabha", + "rai": "Ramoaaina", + "raj": "Rajasthani", + "rak": "Tulu-Bohuai", + "ral": "Ralte", + "ram": "Canela", + "ran": "Riantana", + "rao": "Rao", + "rap": "Rapanui", + "raq": "Saam", + "rar": "Rarotongan; Cook Islands Maori", + "ras": "Tegali", + "rat": "Razajerdi", + "rau": "Raute", + "rav": "Sampang", + "raw": "Rawang", + "rax": "Rang", + "ray": "Rapa", + "raz": "Rahambuu", + "rbb": "Rumai Palaung", + "rbk": "Northern Bontok", + "rbl": "Miraya Bikol", + "rbp": "Barababaraba", + "rcf": "Réunion Creole French", + "rdb": "Rudbari", + "rea": "Rerau", + "reb": "Rembong", + "ree": "Rejang Kayan", + "reg": "Kara (Tanzania)", + "rei": "Reli", + "rej": "Rejang", + "rel": "Rendille", + "rem": "Remo", + "ren": "Rengao", + "rer": "Rer Bare", + "res": "Reshe", + "ret": "Retta", + "rey": "Reyesano", + "rga": "Roria", + "rge": "Romano-Greek", + "rgk": "Rangkas", + "rgn": "Romagnol", + "rgr": "Resígaro", + "rgs": "Southern Roglai", + "rgu": "Ringgou", + "rhg": 
"Rohingya", + "rhp": "Yahang", + "ria": "Riang (India)", + "rib": "Bribri Sign Language", + "rif": "Tarifit", + "ril": "Riang Lang; Riang (Myanmar)", + "rim": "Nyaturu", + "rin": "Nungu", + "rir": "Ribun", + "rit": "Ritharrngu", + "riu": "Riung", + "rjg": "Rajong", + "rji": "Raji", + "rjs": "Rajbanshi", + "rka": "Kraol", + "rkb": "Rikbaktsa", + "rkh": "Rakahanga-Manihiki", + "rki": "Rakhine", + "rkm": "Marka", + "rkt": "Rangpuri; Kamta", + "rkw": "Arakwal", + "rm": "Romansh", + "rma": "Rama", + "rmb": "Rembarrnga", + "rmc": "Carpathian Romani", + "rmd": "Traveller Danish", + "rme": "Angloromani", + "rmf": "Kalo Finnish Romani", + "rmg": "Traveller Norwegian", + "rmh": "Murkim", + "rmi": "Lomavren", + "rmk": "Romkun", + "rml": "Baltic Romani", + "rmm": "Roma", + "rmn": "Balkan Romani", + "rmo": "Sinte Romani", + "rmp": "Rempi", + "rmq": "Caló", + "rms": "Romanian Sign Language", + "rmt": "Domari", + "rmu": "Tavringer Romani", + "rmv": "Romanova", + "rmw": "Welsh Romani", + "rmx": "Romam", + "rmy": "Vlax Romani", + "rmz": "Marma", + "rn": "Rundi", + "rnb": "Brunca Sign Language", + "rnd": "Ruund", + "rng": "Ronga", + "rnl": "Ranglong", + "rnn": "Roon", + "rnp": "Rongpo", + "rnr": "Nari Nari", + "rnw": "Rungwa", + "ro": "Romanian; Moldavian; Moldovan", + "roa": "Romance languages", + "rob": "Tae'", + "roc": "Cacgia Roglai", + "rod": "Rogo", + "roe": "Ronji", + "rof": "Rombo", + "rog": "Northern Roglai", + "rol": "Romblomanon", + "rom": "Romany", + "roo": "Rotokas", + "rop": "Kriol", + "ror": "Rongga", + "rou": "Runga", + "row": "Dela-Oenale", + "rpn": "Repanbitip", + "rpt": "Rapting", + "rri": "Ririo", + "rro": "Waima", + "rrt": "Arritinngithigh", + "rsb": "Romano-Serbian", + "rsk": "Ruthenian; Rusyn", + "rsl": "Russian Sign Language", + "rsm": "Miriwoong Sign Language", + "rsn": "Rwandan Sign Language", + "rtc": "Rungtu Chin", + "rth": "Ratahan", + "rtm": "Rotuman", + "rts": "Yurats", + "rtw": "Rathawi", + "ru": "Russian", + "rub": "Gungu", + "ruc": "Ruuli", + "rue": 
"Rusyn", + "ruf": "Luguru", + "rug": "Roviana", + "ruh": "Ruga", + "rui": "Rufiji", + "ruk": "Che", + "ruo": "Istro Romanian", + "rup": "Macedo-Romanian; Aromanian; Arumanian", + "ruq": "Megleno Romanian", + "rut": "Rutul", + "ruu": "Lanas Lobu", + "ruy": "Mala (Nigeria)", + "ruz": "Ruma", + "rw": "Kinyarwanda", + "rwa": "Rawo", + "rwk": "Rwa", + "rwl": "Ruwila", + "rwm": "Amba (Uganda)", + "rwo": "Rawa", + "rwr": "Marwari (India)", + "rxd": "Ngardi", + "rxw": "Karuwali; Garuwali", + "ryn": "Northern Amami-Oshima", + "rys": "Yaeyama", + "ryu": "Central Okinawan", + "rzh": "Rāziḥī", + "sa": "Sanskrit", + "saa": "Saba", + "sab": "Buglere", + "sac": "Meskwaki", + "sad": "Sandawe", + "sae": "Sabanê", + "saf": "Safaliba", + "sah": "Yakut", + "sai": "South American Indian languages", + "saj": "Sahu", + "sak": "Sake", + "sal": "Salishan languages", + "sam": "Samaritan Aramaic", + "sao": "Sause", + "saq": "Samburu", + "sar": "Saraveca", + "sas": "Sasak", + "sat": "Santali", + "sau": "Saleman", + "sav": "Saafi-Saafi", + "saw": "Sawi", + "sax": "Sa", + "say": "Saya", + "saz": "Saurashtra", + "sba": "Ngambay", + "sbb": "Simbo", + "sbc": "Kele (Papua New Guinea)", + "sbd": "Southern Samo", + "sbe": "Saliba", + "sbf": "Chabu; Shabo", + "sbg": "Seget", + "sbh": "Sori-Harengan", + "sbi": "Seti", + "sbj": "Surbakhal", + "sbk": "Safwa", + "sbl": "Botolan Sambal", + "sbm": "Sagala", + "sbn": "Sindhi Bhil", + "sbo": "Sabüm", + "sbp": "Sangu (Tanzania)", + "sbq": "Sileibi", + "sbr": "Sembakung Murut", + "sbs": "Subiya", + "sbt": "Kimki", + "sbu": "Stod Bhoti", + "sbv": "Sabine", + "sbw": "Simba", + "sbx": "Seberuang", + "sby": "Soli", + "sbz": "Sara Kaba", + "sc": "Sardinian", + "scb": "Chut", + "sce": "Dongxiang", + "scf": "San Miguel Creole French", + "scg": "Sanggau", + "sch": "Sakachep", + "sci": "Sri Lankan Creole Malay", + "sck": "Sadri", + "scl": "Shina", + "scn": "Sicilian", + "sco": "Scots", + "scp": "Hyolmo; Helambu Sherpa", + "scq": "Sa'och", + "scs": "North Slavey", + 
"sct": "Southern Katang", + "scu": "Shumcho", + "scv": "Sheni", + "scw": "Sha", + "scx": "Sicel", + "sd": "Sindhi", + "sda": "Toraja-Sa'dan", + "sdb": "Shabak", + "sdc": "Sassarese Sardinian", + "sde": "Surubu", + "sdf": "Sarli", + "sdg": "Savi", + "sdh": "Southern Kurdish", + "sdj": "Suundi", + "sdk": "Sos Kundi", + "sdl": "Saudi Arabian Sign Language", + "sdn": "Gallurese Sardinian", + "sdo": "Bukar-Sadung Bidayuh", + "sdp": "Sherdukpen", + "sdq": "Semandang", + "sdr": "Oraon Sadri", + "sds": "Sened", + "sdt": "Shuadit", + "sdu": "Sarudu", + "sdv": "Eastern Sudanic languages", + "sdx": "Sibu Melanau", + "sdz": "Sallands", + "se": "Northern Sami", + "sea": "Semai", + "seb": "Shempire Senoufo", + "sec": "Sechelt", + "sed": "Sedang", + "see": "Seneca", + "sef": "Cebaara Senoufo", + "seg": "Segeju", + "seh": "Sena", + "sei": "Seri", + "sej": "Sene", + "sek": "Sekani", + "sel": "Selkup", + "sem": "Semitic languages", + "sen": "Nanerigé Sénoufo", + "seo": "Suarmin", + "sep": "Sìcìté Sénoufo", + "seq": "Senara Sénoufo", + "ser": "Serrano", + "ses": "Koyraboro Senni Songhai", + "set": "Sentani", + "seu": "Serui-Laut", + "sev": "Nyarafolo Senoufo", + "sew": "Sewa Bay", + "sey": "Secoya", + "sez": "Senthang Chin", + "sfb": "Langue des signes de Belgique Francophone; French Belgian Sign Language", + "sfe": "Eastern Subanen", + "sfm": "Small Flowery Miao", + "sfs": "South African Sign Language", + "sfw": "Sehwi", + "sg": "Sango", + "sga": "Old Irish (to 900)", + "sgb": "Mag-antsi Ayta", + "sgc": "Kipsigis", + "sgd": "Surigaonon", + "sge": "Segai", + "sgg": "Swiss-German Sign Language", + "sgh": "Shughni", + "sgi": "Suga", + "sgj": "Surgujia", + "sgk": "Sangkong", + "sgm": "Singa", + "sgn": "Sign languages", + "sgp": "Singpho", + "sgr": "Sangisari", + "sgs": "Samogitian", + "sgt": "Brokpake", + "sgu": "Salas", + "sgw": "Sebat Bet Gurage", + "sgx": "Sierra Leone Sign Language", + "sgy": "Sanglechi", + "sgz": "Sursurunga", + "sh": "Serbo-Croatian", + "sha": "Shall-Zwall", + 
"shb": "Ninam", + "shc": "Sonde", + "shd": "Kundal Shahi", + "she": "Sheko", + "shg": "Shua", + "shh": "Shoshoni", + "shi": "Tachelhit", + "shj": "Shatt", + "shk": "Shilluk", + "shl": "Shendu", + "shm": "Shahrudi", + "shn": "Shan", + "sho": "Shanga", + "shp": "Shipibo-Conibo", + "shq": "Sala", + "shr": "Shi", + "shs": "Shuswap", + "sht": "Shasta", + "shu": "Chadian Arabic", + "shv": "Shehri", + "shw": "Shwai", + "shx": "She", + "shy": "Tachawit", + "shz": "Syenara Senoufo", + "si": "Sinhala; Sinhalese", + "sia": "Akkala Sami", + "sib": "Sebop", + "sid": "Sidamo", + "sie": "Simaa", + "sif": "Siamou", + "sig": "Paasaal", + "sih": "Zire; Sîshëë", + "sii": "Shom Peng", + "sij": "Numbami", + "sik": "Sikiana", + "sil": "Tumulung Sisaala", + "sim": "Mende (Papua New Guinea)", + "sio": "Siouan languages", + "sip": "Sikkimese", + "siq": "Sonia", + "sir": "Siri", + "sis": "Siuslaw", + "sit": "Sino-Tibetan languages", + "siu": "Sinagen", + "siv": "Sumariup", + "siw": "Siwai", + "six": "Sumau", + "siy": "Sivandi", + "siz": "Siwi", + "sja": "Epena", + "sjb": "Sajau Basap", + "sjd": "Kildin Sami", + "sje": "Pite Sami", + "sjg": "Assangori", + "sjk": "Kemi Sami", + "sjl": "Sajalong; Miji", + "sjm": "Mapun", + "sjn": "Sindarin", + "sjo": "Xibe", + "sjp": "Surjapuri", + "sjr": "Siar-Lak", + "sjs": "Senhaja De Srair", + "sjt": "Ter Sami", + "sju": "Ume Sami", + "sjw": "Shawnee", + "sk": "Slovak", + "ska": "Skagit", + "skb": "Saek", + "skc": "Ma Manda", + "skd": "Southern Sierra Miwok", + "ske": "Seke (Vanuatu)", + "skf": "Sakirabiá", + "skg": "Sakalava Malagasy", + "skh": "Sikule", + "ski": "Sika", + "skj": "Seke (Nepal)", + "skm": "Kutong", + "skn": "Kolibugan Subanon", + "sko": "Seko Tengah", + "skp": "Sekapan", + "skq": "Sininkere", + "skr": "Saraiki; Seraiki", + "sks": "Maia", + "skt": "Sakata", + "sku": "Sakao", + "skv": "Skou", + "skw": "Skepi Creole Dutch", + "skx": "Seko Padang", + "sky": "Sikaiana", + "skz": "Sekar", + "sl": "Slovenian", + "sla": "Slavic languages", + 
"slc": "Sáliba", + "sld": "Sissala", + "sle": "Sholaga", + "slf": "Swiss-Italian Sign Language", + "slg": "Selungai Murut", + "slh": "Southern Puget Sound Salish", + "sli": "Lower Silesian", + "slj": "Salumá", + "sll": "Salt-Yui", + "slm": "Pangutaran Sama", + "sln": "Salinan", + "slp": "Lamaholot", + "slq": "Salchuq", + "slr": "Salar", + "sls": "Singapore Sign Language", + "slt": "Sila", + "slu": "Selaru", + "slw": "Sialum", + "slx": "Salampasu", + "sly": "Selayar", + "slz": "Ma'ya", + "sm": "Samoan", + "sma": "Southern Sami", + "smb": "Simbari", + "smc": "Som", + "smf": "Auwe", + "smg": "Simbali", + "smh": "Samei", + "smi": "Sami languages", + "smj": "Lule Sami", + "smk": "Bolinao", + "sml": "Central Sama", + "smm": "Musasa", + "smn": "Inari Sami", + "smp": "Samaritan", + "smq": "Samo", + "smr": "Simeulue", + "sms": "Skolt Sami", + "smt": "Simte", + "smu": "Somray", + "smv": "Samvedi", + "smw": "Sumbawa", + "smx": "Samba", + "smy": "Semnani", + "smz": "Simeku", + "sn": "Shona", + "snc": "Sinaugoro", + "sne": "Bau Bidayuh", + "snf": "Noon", + "sng": "Sanga (Democratic Republic of Congo)", + "sni": "Sensi", + "snj": "Riverain Sango", + "snk": "Soninke", + "snl": "Sangil", + "snm": "Southern Ma'di", + "snn": "Siona", + "sno": "Snohomish", + "snp": "Siane", + "snq": "Sangu (Gabon)", + "snr": "Sihan", + "sns": "South West Bay; Nahavaq", + "snu": "Senggi; Viid", + "snv": "Sa'ban", + "snw": "Selee", + "snx": "Sam", + "sny": "Saniyo-Hiyewe", + "snz": "Kou", + "so": "Somali", + "soa": "Thai Song", + "sob": "Sobei", + "soc": "So (Democratic Republic of Congo)", + "sod": "Songoora", + "soe": "Songomeno", + "sog": "Sogdian", + "soh": "Aka", + "soi": "Sonha", + "soj": "Soi", + "sok": "Sokoro", + "sol": "Solos", + "son": "Songhai languages", + "soo": "Songo", + "sop": "Songe", + "soq": "Kanasi", + "sor": "Somrai", + "sos": "Seeku", + "sou": "Southern Thai", + "sov": "Sonsorol", + "sow": "Sowanda", + "sox": "Swo", + "soy": "Miyobe", + "soz": "Temi", + "spb": "Sepa (Indonesia)", 
+ "spc": "Sapé", + "spd": "Saep", + "spe": "Sepa (Papua New Guinea)", + "spg": "Sian", + "spi": "Saponi", + "spk": "Sengo", + "spl": "Selepet", + "spm": "Akukem", + "spn": "Sanapaná", + "spo": "Spokane", + "spp": "Supyire Senoufo", + "spq": "Loreto-Ucayali Spanish", + "spr": "Saparua", + "sps": "Saposa", + "spt": "Spiti Bhoti", + "spu": "Sapuan", + "spv": "Sambalpuri; Kosli", + "spx": "South Picene", + "spy": "Sabaot", + "sq": "Albanian", + "sqa": "Shama-Sambuga", + "sqh": "Shau", + "sqj": "Albanian languages", + "sqk": "Albanian Sign Language", + "sqm": "Suma", + "sqn": "Susquehannock", + "sqo": "Sorkhei", + "sqq": "Sou", + "sqr": "Siculo Arabic", + "sqs": "Sri Lankan Sign Language", + "sqt": "Soqotri", + "squ": "Squamish", + "sqx": "Kufr Qassem Sign Language (KQSL)", + "sr": "Serbian", + "sra": "Saruga", + "srb": "Sora", + "src": "Logudorese Sardinian", + "sre": "Sara", + "srf": "Nafi", + "srg": "Sulod", + "srh": "Sarikoli", + "sri": "Siriano", + "srk": "Serudung Murut", + "srl": "Isirawa", + "srm": "Saramaccan", + "srn": "Sranan Tongo", + "sro": "Campidanese Sardinian", + "srq": "Sirionó", + "srr": "Serer", + "srs": "Sarsi", + "srt": "Sauri", + "sru": "Suruí", + "srv": "Southern Sorsoganon", + "srw": "Serua", + "srx": "Sirmauri", + "sry": "Sera", + "srz": "Shahmirzadi", + "ss": "Swati", + "ssa": "Nilo-Saharan languages", + "ssb": "Southern Sama", + "ssc": "Suba-Simbiti", + "ssd": "Siroi", + "sse": "Balangingi; Bangingih Sama", + "ssf": "Thao", + "ssg": "Seimat", + "ssh": "Shihhi Arabic", + "ssi": "Sansi", + "ssj": "Sausi", + "ssk": "Sunam", + "ssl": "Western Sisaala", + "ssm": "Semnam", + "ssn": "Waata", + "sso": "Sissano", + "ssp": "Spanish Sign Language", + "ssq": "So'a", + "ssr": "Swiss-French Sign Language", + "sss": "Sô", + "sst": "Sinasina", + "ssu": "Susuami", + "ssv": "Shark Bay", + "ssx": "Samberigi", + "ssy": "Saho", + "ssz": "Sengseng", + "st": "Southern Sotho", + "sta": "Settla", + "stb": "Northern Subanen", + "std": "Sentinel", + "ste": 
"Liana-Seti", + "stf": "Seta", + "stg": "Trieng", + "sth": "Shelta", + "sti": "Bulo Stieng", + "stj": "Matya Samo", + "stk": "Arammba", + "stl": "Stellingwerfs", + "stm": "Setaman", + "stn": "Owa", + "sto": "Stoney", + "stp": "Southeastern Tepehuan", + "stq": "Saterfriesisch", + "str": "Straits Salish", + "sts": "Shumashti", + "stt": "Budeh Stieng", + "stu": "Samtao", + "stv": "Silt'e", + "stw": "Satawalese", + "sty": "Siberian Tatar", + "su": "Sundanese", + "sua": "Sulka", + "sub": "Suku", + "suc": "Western Subanon", + "sue": "Suena", + "sug": "Suganga", + "sui": "Suki", + "suj": "Shubi", + "suk": "Sukuma", + "suo": "Bouni", + "suq": "Tirmaga-Chai Suri; Suri", + "sur": "Mwaghavul", + "sus": "Susu", + "sut": "Subtiaba", + "suv": "Puroik", + "suw": "Sumbwa", + "sux": "Sumerian", + "suy": "Suyá", + "suz": "Sunwar", + "sv": "Swedish", + "sva": "Svan", + "svb": "Ulau-Suain", + "svc": "Vincentian Creole English", + "sve": "Serili", + "svk": "Slovakian Sign Language", + "svm": "Slavomolisano", + "svs": "Savosavo", + "svx": "Skalvian", + "sw": "Swahili (macrolanguage)", + "swb": "Maore Comorian", + "swc": "Congo Swahili", + "swf": "Sere", + "swg": "Swabian", + "swh": "Swahili (individual language); Kiswahili", + "swi": "Sui", + "swj": "Sira", + "swk": "Malawi Sena", + "swl": "Swedish Sign Language", + "swm": "Samosa", + "swn": "Sawknah", + "swo": "Shanenawa", + "swp": "Suau", + "swq": "Sharwa", + "swr": "Saweru", + "sws": "Seluwasan", + "swt": "Sawila", + "swu": "Suwawa", + "swv": "Shekhawati", + "sww": "Sowa", + "swx": "Suruahá", + "swy": "Sarua", + "sxb": "Suba", + "sxc": "Sicanian", + "sxe": "Sighu", + "sxg": "Shuhi; Shixing", + "sxk": "Southern Kalapuya", + "sxl": "Selian", + "sxm": "Samre", + "sxn": "Sangir", + "sxo": "Sorothaptic", + "sxr": "Saaroa", + "sxs": "Sasaru", + "sxu": "Upper Saxon", + "sxw": "Saxwe Gbe", + "sya": "Siang", + "syb": "Central Subanen", + "syc": "Classical Syriac", + "syd": "Samoyedic languages", + "syi": "Seki", + "syk": "Sukur", + "syl": 
"Sylheti", + "sym": "Maya Samo", + "syn": "Senaya", + "syo": "Suoy", + "syr": "Syriac", + "sys": "Sinyar", + "syw": "Kagate", + "syx": "Samay", + "syy": "Al-Sayyid Bedouin Sign Language", + "sza": "Semelai", + "szb": "Ngalum", + "szc": "Semaq Beri", + "szd": "Seru", + "sze": "Seze", + "szg": "Sengele", + "szl": "Silesian", + "szn": "Sula", + "szp": "Suabo", + "szs": "Solomon Islands Sign Language", + "szv": "Isu (Fako Division)", + "szw": "Sawai", + "szy": "Sakizaya", + "ta": "Tamil", + "taa": "Lower Tanana", + "tab": "Tabassaran", + "tac": "Lowland Tarahumara", + "tad": "Tause", + "tae": "Tariana", + "taf": "Tapirapé", + "tag": "Tagoi", + "tai": "Tai languages", + "taj": "Eastern Tamang", + "tak": "Tala", + "tal": "Tal", + "tan": "Tangale", + "tao": "Yami", + "tap": "Taabwa", + "taq": "Tamasheq", + "tar": "Central Tarahumara", + "tas": "Tay Boi", + "tau": "Upper Tanana", + "tav": "Tatuyo", + "taw": "Tai", + "tax": "Tamki", + "tay": "Atayal", + "taz": "Tocho", + "tba": "Aikanã", + "tbc": "Takia", + "tbd": "Kaki Ae", + "tbe": "Tanimbili", + "tbf": "Mandara", + "tbg": "North Tairora", + "tbh": "Dharawal; Thurawal", + "tbi": "Gaam", + "tbj": "Tiang", + "tbk": "Calamian Tagbanwa", + "tbl": "Tboli", + "tbm": "Tagbu", + "tbn": "Barro Negro Tunebo", + "tbo": "Tawala", + "tbp": "Taworta; Diebroud", + "tbq": "Tibeto-Burman languages", + "tbr": "Tumtum", + "tbs": "Tanguat", + "tbt": "Tembo (Kitembo)", + "tbu": "Tubar", + "tbv": "Tobo", + "tbw": "Tagbanwa", + "tbx": "Kapin", + "tby": "Tabaru", + "tbz": "Ditammari", + "tca": "Ticuna", + "tcb": "Tanacross", + "tcc": "Datooga", + "tcd": "Tafi", + "tce": "Southern Tutchone", + "tcf": "Malinaltepec Me'phaa; Malinaltepec Tlapanec", + "tcg": "Tamagario", + "tch": "Turks And Caicos Creole English", + "tci": "Wára", + "tck": "Tchitchege", + "tcl": "Taman (Myanmar)", + "tcm": "Tanahmerah", + "tcn": "Tichurong", + "tco": "Taungyo", + "tcp": "Tawr Chin", + "tcq": "Kaiy", + "tcs": "Torres Strait Creole; Yumplatok", + "tct": "T'en", + 
"tcu": "Southeastern Tarahumara", + "tcw": "Tecpatlán Totonac", + "tcx": "Toda", + "tcy": "Tulu", + "tcz": "Thado Chin", + "tda": "Tagdal", + "tdb": "Panchpargania", + "tdc": "Emberá-Tadó", + "tdd": "Tai Nüa", + "tde": "Tiranige Diga Dogon", + "tdf": "Talieng", + "tdg": "Western Tamang", + "tdh": "Thulung", + "tdi": "Tomadino", + "tdj": "Tajio", + "tdk": "Tambas", + "tdl": "Sur", + "tdm": "Taruma", + "tdn": "Tondano", + "tdo": "Teme", + "tdq": "Tita", + "tdr": "Todrah", + "tds": "Doutai", + "tdt": "Tetun Dili", + "tdv": "Toro", + "tdx": "Tandroy-Mahafaly Malagasy", + "tdy": "Tadyawan", + "te": "Telugu", + "tea": "Temiar", + "teb": "Tetete", + "tec": "Terik", + "ted": "Tepo Krumen", + "tee": "Huehuetla Tepehua", + "tef": "Teressa", + "teg": "Teke-Tege", + "teh": "Tehuelche", + "tei": "Torricelli", + "tek": "Ibali Teke", + "tem": "Timne", + "ten": "Tama (Colombia)", + "teo": "Teso", + "tep": "Tepecano", + "teq": "Temein", + "ter": "Tereno", + "tes": "Tengger", + "tet": "Tetum", + "teu": "Soo", + "tev": "Teor", + "tew": "Tewa (USA)", + "tex": "Tennet", + "tey": "Tulishi", + "tez": "Tetserret", + "tfi": "Tofin Gbe", + "tfn": "Tanaina", + "tfo": "Tefaro", + "tfr": "Teribe", + "tft": "Ternate", + "tg": "Tajik", + "tga": "Sagalla", + "tgb": "Tobilung", + "tgc": "Tigak", + "tgd": "Ciwogai", + "tge": "Eastern Gorkha Tamang", + "tgf": "Chalikha", + "tgh": "Tobagonian Creole English", + "tgi": "Lawunuia", + "tgj": "Tagin", + "tgn": "Tandaganon", + "tgo": "Sudest", + "tgp": "Tangoa", + "tgq": "Tring", + "tgr": "Tareng", + "tgs": "Nume", + "tgt": "Central Tagbanwa", + "tgu": "Tanggu", + "tgv": "Tingui-Boto", + "tgw": "Tagwana Senoufo", + "tgx": "Tagish", + "tgy": "Togoyo", + "tgz": "Tagalaka", + "th": "Thai", + "thd": "Kuuk Thaayorre; Thayore", + "the": "Chitwania Tharu", + "thf": "Thangmi", + "thh": "Northern Tarahumara", + "thi": "Tai Long", + "thk": "Tharaka; Kitharaka", + "thl": "Dangaura Tharu", + "thm": "Aheu", + "thn": "Thachanadan", + "thp": "Thompson", + "thq": 
"Kochila Tharu", + "thr": "Rana Tharu", + "ths": "Thakali", + "tht": "Tahltan", + "thu": "Thuri", + "thv": "Tahaggart Tamahaq", + "thy": "Tha", + "thz": "Tayart Tamajeq", + "ti": "Tigrinya", + "tia": "Tidikelt Tamazight", + "tic": "Tira", + "tif": "Tifal", + "tig": "Tigre", + "tih": "Timugon Murut", + "tii": "Tiene", + "tij": "Tilung", + "tik": "Tikar", + "til": "Tillamook", + "tim": "Timbe", + "tin": "Tindi", + "tio": "Teop", + "tip": "Trimuris", + "tiq": "Tiéfo", + "tis": "Masadiit Itneg", + "tit": "Tinigua", + "tiu": "Adasen", + "tiv": "Tiv", + "tiw": "Tiwi", + "tix": "Southern Tiwa", + "tiy": "Tiruray", + "tiz": "Tai Hongjin", + "tja": "Tajuasohn", + "tjg": "Tunjung", + "tji": "Northern Tujia", + "tjj": "Tjungundji", + "tjl": "Tai Laing", + "tjm": "Timucua", + "tjn": "Tonjon", + "tjo": "Temacine Tamazight", + "tjp": "Tjupany", + "tjs": "Southern Tujia", + "tju": "Tjurruru", + "tjw": "Djabwurrung", + "tk": "Turkmen", + "tka": "Truká", + "tkb": "Buksa", + "tkd": "Tukudede", + "tke": "Takwane", + "tkf": "Tukumanféd", + "tkg": "Tesaka Malagasy", + "tkl": "Tokelau", + "tkm": "Takelma", + "tkn": "Toku-No-Shima", + "tkp": "Tikopia", + "tkq": "Tee", + "tkr": "Tsakhur", + "tks": "Takestani", + "tkt": "Kathoriya Tharu", + "tku": "Upper Necaxa Totonac", + "tkv": "Mur Pano", + "tkw": "Teanu", + "tkx": "Tangko", + "tkz": "Takua", + "tl": "Tagalog", + "tla": "Southwestern Tepehuan", + "tlb": "Tobelo", + "tlc": "Yecuatla Totonac", + "tld": "Talaud", + "tlf": "Telefol", + "tlg": "Tofanma", + "tlh": "Klingon; tlhIngan Hol", + "tli": "Tlingit", + "tlj": "Talinga-Bwisi", + "tlk": "Taloki", + "tll": "Tetela", + "tlm": "Tolomako", + "tln": "Talondo'", + "tlo": "Talodi", + "tlp": "Filomena Mata-Coahuitlán Totonac", + "tlq": "Tai Loi", + "tlr": "Talise", + "tls": "Tambotalo", + "tlt": "Sou Nama; Teluti", + "tlu": "Tulehu", + "tlv": "Taliabu", + "tlx": "Khehek", + "tly": "Talysh", + "tma": "Tama (Chad)", + "tmb": "Katbol; Avava", + "tmc": "Tumak", + "tmd": "Haruai", + "tme": 
"Tremembé", + "tmf": "Toba-Maskoy", + "tmg": "Ternateño", + "tmh": "Tamashek", + "tmi": "Tutuba", + "tmj": "Samarokena", + "tmk": "Northwestern Tamang", + "tml": "Tamnim Citak", + "tmm": "Tai Thanh", + "tmn": "Taman (Indonesia)", + "tmo": "Temoq", + "tmq": "Tumleo", + "tmr": "Jewish Babylonian Aramaic (ca. 200-1200 CE)", + "tms": "Tima", + "tmt": "Tasmate", + "tmu": "Iau", + "tmv": "Tembo (Motembo)", + "tmw": "Temuan", + "tmy": "Tami", + "tmz": "Tamanaku", + "tn": "Tswana", + "tna": "Tacana", + "tnb": "Western Tunebo", + "tnc": "Tanimuca-Retuarã", + "tnd": "Angosturas Tunebo", + "tng": "Tobanga", + "tnh": "Maiani", + "tni": "Tandia", + "tnk": "Kwamera", + "tnl": "Lenakel", + "tnm": "Tabla", + "tnn": "North Tanna", + "tno": "Toromono", + "tnp": "Whitesands", + "tnq": "Taino", + "tnr": "Ménik", + "tns": "Tenis", + "tnt": "Tontemboan", + "tnu": "Tay Khang", + "tnv": "Tangchangya", + "tnw": "Tonsawang", + "tnx": "Tanema", + "tny": "Tongwe", + "tnz": "Ten'edn", + "to": "Tonga (Tonga Islands)", + "tob": "Toba", + "toc": "Coyutla Totonac", + "tod": "Toma", + "tof": "Gizrra", + "tog": "Tonga (Nyasa)", + "toh": "Gitonga", + "toi": "Tonga (Zambia)", + "toj": "Tojolabal", + "tok": "Toki Pona", + "tol": "Tolowa", + "tom": "Tombulu", + "too": "Xicotepec De Juárez Totonac", + "top": "Papantla Totonac", + "toq": "Toposa", + "tor": "Togbo-Vara Banda", + "tos": "Highland Totonac", + "tou": "Tho", + "tov": "Upper Taromi", + "tow": "Jemez", + "tox": "Tobian", + "toy": "Topoiyo", + "toz": "To", + "tpa": "Taupota", + "tpc": "Azoyú Me'phaa; Azoyú Tlapanec", + "tpe": "Tippera", + "tpf": "Tarpia", + "tpg": "Kula", + "tpi": "Tok Pisin", + "tpj": "Tapieté", + "tpk": "Tupinikin", + "tpl": "Tlacoapa Me'phaa; Tlacoapa Tlapanec", + "tpm": "Tampulma", + "tpn": "Tupinambá", + "tpo": "Tai Pao", + "tpp": "Pisaflores Tepehua", + "tpq": "Tukpa", + "tpr": "Tuparí", + "tpt": "Tlachichilco Tepehua", + "tpu": "Tampuan", + "tpv": "Tanapag", + "tpw": "Tupí", + "tpx": "Acatepec Me'phaa; Acatepec Tlapanec", 
+ "tpy": "Trumai", + "tpz": "Tinputz", + "tqb": "Tembé", + "tql": "Lehali", + "tqm": "Turumsa", + "tqn": "Tenino", + "tqo": "Toaripi", + "tqp": "Tomoip", + "tqq": "Tunni", + "tqr": "Torona", + "tqt": "Western Totonac", + "tqu": "Touo", + "tqw": "Tonkawa", + "tr": "Turkish", + "tra": "Tirahi", + "trb": "Terebu", + "trc": "Copala Triqui", + "trd": "Turi", + "tre": "East Tarangan", + "trf": "Trinidadian Creole English", + "trg": "Lishán Didán", + "trh": "Turaka", + "tri": "Trió", + "trj": "Toram", + "trk": "Turkic languages", + "trl": "Traveller Scottish", + "trm": "Tregami", + "trn": "Trinitario", + "tro": "Tarao Naga", + "trp": "Kok Borok", + "trq": "San Martín Itunyoso Triqui", + "trr": "Taushiro", + "trs": "Chicahuaxtla Triqui", + "trt": "Tunggare", + "tru": "Turoyo; Surayt", + "trv": "Sediq; Seediq; Taroko", + "trw": "Torwali", + "trx": "Tringgus-Sembaan Bidayuh", + "try": "Turung", + "trz": "Torá", + "ts": "Tsonga", + "tsa": "Tsaangi", + "tsb": "Tsamai", + "tsc": "Tswa", + "tsd": "Tsakonian", + "tse": "Tunisian Sign Language", + "tsg": "Tausug", + "tsh": "Tsuvan", + "tsi": "Tsimshian", + "tsj": "Tshangla", + "tsk": "Tseku", + "tsl": "Ts'ün-Lao", + "tsm": "Turkish Sign Language; Türk İşaret Dili", + "tsp": "Northern Toussian", + "tsq": "Thai Sign Language", + "tsr": "Akei", + "tss": "Taiwan Sign Language", + "tst": "Tondi Songway Kiini", + "tsu": "Tsou", + "tsv": "Tsogo", + "tsw": "Tsishingini", + "tsx": "Mubami", + "tsy": "Tebul Sign Language", + "tsz": "Purepecha", + "tt": "Tatar", + "tta": "Tutelo", + "ttb": "Gaa", + "ttc": "Tektiteko", + "ttd": "Tauade", + "tte": "Bwanabwana", + "ttf": "Tuotomb", + "ttg": "Tutong", + "tth": "Upper Ta'oih", + "tti": "Tobati", + "ttj": "Tooro", + "ttk": "Totoro", + "ttl": "Totela", + "ttm": "Northern Tutchone", + "ttn": "Towei", + "tto": "Lower Ta'oih", + "ttp": "Tombelala", + "ttq": "Tawallammat Tamajaq", + "ttr": "Tera", + "tts": "Northeastern Thai", + "ttt": "Muslim Tat", + "ttu": "Torau", + "ttv": "Titan", + "ttw": "Long 
Wat", + "tty": "Sikaritai", + "ttz": "Tsum", + "tua": "Wiarumus", + "tub": "Tübatulabal", + "tuc": "Mutu", + "tud": "Tuxá", + "tue": "Tuyuca", + "tuf": "Central Tunebo", + "tug": "Tunia", + "tuh": "Taulil", + "tui": "Tupuri", + "tuj": "Tugutil", + "tul": "Tula", + "tum": "Tumbuka", + "tun": "Tunica", + "tuo": "Tucano", + "tup": "Tupi languages", + "tuq": "Tedaga", + "tus": "Tuscarora", + "tut": "Altaic languages", + "tuu": "Tututni", + "tuv": "Turkana", + "tuw": "Tungus languages", + "tux": "Tuxináwa", + "tuy": "Tugen", + "tuz": "Turka", + "tva": "Vaghua", + "tvd": "Tsuvadi", + "tve": "Te'un", + "tvk": "Southeast Ambrym", + "tvl": "Tuvalu", + "tvm": "Tela-Masbuar", + "tvn": "Tavoyan", + "tvo": "Tidore", + "tvs": "Taveta", + "tvt": "Tutsa Naga", + "tvu": "Tunen", + "tvw": "Sedoa", + "tvx": "Taivoan", + "tvy": "Timor Pidgin", + "tw": "Twi", + "twa": "Twana", + "twb": "Western Tawbuid", + "twc": "Teshenawa", + "twd": "Twents", + "twe": "Tewa (Indonesia)", + "twf": "Northern Tiwa", + "twg": "Tereweng", + "twh": "Tai Dón", + "twl": "Tawara", + "twm": "Tawang Monpa", + "twn": "Twendi", + "two": "Tswapong", + "twp": "Ere", + "twq": "Tasawaq", + "twr": "Southwestern Tarahumara", + "twt": "Turiwára", + "twu": "Termanu", + "tww": "Tuwari", + "twx": "Tewe", + "twy": "Tawoyan", + "txa": "Tombonuo", + "txb": "Tokharian B", + "txc": "Tsetsaut", + "txe": "Totoli", + "txg": "Tangut", + "txh": "Thracian", + "txi": "Ikpeng", + "txj": "Tarjumo", + "txm": "Tomini", + "txn": "West Tarangan", + "txo": "Toto", + "txq": "Tii", + "txr": "Tartessian", + "txs": "Tonsea", + "txt": "Citak", + "txu": "Kayapó", + "txx": "Tatana", + "txy": "Tanosy Malagasy", + "ty": "Tahitian", + "tya": "Tauya", + "tye": "Kyanga", + "tyh": "O'du", + "tyi": "Teke-Tsaayi", + "tyj": "Tai Do; Tai Yo", + "tyl": "Thu Lao", + "tyn": "Kombai", + "typ": "Thaypan", + "tyr": "Tai Daeng", + "tys": "Tày Sa Pa", + "tyt": "Tày Tac", + "tyu": "Kua", + "tyv": "Tuvinian", + "tyx": "Teke-Tyee", + "tyy": "Tiyaa", + "tyz": "Tày", + 
"tza": "Tanzanian Sign Language", + "tzh": "Tzeltal", + "tzj": "Tz'utujil", + "tzl": "Talossan", + "tzm": "Central Atlas Tamazight", + "tzn": "Tugun", + "tzo": "Tzotzil", + "tzx": "Tabriak", + "uam": "Uamué", + "uan": "Kuan", + "uar": "Tairuma", + "uba": "Ubang", + "ubi": "Ubi", + "ubl": "Buhi'non Bikol", + "ubr": "Ubir", + "ubu": "Umbu-Ungu", + "uby": "Ubykh", + "uda": "Uda", + "ude": "Udihe", + "udg": "Muduga", + "udi": "Udi", + "udj": "Ujir", + "udl": "Wuzlam", + "udm": "Udmurt", + "udu": "Uduk", + "ues": "Kioko", + "ufi": "Ufim", + "ug": "Uighur; Uyghur", + "uga": "Ugaritic", + "ugb": "Kuku-Ugbanh", + "uge": "Ughele", + "ugh": "Kubachi", + "ugn": "Ugandan Sign Language", + "ugo": "Ugong", + "ugy": "Uruguayan Sign Language", + "uha": "Uhami", + "uhn": "Damal", + "uis": "Uisai", + "uiv": "Iyive", + "uji": "Tanjijili", + "uk": "Ukrainian", + "uka": "Kaburi", + "ukg": "Ukuriguma", + "ukh": "Ukhwejo", + "uki": "Kui (India)", + "ukk": "Muak Sa-aak", + "ukl": "Ukrainian Sign Language", + "ukp": "Ukpe-Bayobiri", + "ukq": "Ukwa", + "uks": "Urubú-Kaapor Sign Language; Kaapor Sign Language", + "uku": "Ukue", + "ukv": "Kuku", + "ukw": "Ukwuani-Aboh-Ndoni", + "uky": "Kuuk-Yak", + "ula": "Fungwa", + "ulb": "Ulukwumi", + "ulc": "Ulch", + "ule": "Lule", + "ulf": "Usku; Afra", + "uli": "Ulithian", + "ulk": "Meriam Mir", + "ull": "Ullatan", + "ulm": "Ulumanda'", + "uln": "Unserdeutsch", + "ulu": "Uma' Lung", + "ulw": "Ulwa", + "uma": "Umatilla", + "umb": "Umbundu", + "umc": "Marrucinian", + "umd": "Umbindhamu", + "umg": "Morrobalama; Umbuygamu", + "umi": "Ukit", + "umm": "Umon", + "umn": "Makyan Naga", + "umo": "Umotína", + "ump": "Umpila", + "umr": "Umbugarla", + "ums": "Pendau", + "umu": "Munsee", + "una": "North Watut", + "und": "Undetermined", + "une": "Uneme", + "ung": "Ngarinyin", + "uni": "Uni", + "unk": "Enawené-Nawé", + "unm": "Unami", + "unn": "Kurnai", + "unr": "Mundari", + "unu": "Unubahe", + "unx": "Munda", + "unz": "Unde Kaili", + "uon": "Kulon", + "upi": "Umeda", 
+ "upv": "Uripiv-Wala-Rano-Atchin", + "ur": "Urdu", + "ura": "Urarina", + "urb": "Urubú-Kaapor; Kaapor", + "urc": "Urningangg", + "ure": "Uru", + "urf": "Uradhi", + "urg": "Urigina", + "urh": "Urhobo", + "uri": "Urim", + "urj": "Uralic languages", + "urk": "Urak Lawoi'", + "url": "Urali", + "urm": "Urapmin", + "urn": "Uruangnirin", + "uro": "Ura (Papua New Guinea)", + "urp": "Uru-Pa-In", + "urr": "Lehalurup; Löyöp", + "urt": "Urat", + "uru": "Urumi", + "urv": "Uruava", + "urw": "Sop", + "urx": "Urimo", + "ury": "Orya", + "urz": "Uru-Eu-Wau-Wau", + "usa": "Usarufa", + "ush": "Ushojo", + "usi": "Usui", + "usk": "Usaghade", + "usp": "Uspanteco", + "uss": "us-Saare", + "usu": "Uya", + "uta": "Otank", + "ute": "Ute-Southern Paiute", + "uth": "ut-Hun", + "utp": "Amba (Solomon Islands)", + "utr": "Etulo", + "utu": "Utu", + "uum": "Urum", + "uur": "Ura (Vanuatu)", + "uuu": "U", + "uve": "West Uvean; Fagauvea", + "uvh": "Uri", + "uvl": "Lote", + "uwa": "Kuku-Uwanh", + "uya": "Doko-Uyanga", + "uz": "Uzbek", + "uzn": "Northern Uzbek", + "uzs": "Southern Uzbek", + "vaa": "Vaagri Booli", + "vae": "Vale", + "vaf": "Vafsi", + "vag": "Vagla", + "vah": "Varhadi-Nagpuri", + "vai": "Vai", + "vaj": "Sekele; Northwestern ǃKung; Vasekele", + "val": "Vehes", + "vam": "Vanimo", + "van": "Valman", + "vao": "Vao", + "vap": "Vaiphei", + "var": "Huarijio", + "vas": "Vasavi", + "vau": "Vanuma", + "vav": "Varli", + "vay": "Wayu", + "vbb": "Southeast Babar", + "vbk": "Southwestern Bontok", + "ve": "Venda", + "vec": "Venetian", + "ved": "Veddah", + "vel": "Veluws", + "vem": "Vemgo-Mabas", + "veo": "Ventureño", + "vep": "Veps", + "ver": "Mom Jango", + "vgr": "Vaghri", + "vgt": "Vlaamse Gebarentaal; Flemish Sign Language", + "vi": "Vietnamese", + "vic": "Virgin Islands Creole English", + "vid": "Vidunda", + "vif": "Vili", + "vig": "Viemo", + "vil": "Vilela", + "vin": "Vinza", + "vis": "Vishavan", + "vit": "Viti", + "viv": "Iduna", + "vka": "Kariyarra", + "vkj": "Kujarge", + "vkk": "Kaur", + "vkl": 
"Kulisusu", + "vkm": "Kamakan", + "vkn": "Koro Nulu", + "vko": "Kodeoha", + "vkp": "Korlai Creole Portuguese", + "vkt": "Tenggarong Kutai Malay", + "vku": "Kurrama", + "vkz": "Koro Zuba", + "vlp": "Valpei", + "vls": "Vlaams", + "vma": "Martuyhunira", + "vmb": "Barbaram", + "vmc": "Juxtlahuaca Mixtec", + "vmd": "Mudu Koraga", + "vme": "East Masela", + "vmf": "Mainfränkisch", + "vmg": "Lungalunga", + "vmh": "Maraghei", + "vmi": "Miwa", + "vmj": "Ixtayutla Mixtec", + "vmk": "Makhuwa-Shirima", + "vml": "Malgana", + "vmm": "Mitlatongo Mixtec", + "vmp": "Soyaltepec Mazatec", + "vmq": "Soyaltepec Mixtec", + "vmr": "Marenje", + "vms": "Moksela", + "vmu": "Muluridyi", + "vmv": "Valley Maidu", + "vmw": "Makhuwa", + "vmx": "Tamazola Mixtec", + "vmy": "Ayautla Mazatec", + "vmz": "Mazatlán Mazatec", + "vnk": "Vano; Lovono", + "vnm": "Vinmavis; Neve'ei", + "vnp": "Vunapu", + "vo": "Volapük", + "vor": "Voro", + "vot": "Votic", + "vra": "Vera'a", + "vro": "Võro", + "vrs": "Varisi", + "vrt": "Burmbar; Banam Bay", + "vsi": "Moldova Sign Language", + "vsl": "Venezuelan Sign Language", + "vsv": "Valencian Sign Language; Llengua de signes valenciana", + "vto": "Vitou", + "vum": "Vumbu", + "vun": "Vunjo", + "vut": "Vute", + "vwa": "Awa (China)", + "wa": "Walloon", + "waa": "Walla Walla", + "wab": "Wab", + "wac": "Wasco-Wishram", + "wad": "Wamesa; Wondama", + "wae": "Walser", + "waf": "Wakoná", + "wag": "Wa'ema", + "wah": "Watubela", + "wai": "Wares", + "waj": "Waffa", + "wak": "Wakashan languages", + "wal": "Wolaytta; Wolaitta", + "wam": "Wampanoag", + "wan": "Wan", + "wao": "Wappo", + "wap": "Wapishana", + "waq": "Wagiman", + "war": "Waray (Philippines)", + "was": "Washo", + "wat": "Kaninuwa", + "wau": "Waurá", + "wav": "Waka", + "waw": "Waiwai", + "wax": "Watam; Marangis", + "way": "Wayana", + "waz": "Wampur", + "wba": "Warao", + "wbb": "Wabo", + "wbe": "Waritai", + "wbf": "Wara", + "wbh": "Wanda", + "wbi": "Vwanji", + "wbj": "Alagwa", + "wbk": "Waigali", + "wbl": "Wakhi", + "wbm": 
"Wa", + "wbp": "Warlpiri", + "wbq": "Waddar", + "wbr": "Wagdi", + "wbs": "West Bengal Sign Language", + "wbt": "Warnman", + "wbv": "Wajarri", + "wbw": "Woi", + "wca": "Yanomámi", + "wci": "Waci Gbe", + "wdd": "Wandji", + "wdg": "Wadaginam", + "wdj": "Wadjiginy", + "wdk": "Wadikali", + "wdt": "Wendat", + "wdu": "Wadjigu", + "wdy": "Wadjabangayi", + "wea": "Wewaw", + "wec": "Wè Western", + "wed": "Wedau", + "weg": "Wergaia", + "weh": "Weh", + "wei": "Kiunum", + "wem": "Weme Gbe", + "wen": "Sorbian languages", + "weo": "Wemale", + "wep": "Westphalien", + "wer": "Weri", + "wes": "Cameroon Pidgin", + "wet": "Perai", + "weu": "Rawngtu Chin", + "wew": "Wejewa", + "wfg": "Yafi; Zorop", + "wga": "Wagaya", + "wgb": "Wagawaga", + "wgg": "Wangkangurru; Wangganguru", + "wgi": "Wahgi", + "wgo": "Waigeo", + "wgu": "Wirangu", + "wgy": "Warrgamay", + "wha": "Sou Upaa; Manusela", + "whg": "North Wahgi", + "whk": "Wahau Kenyah", + "whu": "Wahau Kayan", + "wib": "Southern Toussian", + "wic": "Wichita", + "wie": "Wik-Epa", + "wif": "Wik-Keyangan", + "wig": "Wik Ngathan", + "wih": "Wik-Me'anha", + "wii": "Minidien", + "wij": "Wik-Iiyanh", + "wik": "Wikalkan", + "wil": "Wilawila", + "wim": "Wik-Mungkan", + "win": "Ho-Chunk", + "wir": "Wiraféd", + "wiu": "Wiru", + "wiv": "Vitu", + "wiy": "Wiyot", + "wja": "Waja", + "wji": "Warji", + "wka": "Kw'adza", + "wkb": "Kumbaran", + "wkd": "Wakde; Mo", + "wkl": "Kalanadi", + "wkr": "Keerray-Woorroong", + "wku": "Kunduvadi", + "wkw": "Wakawaka", + "wky": "Wangkayutyuru", + "wla": "Walio", + "wlc": "Mwali Comorian", + "wle": "Wolane", + "wlg": "Kunbarlang", + "wlh": "Welaun", + "wli": "Waioli", + "wlk": "Wailaki", + "wll": "Wali (Sudan)", + "wlm": "Middle Welsh", + "wlo": "Wolio", + "wlr": "Wailapa", + "wls": "Wallisian", + "wlu": "Wuliwuli", + "wlv": "Wichí Lhamtés Vejoz", + "wlw": "Walak", + "wlx": "Wali (Ghana)", + "wly": "Waling", + "wma": "Mawa (Nigeria)", + "wmb": "Wambaya", + "wmc": "Wamas", + "wmd": "Mamaindé", + "wme": "Wambule", + "wmg": 
"Western Minyag", + "wmh": "Waima'a", + "wmi": "Wamin", + "wmm": "Maiwa (Indonesia)", + "wmn": "Waamwang", + "wmo": "Wom (Papua New Guinea)", + "wms": "Wambon", + "wmt": "Walmajarri", + "wmw": "Mwani", + "wmx": "Womo", + "wnb": "Wanambre", + "wnc": "Wantoat", + "wnd": "Wandarang", + "wne": "Waneci", + "wng": "Wanggom", + "wni": "Ndzwani Comorian", + "wnk": "Wanukaka", + "wnm": "Wanggamala", + "wnn": "Wunumara", + "wno": "Wano", + "wnp": "Wanap", + "wnu": "Usan", + "wnw": "Wintu", + "wny": "Wanyi; Waanyi", + "wo": "Wolof", + "woa": "Kuwema; Tyaraity", + "wob": "Wè Northern", + "woc": "Wogeo", + "wod": "Wolani", + "woe": "Woleaian", + "wof": "Gambian Wolof", + "wog": "Wogamusin", + "woi": "Kamang", + "wok": "Longto", + "wom": "Wom (Nigeria)", + "won": "Wongo", + "woo": "Manombai", + "wor": "Woria", + "wos": "Hanga Hundi", + "wow": "Wawonii", + "woy": "Weyto", + "wpc": "Maco", + "wrb": "Waluwarra; Warluwara", + "wrg": "Warungu; Gudjal", + "wrh": "Wiradjuri", + "wri": "Wariyangga", + "wrk": "Garrwa", + "wrl": "Warlmanpa", + "wrm": "Warumungu", + "wrn": "Warnang", + "wro": "Worrorra", + "wrp": "Waropen", + "wrr": "Wardaman", + "wrs": "Waris", + "wru": "Waru", + "wrv": "Waruna", + "wrw": "Gugu Warra", + "wrx": "Wae Rana", + "wry": "Merwari", + "wrz": "Waray (Australia)", + "wsa": "Warembori", + "wsg": "Adilabad Gondi", + "wsi": "Wusi", + "wsk": "Waskia", + "wsr": "Owenia", + "wss": "Wasa", + "wsu": "Wasu", + "wsv": "Wotapuri-Katarqalai", + "wtf": "Watiwa", + "wth": "Wathawurrung", + "wti": "Berta", + "wtk": "Watakataui", + "wtm": "Mewati", + "wtw": "Wotu", + "wua": "Wikngenchera", + "wub": "Wunambal", + "wud": "Wudu", + "wuh": "Wutunhua", + "wul": "Silimo", + "wum": "Wumbvu", + "wun": "Bungu", + "wur": "Wurrugu", + "wut": "Wutung", + "wuu": "Wu Chinese", + "wuv": "Wuvulu-Aua", + "wux": "Wulna", + "wuy": "Wauyai", + "wwa": "Waama", + "wwb": "Wakabunga", + "wwo": "Wetamut; Dorig", + "wwr": "Warrwa", + "www": "Wawa", + "wxa": "Waxianghua", + "wxw": "Wardandi", + "wyb": 
"Wangaaybuwan-Ngiyambaa", + "wyi": "Woiwurrung", + "wym": "Wymysorys", + "wyn": "Wyandot", + "wyr": "Wayoró", + "wyy": "Western Fijian", + "xaa": "Andalusian Arabic", + "xab": "Sambe", + "xac": "Kachari", + "xad": "Adai", + "xae": "Aequian", + "xag": "Aghwan", + "xai": "Kaimbé", + "xaj": "Ararandewára", + "xak": "Máku", + "xal": "Kalmyk; Oirat", + "xam": "ǀXam", + "xan": "Xamtanga", + "xao": "Khao", + "xap": "Apalachee", + "xaq": "Aquitanian", + "xar": "Karami", + "xas": "Kamas", + "xat": "Katawixi", + "xau": "Kauwera", + "xav": "Xavánte", + "xaw": "Kawaiisu", + "xay": "Kayan Mahakam", + "xbb": "Lower Burdekin", + "xbc": "Bactrian", + "xbd": "Bindal", + "xbe": "Bigambal", + "xbg": "Bunganditj", + "xbi": "Kombio", + "xbj": "Birrpayi", + "xbm": "Middle Breton", + "xbn": "Kenaboi", + "xbo": "Bolgarian", + "xbp": "Bibbulman", + "xbr": "Kambera", + "xbw": "Kambiwá", + "xby": "Batjala; Batyala", + "xcb": "Cumbric", + "xcc": "Camunic", + "xce": "Celtiberian", + "xcg": "Cisalpine Gaulish", + "xch": "Chemakum; Chimakum", + "xcl": "Classical Armenian", + "xcm": "Comecrudo", + "xcn": "Cotoname", + "xco": "Chorasmian", + "xcr": "Carian", + "xct": "Classical Tibetan", + "xcu": "Curonian", + "xcv": "Chuvantsy", + "xcw": "Coahuilteco", + "xcy": "Cayuse", + "xda": "Darkinyung", + "xdc": "Dacian", + "xdk": "Dharuk", + "xdm": "Edomite", + "xdo": "Kwandu", + "xdq": "Kaitag", + "xdy": "Malayic Dayak", + "xeb": "Eblan", + "xed": "Hdi", + "xeg": "ǁXegwi", + "xel": "Kelo", + "xem": "Kembayan", + "xep": "Epi-Olmec", + "xer": "Xerénte", + "xes": "Kesawai", + "xet": "Xetá", + "xeu": "Keoru-Ahia", + "xfa": "Faliscan", + "xga": "Galatian", + "xgb": "Gbin", + "xgd": "Gudang", + "xgf": "Gabrielino-Fernandeño", + "xgg": "Goreng", + "xgi": "Garingbal", + "xgl": "Galindan", + "xgm": "Dharumbal; Guwinmal", + "xgn": "Mongolian languages", + "xgr": "Garza", + "xgu": "Unggumi", + "xgw": "Guwa", + "xh": "Xhosa", + "xha": "Harami", + "xhc": "Hunnic", + "xhd": "Hadrami", + "xhe": "Khetrani", + "xhm": 
"Middle Khmer (1400 to 1850 CE)", + "xhr": "Hernican", + "xht": "Hattic", + "xhu": "Hurrian", + "xhv": "Khua", + "xib": "Iberian", + "xii": "Xiri", + "xil": "Illyrian", + "xin": "Xinca", + "xir": "Xiriâna", + "xis": "Kisan", + "xiv": "Indus Valley Language", + "xiy": "Xipaya", + "xjb": "Minjungbal", + "xjt": "Jaitmatang", + "xka": "Kalkoti", + "xkb": "Northern Nago", + "xkc": "Kho'ini", + "xkd": "Mendalam Kayan", + "xke": "Kereho", + "xkf": "Khengkha", + "xkg": "Kagoro", + "xki": "Kenyan Sign Language", + "xkj": "Kajali", + "xkk": "Kachok; Kaco'", + "xkl": "Mainstream Kenyah", + "xkn": "Kayan River Kayan", + "xko": "Kiorr", + "xkp": "Kabatei", + "xkq": "Koroni", + "xkr": "Xakriabá", + "xks": "Kumbewaha", + "xkt": "Kantosi", + "xku": "Kaamba", + "xkv": "Kgalagadi", + "xkw": "Kembra", + "xkx": "Karore", + "xky": "Uma' Lasan", + "xkz": "Kurtokha", + "xla": "Kamula", + "xlb": "Loup B", + "xlc": "Lycian", + "xld": "Lydian", + "xle": "Lemnian", + "xlg": "Ligurian (Ancient)", + "xli": "Liburnian", + "xln": "Alanic", + "xlo": "Loup A", + "xlp": "Lepontic", + "xls": "Lusitanian", + "xlu": "Cuneiform Luwian", + "xly": "Elymian", + "xma": "Mushungulu", + "xmb": "Mbonga", + "xmc": "Makhuwa-Marrevone", + "xmd": "Mbudum", + "xme": "Median", + "xmf": "Mingrelian", + "xmg": "Mengaka", + "xmh": "Kugu-Muminh", + "xmj": "Majera", + "xmk": "Ancient Macedonian", + "xml": "Malaysian Sign Language", + "xmm": "Manado Malay", + "xmn": "Manichaean Middle Persian", + "xmo": "Morerebi", + "xmp": "Kuku-Mu'inh", + "xmq": "Kuku-Mangk", + "xmr": "Meroitic", + "xms": "Moroccan Sign Language", + "xmt": "Matbat", + "xmu": "Kamu", + "xmv": "Antankarana Malagasy; Tankarana Malagasy", + "xmw": "Tsimihety Malagasy", + "xmx": "Salawati; Maden", + "xmy": "Mayaguduna", + "xmz": "Mori Bawah", + "xna": "Ancient North Arabian", + "xnb": "Kanakanabu", + "xnd": "Na-Dene languages", + "xng": "Middle Mongolian", + "xnh": "Kuanhua", + "xni": "Ngarigu", + "xnj": "Ngoni (Tanzania)", + "xnk": "Nganakarti", + "xnm": 
"Ngumbarl", + "xnn": "Northern Kankanay", + "xno": "Anglo-Norman", + "xnq": "Ngoni (Mozambique)", + "xnr": "Kangri", + "xns": "Kanashi", + "xnt": "Narragansett", + "xnu": "Nukunul", + "xny": "Nyiyaparli", + "xnz": "Kenzi; Mattoki", + "xoc": "O'chi'chi'", + "xod": "Kokoda", + "xog": "Soga", + "xoi": "Kominimung", + "xok": "Xokleng", + "xom": "Komo (Sudan)", + "xon": "Konkomba", + "xoo": "Xukurú", + "xop": "Kopar", + "xor": "Korubo", + "xow": "Kowaki", + "xpa": "Pirriya", + "xpb": "Northeastern Tasmanian; Pyemmairrener", + "xpc": "Pecheneg", + "xpd": "Oyster Bay Tasmanian", + "xpe": "Liberia Kpelle", + "xpf": "Southeast Tasmanian; Nuenonne", + "xpg": "Phrygian", + "xph": "North Midlands Tasmanian; Tyerrenoterpanner", + "xpi": "Pictish", + "xpj": "Mpalitjanh", + "xpk": "Kulina Pano", + "xpl": "Port Sorell Tasmanian", + "xpm": "Pumpokol", + "xpn": "Kapinawá", + "xpo": "Pochutec", + "xpp": "Puyo-Paekche", + "xpq": "Mohegan-Pequot", + "xpr": "Parthian", + "xps": "Pisidian", + "xpt": "Punthamara", + "xpu": "Punic", + "xpv": "Northern Tasmanian; Tommeginne", + "xpw": "Northwestern Tasmanian; Peerapper", + "xpx": "Southwestern Tasmanian; Toogee", + "xpy": "Puyo", + "xpz": "Bruny Island Tasmanian", + "xqa": "Karakhanid", + "xqt": "Qatabanian", + "xra": "Krahô", + "xrb": "Eastern Karaboro", + "xrd": "Gundungurra", + "xre": "Kreye", + "xrg": "Minang", + "xri": "Krikati-Timbira", + "xrm": "Armazic", + "xrn": "Arin", + "xrr": "Raetic", + "xrt": "Aranama-Tamique", + "xru": "Marriammu", + "xrw": "Karawa", + "xsa": "Sabaean", + "xsb": "Sambal", + "xsc": "Scythian", + "xsd": "Sidetic", + "xse": "Sempan", + "xsh": "Shamang", + "xsi": "Sio", + "xsj": "Subi", + "xsl": "South Slavey", + "xsm": "Kasem", + "xsn": "Sanga (Nigeria)", + "xso": "Solano", + "xsp": "Silopi", + "xsq": "Makhuwa-Saka", + "xsr": "Sherpa", + "xss": "Assan", + "xsu": "Sanumá", + "xsv": "Sudovian", + "xsy": "Saisiyat", + "xta": "Alcozauca Mixtec", + "xtb": "Chazumba Mixtec", + "xtc": "Katcha-Kadugli-Miri", + "xtd": 
"Diuxi-Tilantongo Mixtec", + "xte": "Ketengban", + "xtg": "Transalpine Gaulish", + "xth": "Yitha Yitha", + "xti": "Sinicahua Mixtec", + "xtj": "San Juan Teita Mixtec", + "xtl": "Tijaltepec Mixtec", + "xtm": "Magdalena Peñasco Mixtec", + "xtn": "Northern Tlaxiaco Mixtec", + "xto": "Tokharian A", + "xtp": "San Miguel Piedras Mixtec", + "xtq": "Tumshuqese", + "xtr": "Early Tripuri", + "xts": "Sindihui Mixtec", + "xtt": "Tacahua Mixtec", + "xtu": "Cuyamecalco Mixtec", + "xtv": "Thawa", + "xtw": "Tawandê", + "xty": "Yoloxochitl Mixtec", + "xua": "Alu Kurumba", + "xub": "Betta Kurumba", + "xud": "Umiida", + "xug": "Kunigami", + "xuj": "Jennu Kurumba", + "xul": "Ngunawal; Nunukul", + "xum": "Umbrian", + "xun": "Unggaranggu", + "xuo": "Kuo", + "xup": "Upper Umpqua", + "xur": "Urartian", + "xut": "Kuthant", + "xuu": "Kxoe; Khwedam", + "xve": "Venetic", + "xvi": "Kamviri", + "xvn": "Vandalic", + "xvo": "Volscian", + "xvs": "Vestinian", + "xwa": "Kwaza", + "xwc": "Woccon", + "xwd": "Wadi Wadi", + "xwe": "Xwela Gbe", + "xwg": "Kwegu", + "xwj": "Wajuk", + "xwk": "Wangkumara", + "xwl": "Western Xwla Gbe", + "xwo": "Written Oirat", + "xwr": "Kwerba Mamberamo", + "xwt": "Wotjobaluk", + "xww": "Wemba Wemba", + "xxb": "Boro (Ghana)", + "xxk": "Ke'o", + "xxm": "Minkin", + "xxr": "Koropó", + "xxt": "Tambora", + "xya": "Yaygir", + "xyb": "Yandjibara", + "xyj": "Mayi-Yapi", + "xyk": "Mayi-Kulan", + "xyl": "Yalakalore", + "xyt": "Mayi-Thakurti", + "xyy": "Yorta Yorta", + "xzh": "Zhang-Zhung", + "xzm": "Zemgalian", + "xzp": "Ancient Zapotec", + "yaa": "Yaminahua", + "yab": "Yuhup", + "yac": "Pass Valley Yali", + "yad": "Yagua", + "yae": "Pumé", + "yaf": "Yaka (Democratic Republic of Congo)", + "yag": "Yámana", + "yah": "Yazgulyam", + "yai": "Yagnobi", + "yaj": "Banda-Yangere", + "yak": "Yakama", + "yal": "Yalunka", + "yam": "Yamba", + "yan": "Mayangna", + "yao": "Yao", + "yap": "Yapese", + "yaq": "Yaqui", + "yar": "Yabarana", + "yas": "Nugunu (Cameroon)", + "yat": "Yambeta", + "yau": 
"Yuwana", + "yav": "Yangben", + "yaw": "Yawalapití", + "yax": "Yauma", + "yay": "Agwagwune", + "yaz": "Lokaa", + "yba": "Yala", + "ybb": "Yemba", + "ybe": "West Yugur", + "ybh": "Yakha", + "ybi": "Yamphu", + "ybj": "Hasha", + "ybk": "Bokha", + "ybl": "Yukuben", + "ybm": "Yaben", + "ybn": "Yabaâna", + "ybo": "Yabong", + "ybx": "Yawiyo", + "yby": "Yaweyuha", + "ych": "Chesu", + "ycl": "Lolopo", + "ycn": "Yucuna", + "ycp": "Chepya", + "yda": "Yanda", + "ydd": "Eastern Yiddish", + "yde": "Yangum Dey", + "ydg": "Yidgha", + "ydk": "Yoidik", + "yea": "Ravula", + "yec": "Yeniche", + "yee": "Yimas", + "yei": "Yeni", + "yej": "Yevanic", + "yel": "Yela", + "yer": "Tarok", + "yes": "Nyankpa", + "yet": "Yetfa", + "yeu": "Yerukula", + "yev": "Yapunda", + "yey": "Yeyi", + "yga": "Malyangapa", + "ygi": "Yiningayi", + "ygl": "Yangum Gel", + "ygm": "Yagomi", + "ygp": "Gepo", + "ygr": "Yagaria", + "ygs": "Yolŋu Sign Language", + "ygu": "Yugul", + "ygw": "Yagwoia", + "yha": "Baha Buyang", + "yhd": "Judeo-Iraqi Arabic", + "yhl": "Hlepho Phowa", + "yhs": "Yan-nhaŋu Sign Language", + "yi": "Yiddish", + "yia": "Yinggarda", + "yif": "Ache", + "yig": "Wusa Nasu", + "yih": "Western Yiddish", + "yii": "Yidiny", + "yij": "Yindjibarndi", + "yik": "Dongshanba Lalo", + "yil": "Yindjilandji", + "yim": "Yimchungru Naga", + "yin": "Riang Lai; Yinchia", + "yip": "Pholo", + "yiq": "Miqie", + "yir": "North Awyu", + "yis": "Yis", + "yit": "Eastern Lalu", + "yiu": "Awu", + "yiv": "Northern Nisu", + "yix": "Axi Yi", + "yiz": "Azhe", + "yka": "Yakan", + "ykg": "Northern Yukaghir", + "yki": "Yoke", + "ykk": "Yakaikeke", + "ykl": "Khlula", + "ykm": "Kap", + "ykn": "Kua-nsi", + "yko": "Yasa", + "ykr": "Yekora", + "ykt": "Kathu", + "yku": "Kuamasi", + "yky": "Yakoma", + "yla": "Yaul", + "ylb": "Yaleba", + "yle": "Yele", + "ylg": "Yelogu", + "yli": "Angguruk Yali", + "yll": "Yil", + "ylm": "Limi", + "yln": "Langnian Buyang", + "ylo": "Naluo Yi", + "ylr": "Yalarnnga", + "ylu": "Aribwaung", + "yly": "Nyâlayu; 
Nyelâyu", + "ymb": "Yambes", + "ymc": "Southern Muji", + "ymd": "Muda", + "yme": "Yameo", + "ymg": "Yamongeri", + "ymh": "Mili", + "ymi": "Moji", + "ymk": "Makwe", + "yml": "Iamalele", + "ymm": "Maay", + "ymn": "Yamna; Sunum", + "ymo": "Yangum Mon", + "ymp": "Yamap", + "ymq": "Qila Muji", + "ymr": "Malasar", + "yms": "Mysian", + "ymx": "Northern Muji", + "ymz": "Muzi", + "yna": "Aluo", + "ynd": "Yandruwandha", + "yne": "Lang'e", + "yng": "Yango", + "ynk": "Naukan Yupik", + "ynl": "Yangulam", + "ynn": "Yana", + "yno": "Yong", + "ynq": "Yendang", + "yns": "Yansi", + "ynu": "Yahuna", + "yo": "Yoruba", + "yob": "Yoba", + "yog": "Yogad", + "yoi": "Yonaguni", + "yok": "Yokuts", + "yol": "Yola", + "yom": "Yombe", + "yon": "Yongkom", + "yot": "Yotti", + "yox": "Yoron", + "yoy": "Yoy", + "ypa": "Phala", + "ypb": "Labo Phowa", + "ypg": "Phola", + "yph": "Phupha", + "ypk": "Yupik languages", + "ypm": "Phuma", + "ypn": "Ani Phowa", + "ypo": "Alo Phola", + "ypp": "Phupa", + "ypz": "Phuza", + "yra": "Yerakai", + "yrb": "Yareba", + "yre": "Yaouré", + "yrk": "Nenets", + "yrl": "Nhengatu", + "yrm": "Yirrk-Mel", + "yrn": "Yerong", + "yro": "Yaroamë", + "yrs": "Yarsun", + "yrw": "Yarawata", + "yry": "Yarluyandi", + "ysc": "Yassic", + "ysd": "Samatao", + "ysg": "Sonaga", + "ysl": "Yugoslavian Sign Language", + "ysm": "Myanmar Sign Language", + "ysn": "Sani", + "yso": "Nisi (China)", + "ysp": "Southern Lolopo", + "ysr": "Sirenik Yupik", + "yss": "Yessan-Mayo", + "ysy": "Sanie", + "yta": "Talu", + "ytl": "Tanglang", + "ytp": "Thopho", + "ytw": "Yout Wam", + "yty": "Yatay", + "yua": "Yucateco; Yucatec Maya", + "yub": "Yugambal", + "yuc": "Yuchi", + "yud": "Judeo-Tripolitanian Arabic", + "yue": "Yue Chinese; Cantonese", + "yuf": "Havasupai-Walapai-Yavapai", + "yug": "Yug", + "yui": "Yurutí", + "yuj": "Karkar-Yuri", + "yuk": "Yuki", + "yul": "Yulu", + "yum": "Quechan", + "yun": "Bena (Nigeria)", + "yup": "Yukpa", + "yuq": "Yuqui", + "yur": "Yurok", + "yut": "Yopno", + "yuw": "Yau (Morobe 
Province)", + "yux": "Southern Yukaghir", + "yuy": "East Yugur", + "yuz": "Yuracare", + "yva": "Yawa", + "yvt": "Yavitero", + "ywa": "Kalou", + "ywg": "Yinhawangka", + "ywl": "Western Lalu", + "ywn": "Yawanawa", + "ywq": "Wuding-Luquan Yi", + "ywr": "Yawuru", + "ywt": "Xishanba Lalo; Central Lalo", + "ywu": "Wumeng Nasu", + "yww": "Yawarawarga", + "yxa": "Mayawali", + "yxg": "Yagara", + "yxl": "Yardliyawarra", + "yxm": "Yinwum", + "yxu": "Yuyu", + "yxy": "Yabula Yabula", + "yyr": "Yir Yoront", + "yyu": "Yau (Sandaun Province)", + "yyz": "Ayizi", + "yzg": "E'ma Buyang", + "yzk": "Zokhuo", + "za": "Zhuang; Chuang", + "zaa": "Sierra de Juárez Zapotec", + "zab": "Western Tlacolula Valley Zapotec; San Juan Guelavía Zapotec", + "zac": "Ocotlán Zapotec", + "zad": "Cajonos Zapotec", + "zae": "Yareni Zapotec", + "zaf": "Ayoquesco Zapotec", + "zag": "Zaghawa", + "zah": "Zangwal", + "zai": "Isthmus Zapotec", + "zaj": "Zaramo", + "zak": "Zanaki", + "zal": "Zauzou", + "zam": "Miahuatlán Zapotec", + "zao": "Ozolotepec Zapotec", + "zap": "Zapotec", + "zaq": "Aloápam Zapotec", + "zar": "Rincón Zapotec", + "zas": "Santo Domingo Albarradas Zapotec", + "zat": "Tabaa Zapotec", + "zau": "Zangskari", + "zav": "Yatzachi Zapotec", + "zaw": "Mitla Zapotec", + "zax": "Xadani Zapotec", + "zay": "Zayse-Zergulla; Zaysete", + "zaz": "Zari", + "zba": "Balaibalan", + "zbc": "Central Berawan", + "zbe": "East Berawan", + "zbl": "Blissymbols; Bliss; Blissymbolics", + "zbt": "Batui", + "zbu": "Bu (Bauchi State)", + "zbw": "West Berawan", + "zca": "Coatecas Altas Zapotec", + "zcd": "Las Delicias Zapotec", + "zch": "Central Hongshuihe Zhuang", + "zdj": "Ngazidja Comorian", + "zea": "Zeeuws", + "zeg": "Zenag", + "zeh": "Eastern Hongshuihe Zhuang", + "zen": "Zenaga", + "zga": "Kinga", + "zgb": "Guibei Zhuang", + "zgh": "Standard Moroccan Tamazight", + "zgm": "Minz Zhuang", + "zgn": "Guibian Zhuang", + "zgr": "Magori", + "zh": "Chinese", + "zhb": "Zhaba", + "zhd": "Dai Zhuang", + "zhi": "Zhire", + "zhn": 
"Nong Zhuang", + "zhw": "Zhoa", + "zhx": "Chinese (family)", + "zia": "Zia", + "zib": "Zimbabwe Sign Language", + "zik": "Zimakani", + "zil": "Zialo", + "zim": "Mesme", + "zin": "Zinza", + "ziw": "Zigula", + "ziz": "Zizilivakan", + "zka": "Kaimbulawa", + "zkb": "Koibal", + "zkd": "Kadu", + "zkg": "Koguryo", + "zkh": "Khorezmian", + "zkk": "Karankawa", + "zkn": "Kanan", + "zko": "Kott", + "zkp": "São Paulo Kaingáng", + "zkr": "Zakhring", + "zkt": "Kitan", + "zku": "Kaurna", + "zkv": "Krevinian", + "zkz": "Khazar", + "zla": "Zula", + "zle": "East Slavic languages", + "zlj": "Liujiang Zhuang", + "zlm": "Malay (individual language)", + "zln": "Lianshan Zhuang", + "zlq": "Liuqian Zhuang", + "zls": "South Slavic languages", + "zlw": "West Slavic languages", + "zma": "Manda (Australia)", + "zmb": "Zimba", + "zmc": "Margany", + "zmd": "Maridan", + "zme": "Mangerr", + "zmf": "Mfinu", + "zmg": "Marti Ke", + "zmh": "Makolkol", + "zmi": "Negeri Sembilan Malay", + "zmj": "Maridjabin", + "zmk": "Mandandanyi", + "zml": "Matngala", + "zmm": "Marimanindji; Marramaninyshi", + "zmn": "Mbangwe", + "zmo": "Molo", + "zmp": "Mpuono", + "zmq": "Mituku", + "zmr": "Maranunggu", + "zms": "Mbesa", + "zmt": "Maringarr", + "zmu": "Muruwari", + "zmv": "Mbariman-Gudhinma", + "zmw": "Mbo (Democratic Republic of Congo)", + "zmx": "Bomitaba", + "zmy": "Mariyedi", + "zmz": "Mbandja", + "zna": "Zan Gula", + "znd": "Zande languages", + "zne": "Zande (individual language)", + "zng": "Mang", + "znk": "Manangkari", + "zns": "Mangas", + "zoc": "Copainalá Zoque", + "zoh": "Chimalapa Zoque", + "zom": "Zou", + "zoo": "Asunción Mixtepec Zapotec", + "zoq": "Tabasco Zoque", + "zor": "Rayón Zoque", + "zos": "Francisco León Zoque", + "zpa": "Lachiguiri Zapotec", + "zpb": "Yautepec Zapotec", + "zpc": "Choapan Zapotec", + "zpd": "Southeastern Ixtlán Zapotec", + "zpe": "Petapa Zapotec", + "zpf": "San Pedro Quiatoni Zapotec", + "zpg": "Guevea De Humboldt Zapotec", + "zph": "Totomachapan Zapotec", + "zpi": "Santa María 
Quiegolani Zapotec", + "zpj": "Quiavicuzas Zapotec", + "zpk": "Tlacolulita Zapotec", + "zpl": "Lachixío Zapotec", + "zpm": "Mixtepec Zapotec", + "zpn": "Santa Inés Yatzechi Zapotec", + "zpo": "Amatlán Zapotec", + "zpp": "El Alto Zapotec", + "zpq": "Zoogocho Zapotec", + "zpr": "Santiago Xanica Zapotec", + "zps": "Coatlán Zapotec", + "zpt": "San Vicente Coatlán Zapotec", + "zpu": "Yalálag Zapotec", + "zpv": "Chichicapan Zapotec", + "zpw": "Zaniza Zapotec", + "zpx": "San Baltazar Loxicha Zapotec", + "zpy": "Mazaltepec Zapotec", + "zpz": "Texmelucan Zapotec", + "zqe": "Qiubei Zhuang", + "zra": "Kara (Korea)", + "zrg": "Mirgan", + "zrn": "Zerenkel", + "zro": "Záparo", + "zrp": "Zarphatic", + "zrs": "Mairasi", + "zsa": "Sarasira", + "zsk": "Kaskean", + "zsl": "Zambian Sign Language", + "zsm": "Standard Malay", + "zsr": "Southern Rincon Zapotec", + "zsu": "Sukurum", + "zte": "Elotepec Zapotec", + "ztg": "Xanaguía Zapotec", + "ztl": "Lapaguía-Guivini Zapotec", + "ztm": "San Agustín Mixtepec Zapotec", + "ztn": "Santa Catarina Albarradas Zapotec", + "ztp": "Loxicha Zapotec", + "ztq": "Quioquitani-Quierí Zapotec", + "zts": "Tilquiapan Zapotec", + "ztt": "Tejalapan Zapotec", + "ztu": "Güilá Zapotec", + "ztx": "Zaachila Zapotec", + "zty": "Yatee Zapotec", + "zu": "Zulu", + "zua": "Zeem", + "zuh": "Tokano", + "zum": "Kumzari", + "zun": "Zuni", + "zuy": "Zumaya", + "zwa": "Zay", + "zyb": "Yongbei Zhuang", + "zyg": "Yang Zhuang", + "zyj": "Youjiang Zhuang", + "zyn": "Yongnan Zhuang", + "zyp": "Zyphe Chin", + "zza": "Zaza; Dimili; Dimli (macrolanguage); Kirdki; Kirmanjki (macrolanguage); Zazaki", + "zzj": "Zuojiang Zhuang" +} \ No newline at end of file diff --git a/lib/python3.10/site-packages/datasets/utils/resources/size_categories.json b/lib/python3.10/site-packages/datasets/utils/resources/size_categories.json new file mode 100644 index 0000000000000000000000000000000000000000..983ce0c10dbb2e2245f90ae47e9de4c1025d5bb1 --- /dev/null +++ 
b/lib/python3.10/site-packages/datasets/utils/resources/size_categories.json @@ -0,0 +1,14 @@ +[ + "unknown", + "n<1K", + "1K1T" +] diff --git a/lib/python3.10/site-packages/importlib_resources/compat/__init__.py b/lib/python3.10/site-packages/importlib_resources/compat/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/lib/python3.10/site-packages/importlib_resources/compat/py39.py b/lib/python3.10/site-packages/importlib_resources/compat/py39.py new file mode 100644 index 0000000000000000000000000000000000000000..684d3c638311527c24350f29c9bcc93c142b210a --- /dev/null +++ b/lib/python3.10/site-packages/importlib_resources/compat/py39.py @@ -0,0 +1,9 @@ +import sys + +__all__ = ['ZipPath'] + + +if sys.version_info >= (3, 10): + from zipfile import Path as ZipPath +else: + from zipp import Path as ZipPath diff --git a/lib/python3.10/site-packages/importlib_resources/future/__init__.py b/lib/python3.10/site-packages/importlib_resources/future/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/lib/python3.10/site-packages/importlib_resources/future/adapters.py b/lib/python3.10/site-packages/importlib_resources/future/adapters.py new file mode 100644 index 0000000000000000000000000000000000000000..239e52b795fc45ae614d0f7f641b5b4c3f272740 --- /dev/null +++ b/lib/python3.10/site-packages/importlib_resources/future/adapters.py @@ -0,0 +1,102 @@ +import functools +import pathlib +from contextlib import suppress +from types import SimpleNamespace + +from .. import _adapters, readers + + +def _block_standard(reader_getter): + """ + Wrap _adapters.TraversableResourcesLoader.get_resource_reader + and intercept any standard library readers. 
+ """ + + @functools.wraps(reader_getter) + def wrapper(*args, **kwargs): + """ + If the reader is from the standard library, return None to allow + allow likely newer implementations in this library to take precedence. + """ + try: + reader = reader_getter(*args, **kwargs) + except NotADirectoryError: + # MultiplexedPath may fail on zip subdirectory + return + except ValueError as exc: + # NamespaceReader in stdlib may fail for editable installs + # (python/importlib_resources#311, python/importlib_resources#318) + # Remove after bugfix applied to Python 3.13. + if "not enough values to unpack" not in str(exc): + raise + return + # Python 3.10+ + mod_name = reader.__class__.__module__ + if mod_name.startswith('importlib.') and mod_name.endswith('readers'): + return + # Python 3.8, 3.9 + if isinstance(reader, _adapters.CompatibilityFiles) and ( + reader.spec.loader.__class__.__module__.startswith('zipimport') + or reader.spec.loader.__class__.__module__.startswith( + '_frozen_importlib_external' + ) + ): + return + return reader + + return wrapper + + +def _skip_degenerate(reader): + """ + Mask any degenerate reader. Ref #298. + """ + is_degenerate = ( + isinstance(reader, _adapters.CompatibilityFiles) and not reader._reader + ) + return reader if not is_degenerate else None + + +class TraversableResourcesLoader(_adapters.TraversableResourcesLoader): + """ + Adapt loaders to provide TraversableResources and other + compatibility. + + Ensures the readers from importlib_resources are preferred + over stdlib readers. 
+ """ + + def get_resource_reader(self, name): + return ( + _skip_degenerate(_block_standard(super().get_resource_reader)(name)) + or self._standard_reader() + or super().get_resource_reader(name) + ) + + def _standard_reader(self): + return self._zip_reader() or self._namespace_reader() or self._file_reader() + + def _zip_reader(self): + with suppress(AttributeError): + return readers.ZipReader(self.spec.loader, self.spec.name) + + def _namespace_reader(self): + with suppress(AttributeError, ValueError): + return readers.NamespaceReader(self.spec.submodule_search_locations) + + def _file_reader(self): + try: + path = pathlib.Path(self.spec.origin) + except TypeError: + return None + if path.exists(): + return readers.FileReader(SimpleNamespace(path=path)) + + +def wrap_spec(package): + """ + Override _adapters.wrap_spec to use TraversableResourcesLoader + from above. Ensures that future behavior is always available on older + Pythons. + """ + return _adapters.SpecLoaderAdapter(package.__spec__, TraversableResourcesLoader) diff --git a/lib/python3.10/site-packages/importlib_resources/tests/__init__.py b/lib/python3.10/site-packages/importlib_resources/tests/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/lib/python3.10/site-packages/importlib_resources/tests/_path.py b/lib/python3.10/site-packages/importlib_resources/tests/_path.py new file mode 100644 index 0000000000000000000000000000000000000000..0033983dc6628682481023ce3e15b64171423ba8 --- /dev/null +++ b/lib/python3.10/site-packages/importlib_resources/tests/_path.py @@ -0,0 +1,90 @@ +import functools +import pathlib +from typing import Dict, Protocol, Union, runtime_checkable + +#### +# from jaraco.path 3.7.1 + + +class Symlink(str): + """ + A string indicating the target of a symlink. 
+ """ + + +FilesSpec = Dict[str, Union[str, bytes, Symlink, 'FilesSpec']] + + +@runtime_checkable +class TreeMaker(Protocol): + def __truediv__(self, *args, **kwargs): ... # pragma: no cover + + def mkdir(self, **kwargs): ... # pragma: no cover + + def write_text(self, content, **kwargs): ... # pragma: no cover + + def write_bytes(self, content): ... # pragma: no cover + + def symlink_to(self, target): ... # pragma: no cover + + +def _ensure_tree_maker(obj: Union[str, TreeMaker]) -> TreeMaker: + return obj if isinstance(obj, TreeMaker) else pathlib.Path(obj) # type: ignore[return-value] + + +def build( + spec: FilesSpec, + prefix: Union[str, TreeMaker] = pathlib.Path(), # type: ignore[assignment] +): + """ + Build a set of files/directories, as described by the spec. + + Each key represents a pathname, and the value represents + the content. Content may be a nested directory. + + >>> spec = { + ... 'README.txt': "A README file", + ... "foo": { + ... "__init__.py": "", + ... "bar": { + ... "__init__.py": "", + ... }, + ... "baz.py": "# Some code", + ... "bar.py": Symlink("baz.py"), + ... }, + ... "bing": Symlink("foo"), + ... 
} + >>> target = getfixture('tmp_path') + >>> build(spec, target) + >>> target.joinpath('foo/baz.py').read_text(encoding='utf-8') + '# Some code' + >>> target.joinpath('bing/bar.py').read_text(encoding='utf-8') + '# Some code' + """ + for name, contents in spec.items(): + create(contents, _ensure_tree_maker(prefix) / name) + + +@functools.singledispatch +def create(content: Union[str, bytes, FilesSpec], path): + path.mkdir(exist_ok=True) + build(content, prefix=path) # type: ignore[arg-type] + + +@create.register +def _(content: bytes, path): + path.write_bytes(content) + + +@create.register +def _(content: str, path): + path.write_text(content, encoding='utf-8') + + +@create.register +def _(content: Symlink, path): + path.symlink_to(content) + + +# end from jaraco.path +#### diff --git a/lib/python3.10/site-packages/importlib_resources/tests/compat/__init__.py b/lib/python3.10/site-packages/importlib_resources/tests/compat/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/lib/python3.10/site-packages/importlib_resources/tests/compat/py312.py b/lib/python3.10/site-packages/importlib_resources/tests/compat/py312.py new file mode 100644 index 0000000000000000000000000000000000000000..ea9a58ba2e5c6bfeac1c5b7ae768d8f4197a29a8 --- /dev/null +++ b/lib/python3.10/site-packages/importlib_resources/tests/compat/py312.py @@ -0,0 +1,18 @@ +import contextlib + +from .py39 import import_helper + + +@contextlib.contextmanager +def isolated_modules(): + """ + Save modules on entry and cleanup on exit. 
+ """ + (saved,) = import_helper.modules_setup() + try: + yield + finally: + import_helper.modules_cleanup(saved) + + +vars(import_helper).setdefault('isolated_modules', isolated_modules) diff --git a/lib/python3.10/site-packages/importlib_resources/tests/compat/py39.py b/lib/python3.10/site-packages/importlib_resources/tests/compat/py39.py new file mode 100644 index 0000000000000000000000000000000000000000..e01d276bd96ef5d823946b84b74d3af2710d26f9 --- /dev/null +++ b/lib/python3.10/site-packages/importlib_resources/tests/compat/py39.py @@ -0,0 +1,13 @@ +""" +Backward-compatability shims to support Python 3.9 and earlier. +""" + +from jaraco.test.cpython import from_test_support, try_import + +import_helper = try_import('import_helper') or from_test_support( + 'modules_setup', 'modules_cleanup', 'DirsOnSysPath' +) +os_helper = try_import('os_helper') or from_test_support('temp_dir') +warnings_helper = try_import('warnings_helper') or from_test_support( + 'ignore_warnings', 'check_warnings' +) diff --git a/lib/python3.10/site-packages/importlib_resources/tests/test_compatibilty_files.py b/lib/python3.10/site-packages/importlib_resources/tests/test_compatibilty_files.py new file mode 100644 index 0000000000000000000000000000000000000000..e8aac28415883cff131502da954eab8505da6ec6 --- /dev/null +++ b/lib/python3.10/site-packages/importlib_resources/tests/test_compatibilty_files.py @@ -0,0 +1,103 @@ +import io +import unittest + +import importlib_resources as resources +from importlib_resources._adapters import ( + CompatibilityFiles, + wrap_spec, +) + +from . 
import util + + +class CompatibilityFilesTests(unittest.TestCase): + @property + def package(self): + bytes_data = io.BytesIO(b'Hello, world!') + return util.create_package( + file=bytes_data, + path='some_path', + contents=('a', 'b', 'c'), + ) + + @property + def files(self): + return resources.files(self.package) + + def test_spec_path_iter(self): + self.assertEqual( + sorted(path.name for path in self.files.iterdir()), + ['a', 'b', 'c'], + ) + + def test_child_path_iter(self): + self.assertEqual(list((self.files / 'a').iterdir()), []) + + def test_orphan_path_iter(self): + self.assertEqual(list((self.files / 'a' / 'a').iterdir()), []) + self.assertEqual(list((self.files / 'a' / 'a' / 'a').iterdir()), []) + + def test_spec_path_is(self): + self.assertFalse(self.files.is_file()) + self.assertFalse(self.files.is_dir()) + + def test_child_path_is(self): + self.assertTrue((self.files / 'a').is_file()) + self.assertFalse((self.files / 'a').is_dir()) + + def test_orphan_path_is(self): + self.assertFalse((self.files / 'a' / 'a').is_file()) + self.assertFalse((self.files / 'a' / 'a').is_dir()) + self.assertFalse((self.files / 'a' / 'a' / 'a').is_file()) + self.assertFalse((self.files / 'a' / 'a' / 'a').is_dir()) + + def test_spec_path_name(self): + self.assertEqual(self.files.name, 'testingpackage') + + def test_child_path_name(self): + self.assertEqual((self.files / 'a').name, 'a') + + def test_orphan_path_name(self): + self.assertEqual((self.files / 'a' / 'b').name, 'b') + self.assertEqual((self.files / 'a' / 'b' / 'c').name, 'c') + + def test_spec_path_open(self): + self.assertEqual(self.files.read_bytes(), b'Hello, world!') + self.assertEqual(self.files.read_text(encoding='utf-8'), 'Hello, world!') + + def test_child_path_open(self): + self.assertEqual((self.files / 'a').read_bytes(), b'Hello, world!') + self.assertEqual( + (self.files / 'a').read_text(encoding='utf-8'), 'Hello, world!' 
+ ) + + def test_orphan_path_open(self): + with self.assertRaises(FileNotFoundError): + (self.files / 'a' / 'b').read_bytes() + with self.assertRaises(FileNotFoundError): + (self.files / 'a' / 'b' / 'c').read_bytes() + + def test_open_invalid_mode(self): + with self.assertRaises(ValueError): + self.files.open('0') + + def test_orphan_path_invalid(self): + with self.assertRaises(ValueError): + CompatibilityFiles.OrphanPath() + + def test_wrap_spec(self): + spec = wrap_spec(self.package) + self.assertIsInstance(spec.loader.get_resource_reader(None), CompatibilityFiles) + + +class CompatibilityFilesNoReaderTests(unittest.TestCase): + @property + def package(self): + return util.create_package_from_loader(None) + + @property + def files(self): + return resources.files(self.package) + + def test_spec_path_joinpath(self): + self.assertIsInstance(self.files / 'a', CompatibilityFiles.OrphanPath) diff --git a/lib/python3.10/site-packages/importlib_resources/tests/test_custom.py b/lib/python3.10/site-packages/importlib_resources/tests/test_custom.py new file mode 100644 index 0000000000000000000000000000000000000000..25ae0e75784d9021aea6d089206fe87b05ee8831 --- /dev/null +++ b/lib/python3.10/site-packages/importlib_resources/tests/test_custom.py @@ -0,0 +1,48 @@ +import contextlib +import pathlib +import unittest + +import importlib_resources as resources + +from .. import abc +from ..abc import ResourceReader, TraversableResources +from . import util +from .compat.py39 import os_helper + + +class SimpleLoader: + """ + A simple loader that only implements a resource reader. + """ + + def __init__(self, reader: ResourceReader): + self.reader = reader + + def get_resource_reader(self, package): + return self.reader + + +class MagicResources(TraversableResources): + """ + Magically returns the resources at path. 
+ """ + + def __init__(self, path: pathlib.Path): + self.path = path + + def files(self): + return self.path + + +class CustomTraversableResourcesTests(unittest.TestCase): + def setUp(self): + self.fixtures = contextlib.ExitStack() + self.addCleanup(self.fixtures.close) + + def test_custom_loader(self): + temp_dir = pathlib.Path(self.fixtures.enter_context(os_helper.temp_dir())) + loader = SimpleLoader(MagicResources(temp_dir)) + pkg = util.create_package_from_loader(loader) + files = resources.files(pkg) + assert isinstance(files, abc.Traversable) + assert list(files.iterdir()) == [] diff --git a/lib/python3.10/site-packages/importlib_resources/tests/test_files.py b/lib/python3.10/site-packages/importlib_resources/tests/test_files.py new file mode 100644 index 0000000000000000000000000000000000000000..be20660313029aab66a34800aac12bf65c9c0469 --- /dev/null +++ b/lib/python3.10/site-packages/importlib_resources/tests/test_files.py @@ -0,0 +1,194 @@ +import contextlib +import importlib +import pathlib +import py_compile +import textwrap +import unittest +import warnings + +import importlib_resources as resources + +from ..abc import Traversable +from . 
# === importlib_resources/tests/test_files.py (new file) ===
import contextlib
import importlib
import pathlib
import py_compile
import textwrap
import unittest
import warnings

import importlib_resources as resources

from ..abc import Traversable
from . import util
from .compat.py39 import import_helper, os_helper


@contextlib.contextmanager
def suppress_known_deprecation():
    """Record (rather than surface) the expected DeprecationWarning."""
    with warnings.catch_warnings(record=True) as ctx:
        warnings.simplefilter('default', category=DeprecationWarning)
        yield ctx


class FilesTests:
    """Shared assertions over resources.files(); `data` is set by the setup mixin."""

    def test_read_bytes(self):
        files = resources.files(self.data)
        actual = files.joinpath('utf-8.file').read_bytes()
        assert actual == b'Hello, UTF-8 world!\n'

    def test_read_text(self):
        files = resources.files(self.data)
        actual = files.joinpath('utf-8.file').read_text(encoding='utf-8')
        assert actual == 'Hello, UTF-8 world!\n'

    def test_traversable(self):
        assert isinstance(resources.files(self.data), Traversable)

    def test_joinpath_with_multiple_args(self):
        files = resources.files(self.data)
        binfile = files.joinpath('subdirectory', 'binary.file')
        self.assertTrue(binfile.is_file())

    def test_old_parameter(self):
        """
        Files used to take a 'package' parameter. Make sure anyone
        passing by name is still supported.
        """
        with suppress_known_deprecation():
            resources.files(package=self.data)


class OpenDiskTests(FilesTests, util.DiskSetup, unittest.TestCase):
    pass


class OpenZipTests(FilesTests, util.ZipSetup, unittest.TestCase):
    pass


class OpenNamespaceTests(FilesTests, util.DiskSetup, unittest.TestCase):
    MODULE = 'namespacedata01'

    def test_non_paths_in_dunder_path(self):
        """
        Non-path items in a namespace package's ``__path__`` are ignored.

        As reported in python/importlib_resources#311, some tools
        like Setuptools, when creating editable packages, will inject
        non-paths into a namespace package's ``__path__``, a
        sentinel like
        ``__editable__.sample_namespace-1.0.finder.__path_hook__``
        to cause the ``PathEntryFinder`` to be called when searching
        for packages. In that case, resources should still be loadable.
        """
        import namespacedata01  # type: ignore[import-not-found]

        namespacedata01.__path__.append(
            '__editable__.sample_namespace-1.0.finder.__path_hook__'
        )

        resources.files(namespacedata01)


class OpenNamespaceZipTests(FilesTests, util.ZipSetup, unittest.TestCase):
    ZIP_MODULE = 'namespacedata01'


class DirectSpec:
    """
    Override behavior of ModuleSetup to write a full spec directly.
    """

    MODULE = 'unused'

    def load_fixture(self, name):
        self.tree_on_path(self.spec)


class ModulesFiles:
    spec = {
        'mod.py': '',
        'res.txt': 'resources are the best',
    }

    def test_module_resources(self):
        """
        A module can have resources found adjacent to the module.
        """
        import mod  # type: ignore[import-not-found]

        actual = resources.files(mod).joinpath('res.txt').read_text(encoding='utf-8')
        assert actual == self.spec['res.txt']


class ModuleFilesDiskTests(DirectSpec, util.DiskSetup, ModulesFiles, unittest.TestCase):
    pass


class ModuleFilesZipTests(DirectSpec, util.ZipSetup, ModulesFiles, unittest.TestCase):
    pass


class ImplicitContextFiles:
    """Exercise files() with no anchor, inferring the caller's package."""

    set_val = textwrap.dedent(
        f"""
        import {resources.__name__} as res
        val = res.files().joinpath('res.txt').read_text(encoding='utf-8')
        """
    )
    spec = {
        'somepkg': {
            '__init__.py': set_val,
            'submod.py': set_val,
            'res.txt': 'resources are the best',
        },
        'frozenpkg': {
            '__init__.py': set_val.replace(resources.__name__, 'c_resources'),
            'res.txt': 'resources are the best',
        },
    }

    def test_implicit_files_package(self):
        """
        Without any parameter, files() will infer the location as the caller.
        """
        assert importlib.import_module('somepkg').val == 'resources are the best'

    def test_implicit_files_submodule(self):
        """
        Without any parameter, files() will infer the location as the caller.
        """
        assert importlib.import_module('somepkg.submod').val == 'resources are the best'

    def _compile_importlib(self):
        """
        Make a compiled-only copy of the importlib resources package.

        Currently only code is copied, as importlib resources doesn't itself
        have any resources.
        """
        bin_site = self.fixtures.enter_context(os_helper.temp_dir())
        c_resources = pathlib.Path(bin_site, 'c_resources')
        sources = pathlib.Path(resources.__file__).parent

        for source_path in sources.glob('**/*.py'):
            c_path = c_resources.joinpath(source_path.relative_to(sources)).with_suffix(
                '.pyc'
            )
            py_compile.compile(source_path, c_path)
        self.fixtures.enter_context(import_helper.DirsOnSysPath(bin_site))

    def test_implicit_files_with_compiled_importlib(self):
        """
        Caller detection works for compiled-only resources module.

        python/cpython#123085
        """
        self._compile_importlib()
        assert importlib.import_module('frozenpkg').val == 'resources are the best'


class ImplicitContextFilesDiskTests(
    DirectSpec, util.DiskSetup, ImplicitContextFiles, unittest.TestCase
):
    pass


class ImplicitContextFilesZipTests(
    DirectSpec, util.ZipSetup, ImplicitContextFiles, unittest.TestCase
):
    pass


if __name__ == '__main__':
    unittest.main()


# === importlib_resources/tests/test_functional.py (new file; continues in next chunk) ===
import importlib
import os
import unittest

import importlib_resources as resources

from . import util
# === importlib_resources/tests/test_functional.py (continued) ===
from .compat.py39 import warnings_helper


class StringAnchorMixin:
    anchor01 = 'data01'
    anchor02 = 'data02'


class ModuleAnchorMixin:
    @property
    def anchor01(self):
        return importlib.import_module('data01')

    @property
    def anchor02(self):
        return importlib.import_module('data02')


class FunctionalAPIBase:
    """Shared assertions for the functional (top-level) resources API."""

    def setUp(self):
        super().setUp()
        self.load_fixture('data02')

    def _gen_resourcetxt_path_parts(self):
        """Yield various names of a text file in anchor02, each in a subTest"""
        for path_parts in (
            ('subdirectory', 'subsubdir', 'resource.txt'),
            ('subdirectory/subsubdir/resource.txt',),
            ('subdirectory/subsubdir', 'resource.txt'),
        ):
            with self.subTest(path_parts=path_parts):
                yield path_parts

    def assertEndsWith(self, string, suffix):
        """Assert that `string` ends with `suffix`.

        Used to ignore an architecture-specific UTF-16 byte-order mark."""
        self.assertEqual(string[-len(suffix) :], suffix)

    def test_read_text(self):
        self.assertEqual(
            resources.read_text(self.anchor01, 'utf-8.file'),
            'Hello, UTF-8 world!\n',
        )
        self.assertEqual(
            resources.read_text(
                self.anchor02,
                'subdirectory',
                'subsubdir',
                'resource.txt',
                encoding='utf-8',
            ),
            'a resource',
        )
        for path_parts in self._gen_resourcetxt_path_parts():
            self.assertEqual(
                resources.read_text(
                    self.anchor02,
                    *path_parts,
                    encoding='utf-8',
                ),
                'a resource',
            )
        # Use generic OSError, since e.g. attempting to read a directory can
        # fail with PermissionError rather than IsADirectoryError
        with self.assertRaises(OSError):
            resources.read_text(self.anchor01)
        with self.assertRaises((OSError, resources.abc.TraversalError)):
            resources.read_text(self.anchor01, 'no-such-file')
        with self.assertRaises(UnicodeDecodeError):
            resources.read_text(self.anchor01, 'utf-16.file')
        self.assertEqual(
            resources.read_text(
                self.anchor01,
                'binary.file',
                encoding='latin1',
            ),
            '\x00\x01\x02\x03',
        )
        self.assertEndsWith(  # ignore the BOM
            resources.read_text(
                self.anchor01,
                'utf-16.file',
                errors='backslashreplace',
            ),
            'Hello, UTF-16 world!\n'.encode('utf-16-le').decode(
                errors='backslashreplace',
            ),
        )

    def test_read_binary(self):
        self.assertEqual(
            resources.read_binary(self.anchor01, 'utf-8.file'),
            b'Hello, UTF-8 world!\n',
        )
        for path_parts in self._gen_resourcetxt_path_parts():
            self.assertEqual(
                resources.read_binary(self.anchor02, *path_parts),
                b'a resource',
            )

    def test_open_text(self):
        with resources.open_text(self.anchor01, 'utf-8.file') as f:
            self.assertEqual(f.read(), 'Hello, UTF-8 world!\n')
        for path_parts in self._gen_resourcetxt_path_parts():
            with resources.open_text(
                self.anchor02,
                *path_parts,
                encoding='utf-8',
            ) as f:
                self.assertEqual(f.read(), 'a resource')
        # Use generic OSError, since e.g. attempting to read a directory can
        # fail with PermissionError rather than IsADirectoryError
        with self.assertRaises(OSError):
            resources.open_text(self.anchor01)
        with self.assertRaises((OSError, resources.abc.TraversalError)):
            resources.open_text(self.anchor01, 'no-such-file')
        with resources.open_text(self.anchor01, 'utf-16.file') as f:
            with self.assertRaises(UnicodeDecodeError):
                f.read()
        with resources.open_text(
            self.anchor01,
            'binary.file',
            encoding='latin1',
        ) as f:
            self.assertEqual(f.read(), '\x00\x01\x02\x03')
        with resources.open_text(
            self.anchor01,
            'utf-16.file',
            errors='backslashreplace',
        ) as f:
            self.assertEndsWith(  # ignore the BOM
                f.read(),
                'Hello, UTF-16 world!\n'.encode('utf-16-le').decode(
                    errors='backslashreplace',
                ),
            )

    def test_open_binary(self):
        with resources.open_binary(self.anchor01, 'utf-8.file') as f:
            self.assertEqual(f.read(), b'Hello, UTF-8 world!\n')
        for path_parts in self._gen_resourcetxt_path_parts():
            with resources.open_binary(
                self.anchor02,
                *path_parts,
            ) as f:
                self.assertEqual(f.read(), b'a resource')

    def test_path(self):
        with resources.path(self.anchor01, 'utf-8.file') as path:
            with open(str(path), encoding='utf-8') as f:
                self.assertEqual(f.read(), 'Hello, UTF-8 world!\n')
        with resources.path(self.anchor01) as path:
            with open(os.path.join(path, 'utf-8.file'), encoding='utf-8') as f:
                self.assertEqual(f.read(), 'Hello, UTF-8 world!\n')

    def test_is_resource(self):
        is_resource = resources.is_resource
        self.assertTrue(is_resource(self.anchor01, 'utf-8.file'))
        self.assertFalse(is_resource(self.anchor01, 'no_such_file'))
        self.assertFalse(is_resource(self.anchor01))
        self.assertFalse(is_resource(self.anchor01, 'subdirectory'))
        for path_parts in self._gen_resourcetxt_path_parts():
            self.assertTrue(is_resource(self.anchor02, *path_parts))

    def test_contents(self):
        with warnings_helper.check_warnings((".*contents.*", DeprecationWarning)):
            c = resources.contents(self.anchor01)
        self.assertGreaterEqual(
            set(c),
            {'utf-8.file', 'utf-16.file', 'binary.file', 'subdirectory'},
        )
        with (
            self.assertRaises(OSError),
            warnings_helper.check_warnings((
                ".*contents.*",
                DeprecationWarning,
            )),
        ):
            list(resources.contents(self.anchor01, 'utf-8.file'))

        for path_parts in self._gen_resourcetxt_path_parts():
            with (
                self.assertRaises((OSError, resources.abc.TraversalError)),
                warnings_helper.check_warnings((
                    ".*contents.*",
                    DeprecationWarning,
                )),
            ):
                list(resources.contents(self.anchor01, *path_parts))
        with warnings_helper.check_warnings((".*contents.*", DeprecationWarning)):
            c = resources.contents(self.anchor01, 'subdirectory')
        self.assertGreaterEqual(
            set(c),
            {'binary.file'},
        )

    @warnings_helper.ignore_warnings(category=DeprecationWarning)
    def test_common_errors(self):
        for func in (
            resources.read_text,
            resources.read_binary,
            resources.open_text,
            resources.open_binary,
            resources.path,
            resources.is_resource,
            resources.contents,
        ):
            with self.subTest(func=func):
                # Rejecting None anchor
                with self.assertRaises(TypeError):
                    func(None)
                # Rejecting invalid anchor type
                with self.assertRaises((TypeError, AttributeError)):
                    func(1234)
                # Unknown module
                with self.assertRaises(ModuleNotFoundError):
                    func('$missing module$')

    def test_text_errors(self):
        for func in (
            resources.read_text,
            resources.open_text,
        ):
            with self.subTest(func=func):
                # Multiple path arguments need explicit encoding argument.
                with self.assertRaises(TypeError):
                    func(
                        self.anchor02,
                        'subdirectory',
                        'subsubdir',
                        'resource.txt',
                    )


class FunctionalAPITest_StringAnchor_Disk(
    StringAnchorMixin,
    FunctionalAPIBase,
    util.DiskSetup,
    unittest.TestCase,
):
    pass


class FunctionalAPITest_ModuleAnchor_Disk(
    ModuleAnchorMixin,
    FunctionalAPIBase,
    util.DiskSetup,
    unittest.TestCase,
):
    pass


class FunctionalAPITest_StringAnchor_Memory(
    StringAnchorMixin,
    FunctionalAPIBase,
    util.MemorySetup,
    unittest.TestCase,
):
    pass


# === importlib_resources/tests/test_path.py (new file) ===
import io
import pathlib
import unittest

import importlib_resources as resources

from . import util


class CommonTests(util.CommonTests, unittest.TestCase):
    def execute(self, package, path):
        with resources.as_file(resources.files(package).joinpath(path)):
            pass


class PathTests:
    def test_reading(self):
        """
        Path should be readable and a pathlib.Path instance.
        """
        target = resources.files(self.data) / 'utf-8.file'
        with resources.as_file(target) as path:
            self.assertIsInstance(path, pathlib.Path)
            self.assertTrue(path.name.endswith("utf-8.file"), repr(path))
            self.assertEqual('Hello, UTF-8 world!\n', path.read_text(encoding='utf-8'))


class PathDiskTests(PathTests, util.DiskSetup, unittest.TestCase):
    def test_natural_path(self):
        """
        Guarantee the internal implementation detail that
        file-system-backed resources do not get the tempdir
        treatment.
        """
        target = resources.files(self.data) / 'utf-8.file'
        with resources.as_file(target) as path:
            assert 'data' in str(path)


class PathMemoryTests(PathTests, unittest.TestCase):
    def setUp(self):
        file = io.BytesIO(b'Hello, UTF-8 world!\n')
        self.addCleanup(file.close)
        self.data = util.create_package(
            file=file, path=FileNotFoundError("package exists only in memory")
        )
        self.data.__spec__.origin = None
        self.data.__spec__.has_location = False


class PathZipTests(PathTests, util.ZipSetup, unittest.TestCase):
    def test_remove_in_context_manager(self):
        """
        It is not an error if the file that was temporarily stashed on the
        file system is removed inside the `with` stanza.
        """
        target = resources.files(self.data) / 'utf-8.file'
        with resources.as_file(target) as path:
            path.unlink()


if __name__ == '__main__':
    unittest.main()


# === importlib_resources/tests/test_read.py (new file; continues in next chunk) ===
import unittest
from importlib import import_module

import importlib_resources as resources

from . import util
# === importlib_resources/tests/test_read.py (continued; import header in previous chunk) ===


class CommonBinaryTests(util.CommonTests, unittest.TestCase):
    def execute(self, package, path):
        resources.files(package).joinpath(path).read_bytes()


class CommonTextTests(util.CommonTests, unittest.TestCase):
    def execute(self, package, path):
        resources.files(package).joinpath(path).read_text(encoding='utf-8')


class ReadTests:
    """Shared read_bytes/read_text assertions; `data` is set by the setup mixin."""

    def test_read_bytes(self):
        result = resources.files(self.data).joinpath('binary.file').read_bytes()
        self.assertEqual(result, bytes(range(4)))

    def test_read_text_default_encoding(self):
        result = (
            resources.files(self.data)
            .joinpath('utf-8.file')
            .read_text(encoding='utf-8')
        )
        self.assertEqual(result, 'Hello, UTF-8 world!\n')

    def test_read_text_given_encoding(self):
        result = (
            resources.files(self.data)
            .joinpath('utf-16.file')
            .read_text(encoding='utf-16')
        )
        self.assertEqual(result, 'Hello, UTF-16 world!\n')

    def test_read_text_with_errors(self):
        """
        Raises UnicodeError without the 'errors' argument.
        """
        target = resources.files(self.data) / 'utf-16.file'
        self.assertRaises(UnicodeError, target.read_text, encoding='utf-8')
        result = target.read_text(encoding='utf-8', errors='ignore')
        self.assertEqual(
            result,
            'H\x00e\x00l\x00l\x00o\x00,\x00 '
            '\x00U\x00T\x00F\x00-\x001\x006\x00 '
            '\x00w\x00o\x00r\x00l\x00d\x00!\x00\n\x00',
        )


class ReadDiskTests(ReadTests, util.DiskSetup, unittest.TestCase):
    pass


class ReadZipTests(ReadTests, util.ZipSetup, unittest.TestCase):
    def test_read_submodule_resource(self):
        submodule = import_module('data01.subdirectory')
        result = resources.files(submodule).joinpath('binary.file').read_bytes()
        self.assertEqual(result, bytes(range(4, 8)))

    def test_read_submodule_resource_by_name(self):
        result = (
            resources.files('data01.subdirectory').joinpath('binary.file').read_bytes()
        )
        self.assertEqual(result, bytes(range(4, 8)))


class ReadNamespaceTests(ReadTests, util.DiskSetup, unittest.TestCase):
    MODULE = 'namespacedata01'


class ReadNamespaceZipTests(ReadTests, util.ZipSetup, unittest.TestCase):
    MODULE = 'namespacedata01'

    def test_read_submodule_resource(self):
        submodule = import_module('namespacedata01.subdirectory')
        result = resources.files(submodule).joinpath('binary.file').read_bytes()
        self.assertEqual(result, bytes(range(12, 16)))

    def test_read_submodule_resource_by_name(self):
        result = (
            resources.files('namespacedata01.subdirectory')
            .joinpath('binary.file')
            .read_bytes()
        )
        self.assertEqual(result, bytes(range(12, 16)))


if __name__ == '__main__':
    unittest.main()


# === importlib_resources/tests/test_reader.py (new file; continues in next chunk) ===
import os.path
# === importlib_resources/tests/test_reader.py (continued; `import os.path` in previous chunk) ===
import pathlib
import unittest
from importlib import import_module

from importlib_resources.readers import MultiplexedPath, NamespaceReader

from . import util


class MultiplexedPathTest(util.DiskSetup, unittest.TestCase):
    """Exercises MultiplexedPath over the namespacedata01/data01/data02 fixtures."""

    MODULE = 'namespacedata01'

    def setUp(self):
        super().setUp()
        self.folder = pathlib.Path(self.data.__path__[0])
        self.data01 = pathlib.Path(self.load_fixture('data01').__file__).parent
        self.data02 = pathlib.Path(self.load_fixture('data02').__file__).parent

    def test_init_no_paths(self):
        with self.assertRaises(FileNotFoundError):
            MultiplexedPath()

    def test_init_file(self):
        with self.assertRaises(NotADirectoryError):
            MultiplexedPath(self.folder / 'binary.file')

    def test_iterdir(self):
        contents = {path.name for path in MultiplexedPath(self.folder).iterdir()}
        try:
            contents.remove('__pycache__')
        except (KeyError, ValueError):
            pass
        self.assertEqual(
            contents, {'subdirectory', 'binary.file', 'utf-16.file', 'utf-8.file'}
        )

    def test_iterdir_duplicate(self):
        contents = {
            path.name for path in MultiplexedPath(self.folder, self.data01).iterdir()
        }
        for remove in ('__pycache__', '__init__.pyc'):
            try:
                contents.remove(remove)
            except (KeyError, ValueError):
                pass
        self.assertEqual(
            contents,
            {'__init__.py', 'binary.file', 'subdirectory', 'utf-16.file', 'utf-8.file'},
        )

    def test_is_dir(self):
        self.assertEqual(MultiplexedPath(self.folder).is_dir(), True)

    def test_is_file(self):
        self.assertEqual(MultiplexedPath(self.folder).is_file(), False)

    def test_open_file(self):
        # A multiplexed (directory) path cannot be opened or read directly.
        path = MultiplexedPath(self.folder)
        with self.assertRaises(FileNotFoundError):
            path.read_bytes()
        with self.assertRaises(FileNotFoundError):
            path.read_text()
        with self.assertRaises(FileNotFoundError):
            path.open()

    def test_join_path(self):
        prefix = str(self.folder.parent)
        path = MultiplexedPath(self.folder, self.data01)
        self.assertEqual(
            str(path.joinpath('binary.file'))[len(prefix) + 1 :],
            os.path.join('namespacedata01', 'binary.file'),
        )
        sub = path.joinpath('subdirectory')
        assert isinstance(sub, MultiplexedPath)
        assert 'namespacedata01' in str(sub)
        assert 'data01' in str(sub)
        self.assertEqual(
            str(path.joinpath('imaginary'))[len(prefix) + 1 :],
            os.path.join('namespacedata01', 'imaginary'),
        )
        self.assertEqual(path.joinpath(), path)

    def test_join_path_compound(self):
        path = MultiplexedPath(self.folder)
        assert not path.joinpath('imaginary/foo.py').exists()

    def test_join_path_common_subdir(self):
        prefix = str(self.data02.parent)
        path = MultiplexedPath(self.data01, self.data02)
        self.assertIsInstance(path.joinpath('subdirectory'), MultiplexedPath)
        self.assertEqual(
            str(path.joinpath('subdirectory', 'subsubdir'))[len(prefix) + 1 :],
            os.path.join('data02', 'subdirectory', 'subsubdir'),
        )

    def test_repr(self):
        self.assertEqual(
            repr(MultiplexedPath(self.folder)),
            f"MultiplexedPath('{self.folder}')",
        )

    def test_name(self):
        self.assertEqual(
            MultiplexedPath(self.folder).name,
            os.path.basename(self.folder),
        )


class NamespaceReaderTest(util.DiskSetup, unittest.TestCase):
    MODULE = 'namespacedata01'

    def test_init_error(self):
        with self.assertRaises(ValueError):
            NamespaceReader(['path1', 'path2'])

    def test_resource_path(self):
        namespacedata01 = import_module('namespacedata01')
        reader = NamespaceReader(namespacedata01.__spec__.submodule_search_locations)

        root = self.data.__path__[0]
        self.assertEqual(
            reader.resource_path('binary.file'), os.path.join(root, 'binary.file')
        )
        self.assertEqual(
            reader.resource_path('imaginary'), os.path.join(root, 'imaginary')
        )

    def test_files(self):
        reader = NamespaceReader(self.data.__spec__.submodule_search_locations)
        root = self.data.__path__[0]
        self.assertIsInstance(reader.files(), MultiplexedPath)
        self.assertEqual(repr(reader.files()), f"MultiplexedPath('{root}')")
# === importlib_resources/tests/test_reader.py (tail; the closing assertEqual of
# NamespaceReaderTest.test_files is folded into the previous chunk) ===


if __name__ == '__main__':
    unittest.main()


# === importlib_resources/tests/test_resource.py (new file) ===
import unittest
from importlib import import_module

import importlib_resources as resources

from . import util


class ResourceTests:
    # Subclasses are expected to set the `data` attribute.

    def test_is_file_exists(self):
        target = resources.files(self.data) / 'binary.file'
        self.assertTrue(target.is_file())

    def test_is_file_missing(self):
        target = resources.files(self.data) / 'not-a-file'
        self.assertFalse(target.is_file())

    def test_is_dir(self):
        target = resources.files(self.data) / 'subdirectory'
        self.assertFalse(target.is_file())
        self.assertTrue(target.is_dir())


class ResourceDiskTests(ResourceTests, util.DiskSetup, unittest.TestCase):
    pass


class ResourceZipTests(ResourceTests, util.ZipSetup, unittest.TestCase):
    pass


def names(traversable):
    """Return the set of entry names directly under *traversable*."""
    return {item.name for item in traversable.iterdir()}


class ResourceLoaderTests(util.DiskSetup, unittest.TestCase):
    def test_resource_contents(self):
        package = util.create_package(
            file=self.data, path=self.data.__file__, contents=['A', 'B', 'C']
        )
        self.assertEqual(names(resources.files(package)), {'A', 'B', 'C'})

    def test_is_file(self):
        package = util.create_package(
            file=self.data,
            path=self.data.__file__,
            contents=['A', 'B', 'C', 'D/E', 'D/F'],
        )
        self.assertTrue(resources.files(package).joinpath('B').is_file())

    def test_is_dir(self):
        package = util.create_package(
            file=self.data,
            path=self.data.__file__,
            contents=['A', 'B', 'C', 'D/E', 'D/F'],
        )
        self.assertTrue(resources.files(package).joinpath('D').is_dir())

    def test_resource_missing(self):
        package = util.create_package(
            file=self.data,
            path=self.data.__file__,
            contents=['A', 'B', 'C', 'D/E', 'D/F'],
        )
        self.assertFalse(resources.files(package).joinpath('Z').is_file())


class ResourceCornerCaseTests(util.DiskSetup, unittest.TestCase):
    def test_package_has_no_reader_fallback(self):
        """
        Test odd ball packages which:
        # 1. Do not have a ResourceReader as a loader
        # 2. Are not on the file system
        # 3. Are not in a zip file
        """
        module = util.create_package(
            file=self.data, path=self.data.__file__, contents=['A', 'B', 'C']
        )
        # Give the module a dummy loader.
        module.__loader__ = object()
        # Give the module a dummy origin.
        module.__file__ = '/path/which/shall/not/be/named'
        module.__spec__.loader = module.__loader__
        module.__spec__.origin = module.__file__
        self.assertFalse(resources.files(module).joinpath('A').is_file())


class ResourceFromZipsTest01(util.ZipSetup, unittest.TestCase):
    def test_is_submodule_resource(self):
        submodule = import_module('data01.subdirectory')
        self.assertTrue(resources.files(submodule).joinpath('binary.file').is_file())

    def test_read_submodule_resource_by_name(self):
        self.assertTrue(
            resources.files('data01.subdirectory').joinpath('binary.file').is_file()
        )

    def test_submodule_contents(self):
        submodule = import_module('data01.subdirectory')
        self.assertEqual(
            names(resources.files(submodule)), {'__init__.py', 'binary.file'}
        )

    def test_submodule_contents_by_name(self):
        self.assertEqual(
            names(resources.files('data01.subdirectory')),
            {'__init__.py', 'binary.file'},
        )

    def test_as_file_directory(self):
        with resources.as_file(resources.files('data01')) as data:
            assert data.name == 'data01'
            assert data.is_dir()
            assert data.joinpath('subdirectory').is_dir()
            assert len(list(data.iterdir()))
        assert not data.parent.exists()


class ResourceFromZipsTest02(util.ZipSetup, unittest.TestCase):
    MODULE = 'data02'

    def test_unrelated_contents(self):
        """
        Test that a zip with two unrelated subpackages returns
        distinct resources. Ref python/importlib_resources#44.
        """
        self.assertEqual(
            names(resources.files('data02.one')),
            {'__init__.py', 'resource1.txt'},
        )
        self.assertEqual(
            names(resources.files('data02.two')),
            {'__init__.py', 'resource2.txt'},
        )


class DeletingZipsTest(util.ZipSetup, unittest.TestCase):
    """Having accessed resources in a zip file should not keep an open
    reference to the zip.
    """

    def test_iterdir_does_not_keep_open(self):
        [item.name for item in resources.files('data01').iterdir()]

    def test_is_file_does_not_keep_open(self):
        resources.files('data01').joinpath('binary.file').is_file()

    def test_is_file_failure_does_not_keep_open(self):
        resources.files('data01').joinpath('not-present').is_file()

    @unittest.skip("Desired but not supported.")
    def test_as_file_does_not_keep_open(self):  # pragma: no cover
        resources.as_file(resources.files('data01') / 'binary.file')

    def test_entered_path_does_not_keep_open(self):
        """
        Mimic what certifi does on import to make its bundle
        available for the process duration.
        """
        resources.as_file(resources.files('data01') / 'binary.file').__enter__()

    def test_read_binary_does_not_keep_open(self):
        resources.files('data01').joinpath('binary.file').read_bytes()

    def test_read_text_does_not_keep_open(self):
        resources.files('data01').joinpath('utf-8.file').read_text(encoding='utf-8')


class ResourceFromNamespaceTests:
    def test_is_submodule_resource(self):
        self.assertTrue(
            resources.files(import_module('namespacedata01'))
            .joinpath('binary.file')
            .is_file()
        )

    def test_read_submodule_resource_by_name(self):
        self.assertTrue(
            resources.files('namespacedata01').joinpath('binary.file').is_file()
        )

    def test_submodule_contents(self):
        contents = names(resources.files(import_module('namespacedata01')))
        try:
            contents.remove('__pycache__')
        except KeyError:
            pass
        self.assertEqual(
            contents, {'subdirectory', 'binary.file', 'utf-8.file', 'utf-16.file'}
        )

    def test_submodule_contents_by_name(self):
        contents = names(resources.files('namespacedata01'))
        try:
            contents.remove('__pycache__')
        except KeyError:
            pass
        self.assertEqual(
            contents, {'subdirectory', 'binary.file', 'utf-8.file', 'utf-16.file'}
        )

    def test_submodule_sub_contents(self):
        contents = names(resources.files(import_module('namespacedata01.subdirectory')))
        try:
            contents.remove('__pycache__')
        except KeyError:
            pass
        self.assertEqual(contents, {'binary.file'})

    def test_submodule_sub_contents_by_name(self):
        contents = names(resources.files('namespacedata01.subdirectory'))
        try:
            contents.remove('__pycache__')
        except KeyError:
            pass
        self.assertEqual(contents, {'binary.file'})


class ResourceFromNamespaceDiskTests(
    util.DiskSetup,
    ResourceFromNamespaceTests,
    unittest.TestCase,
):
    MODULE = 'namespacedata01'


class ResourceFromNamespaceZipTests(
    util.ZipSetup,
    ResourceFromNamespaceTests,
    unittest.TestCase,
):
    MODULE = 'namespacedata01'


if __name__ == '__main__':
    unittest.main()
unittest.main() diff --git a/lib/python3.10/site-packages/importlib_resources/tests/test_util.py b/lib/python3.10/site-packages/importlib_resources/tests/test_util.py new file mode 100644 index 0000000000000000000000000000000000000000..de304b6f3510a6975f596de99822c1359adc18db --- /dev/null +++ b/lib/python3.10/site-packages/importlib_resources/tests/test_util.py @@ -0,0 +1,29 @@ +import unittest + +from .util import MemorySetup, Traversable + + +class TestMemoryTraversableImplementation(unittest.TestCase): + def test_concrete_methods_are_not_overridden(self): + """`MemoryTraversable` must not override `Traversable` concrete methods. + + This test is not an attempt to enforce a particular `Traversable` protocol; + it merely catches changes in the `Traversable` abstract/concrete methods + that have not been mirrored in the `MemoryTraversable` subclass. + """ + + traversable_concrete_methods = { + method + for method, value in Traversable.__dict__.items() + if callable(value) and method not in Traversable.__abstractmethods__ + } + memory_traversable_concrete_methods = { + method + for method, value in MemorySetup.MemoryTraversable.__dict__.items() + if callable(value) and not method.startswith("__") + } + overridden_methods = ( + memory_traversable_concrete_methods & traversable_concrete_methods + ) + + assert not overridden_methods diff --git a/lib/python3.10/site-packages/importlib_resources/tests/zip.py b/lib/python3.10/site-packages/importlib_resources/tests/zip.py new file mode 100644 index 0000000000000000000000000000000000000000..51ee564870c101bd447969c8433148eba597ab26 --- /dev/null +++ b/lib/python3.10/site-packages/importlib_resources/tests/zip.py @@ -0,0 +1,26 @@ +""" +Generate zip test data files. +""" + +import zipfile + +import zipp + + +def make_zip_file(tree, dst): + """ + Zip the files in tree into a new zipfile at dst. 
+ """ + with zipfile.ZipFile(dst, 'w') as zf: + for name, contents in walk(tree): + zf.writestr(name, contents) + zipp.CompleteDirs.inject(zf) + return dst + + +def walk(tree, prefix=''): + for name, contents in tree.items(): + if isinstance(contents, dict): + yield from walk(contents, prefix=f'{prefix}{name}/') + else: + yield f'{prefix}{name}', contents diff --git a/lib/python3.10/site-packages/pandas/__init__.py b/lib/python3.10/site-packages/pandas/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..ca2eba20432924304517be99d5113bc9f57614d2 --- /dev/null +++ b/lib/python3.10/site-packages/pandas/__init__.py @@ -0,0 +1,367 @@ +from __future__ import annotations + +import os +import warnings + +__docformat__ = "restructuredtext" + +# Let users know if they're missing any of our hard dependencies +_hard_dependencies = ("numpy", "pytz", "dateutil") +_missing_dependencies = [] + +for _dependency in _hard_dependencies: + try: + __import__(_dependency) + except ImportError as _e: # pragma: no cover + _missing_dependencies.append(f"{_dependency}: {_e}") + +if _missing_dependencies: # pragma: no cover + raise ImportError( + "Unable to import required dependencies:\n" + "\n".join(_missing_dependencies) + ) +del _hard_dependencies, _dependency, _missing_dependencies + +try: + # numpy compat + from pandas.compat import ( + is_numpy_dev as _is_numpy_dev, # pyright: ignore[reportUnusedImport] # noqa: F401 + ) +except ImportError as _err: # pragma: no cover + _module = _err.name + raise ImportError( + f"C extension: {_module} not built. If you want to import " + "pandas from the source directory, you may need to run " + "'python setup.py build_ext' to build the C extensions first." 
+ ) from _err + +from pandas._config import ( + get_option, + set_option, + reset_option, + describe_option, + option_context, + options, +) + +# let init-time option registration happen +import pandas.core.config_init # pyright: ignore[reportUnusedImport] # noqa: F401 + +from pandas.core.api import ( + # dtype + ArrowDtype, + Int8Dtype, + Int16Dtype, + Int32Dtype, + Int64Dtype, + UInt8Dtype, + UInt16Dtype, + UInt32Dtype, + UInt64Dtype, + Float32Dtype, + Float64Dtype, + CategoricalDtype, + PeriodDtype, + IntervalDtype, + DatetimeTZDtype, + StringDtype, + BooleanDtype, + # missing + NA, + isna, + isnull, + notna, + notnull, + # indexes + Index, + CategoricalIndex, + RangeIndex, + MultiIndex, + IntervalIndex, + TimedeltaIndex, + DatetimeIndex, + PeriodIndex, + IndexSlice, + # tseries + NaT, + Period, + period_range, + Timedelta, + timedelta_range, + Timestamp, + date_range, + bdate_range, + Interval, + interval_range, + DateOffset, + # conversion + to_numeric, + to_datetime, + to_timedelta, + # misc + Flags, + Grouper, + factorize, + unique, + value_counts, + NamedAgg, + array, + Categorical, + set_eng_float_format, + Series, + DataFrame, +) + +from pandas.core.dtypes.dtypes import SparseDtype + +from pandas.tseries.api import infer_freq +from pandas.tseries import offsets + +from pandas.core.computation.api import eval + +from pandas.core.reshape.api import ( + concat, + lreshape, + melt, + wide_to_long, + merge, + merge_asof, + merge_ordered, + crosstab, + pivot, + pivot_table, + get_dummies, + from_dummies, + cut, + qcut, +) + +from pandas import api, arrays, errors, io, plotting, tseries +from pandas import testing +from pandas.util._print_versions import show_versions + +from pandas.io.api import ( + # excel + ExcelFile, + ExcelWriter, + read_excel, + # parsers + read_csv, + read_fwf, + read_table, + # pickle + read_pickle, + to_pickle, + # pytables + HDFStore, + read_hdf, + # sql + read_sql, + read_sql_query, + read_sql_table, + # misc + read_clipboard, + 
read_parquet, + read_orc, + read_feather, + read_gbq, + read_html, + read_xml, + read_json, + read_stata, + read_sas, + read_spss, +) + +from pandas.io.json._normalize import json_normalize + +from pandas.util._tester import test + +# use the closest tagged version if possible +_built_with_meson = False +try: + from pandas._version_meson import ( # pyright: ignore [reportMissingImports] + __version__, + __git_version__, + ) + + _built_with_meson = True +except ImportError: + from pandas._version import get_versions + + v = get_versions() + __version__ = v.get("closest-tag", v["version"]) + __git_version__ = v.get("full-revisionid") + del get_versions, v + +# GH#55043 - deprecation of the data_manager option +if "PANDAS_DATA_MANAGER" in os.environ: + warnings.warn( + "The env variable PANDAS_DATA_MANAGER is set. The data_manager option is " + "deprecated and will be removed in a future version. Only the BlockManager " + "will be available. Unset this environment variable to silence this warning.", + FutureWarning, + stacklevel=2, + ) + +del warnings, os + +# module level doc-string +__doc__ = """ +pandas - a powerful data analysis and manipulation library for Python +===================================================================== + +**pandas** is a Python package providing fast, flexible, and expressive data +structures designed to make working with "relational" or "labeled" data both +easy and intuitive. It aims to be the fundamental high-level building block for +doing practical, **real world** data analysis in Python. Additionally, it has +the broader goal of becoming **the most powerful and flexible open source data +analysis / manipulation tool available in any language**. It is already well on +its way toward this goal. + +Main Features +------------- +Here are just a few of the things that pandas does well: + + - Easy handling of missing data in floating point as well as non-floating + point data. 
+ - Size mutability: columns can be inserted and deleted from DataFrame and + higher dimensional objects + - Automatic and explicit data alignment: objects can be explicitly aligned + to a set of labels, or the user can simply ignore the labels and let + `Series`, `DataFrame`, etc. automatically align the data for you in + computations. + - Powerful, flexible group by functionality to perform split-apply-combine + operations on data sets, for both aggregating and transforming data. + - Make it easy to convert ragged, differently-indexed data in other Python + and NumPy data structures into DataFrame objects. + - Intelligent label-based slicing, fancy indexing, and subsetting of large + data sets. + - Intuitive merging and joining data sets. + - Flexible reshaping and pivoting of data sets. + - Hierarchical labeling of axes (possible to have multiple labels per tick). + - Robust IO tools for loading data from flat files (CSV and delimited), + Excel files, databases, and saving/loading data from the ultrafast HDF5 + format. + - Time series-specific functionality: date range generation and frequency + conversion, moving window statistics, date shifting and lagging. +""" + +# Use __all__ to let type checkers know what is part of the public API. +# Pandas is not (yet) a py.typed library: the public API is determined +# based on the documentation. 
+__all__ = [ + "ArrowDtype", + "BooleanDtype", + "Categorical", + "CategoricalDtype", + "CategoricalIndex", + "DataFrame", + "DateOffset", + "DatetimeIndex", + "DatetimeTZDtype", + "ExcelFile", + "ExcelWriter", + "Flags", + "Float32Dtype", + "Float64Dtype", + "Grouper", + "HDFStore", + "Index", + "IndexSlice", + "Int16Dtype", + "Int32Dtype", + "Int64Dtype", + "Int8Dtype", + "Interval", + "IntervalDtype", + "IntervalIndex", + "MultiIndex", + "NA", + "NaT", + "NamedAgg", + "Period", + "PeriodDtype", + "PeriodIndex", + "RangeIndex", + "Series", + "SparseDtype", + "StringDtype", + "Timedelta", + "TimedeltaIndex", + "Timestamp", + "UInt16Dtype", + "UInt32Dtype", + "UInt64Dtype", + "UInt8Dtype", + "api", + "array", + "arrays", + "bdate_range", + "concat", + "crosstab", + "cut", + "date_range", + "describe_option", + "errors", + "eval", + "factorize", + "get_dummies", + "from_dummies", + "get_option", + "infer_freq", + "interval_range", + "io", + "isna", + "isnull", + "json_normalize", + "lreshape", + "melt", + "merge", + "merge_asof", + "merge_ordered", + "notna", + "notnull", + "offsets", + "option_context", + "options", + "period_range", + "pivot", + "pivot_table", + "plotting", + "qcut", + "read_clipboard", + "read_csv", + "read_excel", + "read_feather", + "read_fwf", + "read_gbq", + "read_hdf", + "read_html", + "read_json", + "read_orc", + "read_parquet", + "read_pickle", + "read_sas", + "read_spss", + "read_sql", + "read_sql_query", + "read_sql_table", + "read_stata", + "read_table", + "read_xml", + "reset_option", + "set_eng_float_format", + "set_option", + "show_versions", + "test", + "testing", + "timedelta_range", + "to_datetime", + "to_numeric", + "to_pickle", + "to_timedelta", + "tseries", + "unique", + "value_counts", + "wide_to_long", +] diff --git a/lib/python3.10/site-packages/pandas/_config/__init__.py b/lib/python3.10/site-packages/pandas/_config/__init__.py new file mode 100644 index 
def using_copy_on_write() -> bool:
    """True when Copy-on-Write is fully enabled (block manager only)."""
    mode = _global_config["mode"]
    return mode["copy_on_write"] is True and mode["data_manager"] == "block"


def warn_copy_on_write() -> bool:
    """True when Copy-on-Write is in warning mode (block manager only)."""
    mode = _global_config["mode"]
    return mode["copy_on_write"] == "warn" and mode["data_manager"] == "block"


def using_nullable_dtypes() -> bool:
    """Return the value of the ``mode.nullable_dtypes`` option."""
    return _global_config["mode"]["nullable_dtypes"]


def using_pyarrow_string_dtype() -> bool:
    """Return the value of the ``future.infer_string`` option."""
    return _global_config["future"]["infer_string"]
+ +Overview +======== + +This module supports the following requirements: +- options are referenced using keys in dot.notation, e.g. "x.y.option - z". +- keys are case-insensitive. +- functions should accept partial/regex keys, when unambiguous. +- options can be registered by modules at import time. +- options can be registered at init-time (via core.config_init) +- options have a default value, and (optionally) a description and + validation function associated with them. +- options can be deprecated, in which case referencing them + should produce a warning. +- deprecated options can optionally be rerouted to a replacement + so that accessing a deprecated option reroutes to a differently + named option. +- options can be reset to their default value. +- all option can be reset to their default value at once. +- all options in a certain sub - namespace can be reset at once. +- the user can set / get / reset or ask for the description of an option. +- a developer can register and mark an option as deprecated. +- you can register a callback to be invoked when the option value + is set or reset. Changing the stored value is considered misuse, but + is not verboten. + +Implementation +============== + +- Data is stored using nested dictionaries, and should be accessed + through the provided API. + +- "Registered options" and "Deprecated options" have metadata associated + with them, which are stored in auxiliary dictionaries keyed on the + fully-qualified key, e.g. "x.y.z.option". + +- the config_init module is imported by the package's __init__.py file. + placing any register_option() calls there will ensure those options + are available as soon as pandas is loaded. If you use register_option + in a module, it will only be available after that module is imported, + which you should be aware of. + +- `config_prefix` is a context_manager (for use with the `with` keyword) + which can save developers some typing, see the docstring. 
+ +""" + +from __future__ import annotations + +from contextlib import ( + ContextDecorator, + contextmanager, +) +import re +from typing import ( + TYPE_CHECKING, + Any, + Callable, + Generic, + NamedTuple, + cast, +) +import warnings + +from pandas._typing import ( + F, + T, +) +from pandas.util._exceptions import find_stack_level + +if TYPE_CHECKING: + from collections.abc import ( + Generator, + Iterable, + ) + + +class DeprecatedOption(NamedTuple): + key: str + msg: str | None + rkey: str | None + removal_ver: str | None + + +class RegisteredOption(NamedTuple): + key: str + defval: object + doc: str + validator: Callable[[object], Any] | None + cb: Callable[[str], Any] | None + + +# holds deprecated option metadata +_deprecated_options: dict[str, DeprecatedOption] = {} + +# holds registered option metadata +_registered_options: dict[str, RegisteredOption] = {} + +# holds the current values for registered options +_global_config: dict[str, Any] = {} + +# keys which have a special meaning +_reserved_keys: list[str] = ["all"] + + +class OptionError(AttributeError, KeyError): + """ + Exception raised for pandas.options. + + Backwards compatible with KeyError checks. 
def _get_single_key(pat: str, silent: bool) -> str:
    """Resolve ``pat`` to exactly one registered key.

    Raises OptionError when ``pat`` matches zero or several keys.  Unless
    ``silent``, a deprecation warning is emitted for deprecated keys, and a
    deprecated key may be rerouted to its replacement.
    """
    keys = _select_options(pat)
    if not keys:
        if not silent:
            _warn_if_deprecated(pat)
        raise OptionError(f"No such keys(s): {repr(pat)}")
    if len(keys) > 1:
        raise OptionError("Pattern matched multiple keys")

    (key,) = keys
    if not silent:
        _warn_if_deprecated(key)
    return _translate_key(key)


def _get_option(pat: str, silent: bool = False) -> Any:
    """Return the current value of the single option matching ``pat``."""
    key = _get_single_key(pat, silent)
    # walk the nested config dict
    parent, leaf = _get_root(key)
    return parent[leaf]


def _set_option(*args, **kwargs) -> None:
    """Set one or more options from alternating key/value positional args."""
    if not args or len(args) % 2 != 0:
        raise ValueError("Must provide an even number of non-keyword arguments")

    # default to false
    silent = kwargs.pop("silent", False)
    if kwargs:
        bad = next(iter(kwargs.keys()))
        raise TypeError(f'_set_option() got an unexpected keyword argument "{bad}"')

    for pat, value in zip(args[::2], args[1::2]):
        key = _get_single_key(pat, silent)

        opt = _get_registered_option(key)
        if opt and opt.validator:
            opt.validator(value)

        # walk the nested config dict
        parent, leaf = _get_root(key)
        parent[leaf] = value

        if opt.cb:
            if silent:
                with warnings.catch_warnings(record=True):
                    opt.cb(key)
            else:
                opt.cb(key)


def _describe_option(pat: str = "", _print_desc: bool = True) -> str | None:
    """Print (or return, if ``_print_desc`` is False) descriptions of all
    options matching ``pat``."""
    keys = _select_options(pat)
    if not keys:
        raise OptionError("No such keys(s)")

    description = "\n".join(_build_option_description(k) for k in keys)
    if _print_desc:
        print(description)
        return None
    return description


def _reset_option(pat: str, silent: bool = False) -> None:
    """Reset all options matching ``pat`` to their default values."""
    keys = _select_options(pat)
    if not keys:
        raise OptionError("No such keys(s)")

    # Guard against accidentally resetting a broad swath of options with a
    # too-short pattern.
    if len(keys) > 1 and len(pat) < 4 and pat != "all":
        raise ValueError(
            "You must specify at least 4 characters when "
            "resetting multiple keys, use the special keyword "
            '"all" to reset all the options to their default value'
        )

    for key in keys:
        _set_option(key, _registered_options[key].defval, silent=silent)


def get_default_val(pat: str):
    """Return the default value of the single option matching ``pat``."""
    return _get_registered_option(_get_single_key(pat, silent=True)).defval


class DictWrapper:
    """Provide attribute-style access to a nested dict of options."""

    d: dict[str, Any]

    def __init__(self, d: dict[str, Any], prefix: str = "") -> None:
        # Bypass our own __setattr__, which only permits option assignment.
        object.__setattr__(self, "d", d)
        object.__setattr__(self, "prefix", prefix)

    def __setattr__(self, key: str, val: Any) -> None:
        prefix = object.__getattribute__(self, "prefix")
        if prefix:
            prefix += "."
        prefix += key
        # Only existing leaf options may be assigned; new keys and
        # subtree overwrites are rejected.
        if key in self.d and not isinstance(self.d[key], dict):
            _set_option(prefix, val)
        else:
            raise OptionError("You can only set the value of existing options")

    def __getattr__(self, key: str):
        prefix = object.__getattribute__(self, "prefix")
        if prefix:
            prefix += "."
        prefix += key
        try:
            v = object.__getattribute__(self, "d")[key]
        except KeyError as err:
            raise OptionError("No such option") from err
        if isinstance(v, dict):
            # Intermediate namespace: wrap the subtree.
            return DictWrapper(v, prefix)
        return _get_option(prefix)

    def __dir__(self) -> list[str]:
        return list(self.d.keys())
class CallableDynamicDoc(Generic[T]):
    """Wrap a function so its docstring is rendered lazily from a template.

    The template is formatted with ``{opts_list}`` and ``{opts_desc}`` so
    the public helpers (get_option & co.) always document the options
    registered at the time the docstring is read.
    """

    def __init__(self, func: Callable[..., T], doc_tmpl: str) -> None:
        self.__doc_tmpl__ = doc_tmpl
        self.__func__ = func

    def __call__(self, *args, **kwds) -> T:
        return self.__func__(*args, **kwds)

    # error: Signature of "__doc__" incompatible with supertype "object"
    @property
    def __doc__(self) -> str:  # type: ignore[override]
        opts_desc = _describe_option("all", _print_desc=False)
        opts_list = pp_options_list(list(_registered_options.keys()))
        return self.__doc_tmpl__.format(opts_desc=opts_desc, opts_list=opts_list)
class option_context(ContextDecorator):
    """
    Context manager to temporarily set options in the `with` statement context.

    You need to invoke as ``option_context(pat, val, [(pat, val), ...])``.

    Examples
    --------
    >>> from pandas import option_context
    >>> with option_context('display.max_rows', 10, 'display.max_columns', 5):
    ...     pass
    """

    def __init__(self, *args) -> None:
        nargs = len(args)
        if nargs < 2 or nargs % 2:
            raise ValueError(
                "Need to invoke as option_context(pat, val, [(pat, val), ...])."
            )
        # Pair up the alternating (pattern, value) arguments.
        self.ops = list(zip(args[::2], args[1::2]))

    def __enter__(self) -> None:
        # Snapshot current values so __exit__ can restore them.
        self.undo = [(pat, _get_option(pat)) for pat, _ in self.ops]
        for pat, val in self.ops:
            _set_option(pat, val, silent=True)

    def __exit__(self, *args) -> None:
        if self.undo:
            for pat, val in self.undo:
                _set_option(pat, val, silent=True)


def register_option(
    key: str,
    defval: object,
    doc: str = "",
    validator: Callable[[object], Any] | None = None,
    cb: Callable[[str], Any] | None = None,
) -> None:
    """
    Register an option in the package-wide pandas config object.

    Parameters
    ----------
    key : str
        Fully-qualified key, e.g. "x.y.option - z".
    defval : object
        Default value of the option.
    doc : str
        Description of the option.
    validator : Callable, optional
        Function of a single argument; should raise `ValueError` when
        called with a value which is not a legal value for the option.
    cb
        A function of a single argument "key", called immediately after
        an option value is set/reset; receives the full option name.

    Raises
    ------
    ValueError if `validator` is specified and `defval` is not a valid value.

    """
    import keyword
    import tokenize

    key = key.lower()

    if key in _registered_options:
        raise OptionError(f"Option '{key}' has already been registered")
    if key in _reserved_keys:
        raise OptionError(f"Option '{key}' is a reserved key")

    # The default value itself must pass validation.
    if validator:
        validator(defval)

    path = key.split(".")

    # Every dotted component must be usable as an attribute name.
    for part in path:
        if not re.match("^" + tokenize.Name + "$", part):
            raise ValueError(f"{part} is not a valid identifier")
        if keyword.iskeyword(part):
            raise ValueError(f"{part} is a python keyword")

    # Walk (creating as needed) the nested dict along the path, refusing
    # to descend through an existing leaf option.
    cursor = _global_config
    msg = "Path prefix to option '{option}' is already an option"
    for i, part in enumerate(path[:-1]):
        if not isinstance(cursor, dict):
            raise OptionError(msg.format(option=".".join(path[:i])))
        cursor = cursor.setdefault(part, {})

    if not isinstance(cursor, dict):
        raise OptionError(msg.format(option=".".join(path[:-1])))

    cursor[path[-1]] = defval  # initialize with the default

    # save the option metadata
    _registered_options[key] = RegisteredOption(
        key=key, defval=defval, doc=doc, validator=validator, cb=cb
    )
def deprecate_option(
    key: str,
    msg: str | None = None,
    rkey: str | None = None,
    removal_ver: str | None = None,
) -> None:
    """
    Mark option `key` as deprecated.

    Subsequent access to the option produces a warning (`msg` if given,
    otherwise a generated default).  When `rkey` is supplied, set/get/reset
    access to `key` is rerouted to `rkey`.  Neither the existence of `key`
    nor that of `rkey` is checked here; a missing key fails as usual after
    the deprecation warning is given.

    Parameters
    ----------
    key : str
        Fully-qualified name of the option being deprecated
        (e.g. "x.y.z.rkey").
    msg : str, optional
        Warning message to output when the key is referenced.
    rkey : str, optional
        Fully-qualified name of the replacement option, if any; used by
        the default message when no `msg` is specified.
    removal_ver : str, optional
        Version in which this option will be removed; used by the default
        message when no `msg` is specified.

    Raises
    ------
    OptionError
        If the specified key has already been deprecated.
    """
    key = key.lower()

    if key in _deprecated_options:
        raise OptionError(f"Option '{key}' has already been defined as deprecated.")

    _deprecated_options[key] = DeprecatedOption(key, msg, rkey, removal_ver)


#
# functions internal to the module


def _select_options(pat: str) -> list[str]:
    """Return the registered keys matching `pat` ("all" selects everything)."""
    # Exact matches short-circuit the regex scan.
    if pat in _registered_options:
        return [pat]

    keys = sorted(_registered_options)
    if pat == "all":  # reserved key
        return keys

    return [key for key in keys if re.search(pat, key, re.I)]


def _get_root(key: str) -> tuple[dict[str, Any], str]:
    """Walk the nested config dict; return ``(parent_dict, leaf_name)``."""
    *parents, leaf = key.split(".")
    cursor = _global_config
    for part in parents:
        cursor = cursor[part]
    return cursor, leaf


def _is_deprecated(key: str) -> bool:
    """Return True if the given option has been deprecated."""
    return key.lower() in _deprecated_options
def _get_deprecated_option(key: str):
    """
    Retrieve the metadata for a deprecated option, if `key` is deprecated.

    Returns
    -------
    DeprecatedOption (namedtuple) if key is deprecated, None otherwise
    """
    # dict.get expresses the try/except KeyError -> None pattern directly.
    return _deprecated_options.get(key)


def _get_registered_option(key: str):
    """
    Retrieve the option metadata if `key` is a registered option.

    Returns
    -------
    RegisteredOption (namedtuple) if key is registered, None otherwise
    """
    return _registered_options.get(key)


def _translate_key(key: str) -> str:
    """
    If `key` is deprecated and a replacement key is defined, return the
    replacement key; otherwise return `key` as-is.
    """
    d = _get_deprecated_option(key)
    if d:
        return d.rkey or key
    return key


def _warn_if_deprecated(key: str) -> bool:
    """
    Check whether `key` is a deprecated option and, if so, emit a
    FutureWarning.

    Returns
    -------
    bool - True if `key` is deprecated, False otherwise.
    """
    d = _get_deprecated_option(key)
    if not d:
        return False

    if d.msg:
        warnings.warn(
            d.msg,
            FutureWarning,
            stacklevel=find_stack_level(),
        )
    else:
        # Build the default message from whatever metadata is available.
        msg = f"'{key}' is deprecated"
        if d.removal_ver:
            msg += f" and will be removed in {d.removal_ver}"
        if d.rkey:
            msg += f", please use '{d.rkey}' instead."
        else:
            msg += ", please refrain from using it."

        warnings.warn(msg, FutureWarning, stacklevel=find_stack_level())
    return True
+ s += ")" + + return s + + +def pp_options_list(keys: Iterable[str], width: int = 80, _print: bool = False): + """Builds a concise listing of available options, grouped by prefix""" + from itertools import groupby + from textwrap import wrap + + def pp(name: str, ks: Iterable[str]) -> list[str]: + pfx = "- " + name + ".[" if name else "" + ls = wrap( + ", ".join(ks), + width, + initial_indent=pfx, + subsequent_indent=" ", + break_long_words=False, + ) + if ls and ls[-1] and name: + ls[-1] = ls[-1] + "]" + return ls + + ls: list[str] = [] + singles = [x for x in sorted(keys) if x.find(".") < 0] + if singles: + ls += pp("", singles) + keys = [x for x in keys if x.find(".") >= 0] + + for k, g in groupby(sorted(keys), lambda x: x[: x.rfind(".")]): + ks = [x[len(k) + 1 :] for x in list(g)] + ls += pp(k, ks) + s = "\n".join(ls) + if _print: + print(s) + else: + return s + + +# +# helpers + + +@contextmanager +def config_prefix(prefix: str) -> Generator[None, None, None]: + """ + contextmanager for multiple invocations of API with a common prefix + + supported API functions: (register / get / set )__option + + Warning: This is not thread - safe, and won't work properly if you import + the API functions into your module using the "from x import y" construct. + + Example + ------- + import pandas._config.config as cf + with cf.config_prefix("display.font"): + cf.register_option("color", "red") + cf.register_option("size", " 5 pt") + cf.set_option(size, " 6 pt") + cf.get_option(size) + ... + + etc' + + will register options "display.font.color", "display.font.size", set the + value of "display.font.size"... and so on. 
+ """ + # Note: reset_option relies on set_option, and on key directly + # it does not fit in to this monkey-patching scheme + + global register_option, get_option, set_option + + def wrap(func: F) -> F: + def inner(key: str, *args, **kwds): + pkey = f"{prefix}.{key}" + return func(pkey, *args, **kwds) + + return cast(F, inner) + + _register_option = register_option + _get_option = get_option + _set_option = set_option + set_option = wrap(set_option) + get_option = wrap(get_option) + register_option = wrap(register_option) + try: + yield + finally: + set_option = _set_option + get_option = _get_option + register_option = _register_option + + +# These factories and methods are handy for use as the validator +# arg in register_option + + +def is_type_factory(_type: type[Any]) -> Callable[[Any], None]: + """ + + Parameters + ---------- + `_type` - a type to be compared against (e.g. type(x) == `_type`) + + Returns + ------- + validator - a function of a single argument x , which raises + ValueError if type(x) is not equal to `_type` + + """ + + def inner(x) -> None: + if type(x) != _type: + raise ValueError(f"Value must have type '{_type}'") + + return inner + + +def is_instance_factory(_type) -> Callable[[Any], None]: + """ + + Parameters + ---------- + `_type` - the type to be checked against + + Returns + ------- + validator - a function of a single argument x , which raises + ValueError if x is not an instance of `_type` + + """ + if isinstance(_type, (tuple, list)): + _type = tuple(_type) + type_repr = "|".join(map(str, _type)) + else: + type_repr = f"'{_type}'" + + def inner(x) -> None: + if not isinstance(x, _type): + raise ValueError(f"Value must be an instance of {type_repr}") + + return inner + + +def is_one_of_factory(legal_values) -> Callable[[Any], None]: + callables = [c for c in legal_values if callable(c)] + legal_values = [c for c in legal_values if not callable(c)] + + def inner(x) -> None: + if x not in legal_values: + if not any(c(x) for c in 
def is_nonnegative_int(value: object) -> None:
    """
    Verify that value is None or a nonnegative int.

    Parameters
    ----------
    value : None or int
        The `value` to be checked.

    Raises
    ------
    ValueError
        When the value is neither None nor a nonnegative integer
    """
    # NOTE: the original docstring said "positive int", but 0 has always
    # been accepted — the check below is `>= 0` (nonnegative).
    if value is None:
        return

    if isinstance(value, int) and value >= 0:
        return

    raise ValueError("Value must be a nonnegative integer or None")


def is_callable(obj) -> bool:
    """
    Check whether `obj` is callable.

    Parameters
    ----------
    `obj` - the object to be checked

    Returns
    -------
    validator - returns True if object is callable,
        raises ValueError otherwise.

    """
    if not callable(obj):
        raise ValueError("Value must be a callable")
    return True
# ---------------------------------------------------------------------------
# pandas/_config/dates.py
# Registers the display.date_dayfirst / display.date_yearfirst options at
# import time.
# ---------------------------------------------------------------------------
"""
config for datetime formatting
"""
from __future__ import annotations

from pandas._config import config as cf

# Doc text shown by pd.describe_option for each registered option.
pc_date_dayfirst_doc = """
: boolean
    When True, prints and parses dates with the day first, eg 20/01/2005
"""

pc_date_yearfirst_doc = """
: boolean
    When True, prints and parses dates with the year first, eg 2005/01/20
"""

with cf.config_prefix("display"):
    # Needed upstream of `_libs` because these are used in tslibs.parsing
    cf.register_option(
        "date_dayfirst", False, pc_date_dayfirst_doc, validator=cf.is_bool
    )
    cf.register_option(
        "date_yearfirst", False, pc_date_yearfirst_doc, validator=cf.is_bool
    )

# ---------------------------------------------------------------------------
# pandas/_config/display.py
# ---------------------------------------------------------------------------
"""
Unopinionated display configuration.
"""

from __future__ import annotations

import locale
import sys

from pandas._config import config as cf

# -----------------------------------------------------------------------------
# Global formatting options

# Default encoding reported by the interpreter, recorded once at import time
# (GH#3360); mutated by detect_console_encoding below and kept for debugging.
_initial_defencoding: str | None = None


def detect_console_encoding() -> str:
    """
    Try to find the most capable encoding supported by the console.
    slightly modified from the way IPython handles the same issue.

    Returns
    -------
    str
        The best available console encoding; falls back through
        locale.getpreferredencoding() to sys.getdefaultencoding().
    """
    global _initial_defencoding

    encoding = None
    # stdout/stdin may be detached or replaced (e.g. under test capture),
    # in which case reading .encoding can raise or yield None.
    try:
        encoding = sys.stdout.encoding or sys.stdin.encoding
    except (AttributeError, OSError):
        pass

    # try again for something better
    if not encoding or "ascii" in encoding.lower():
        try:
            encoding = locale.getpreferredencoding()
        except locale.Error:
            # can be raised by locale.setlocale(), which is
            # called by getpreferredencoding
            # (on some systems, see stdlib locale docs)
            pass

    # when all else fails. this will usually be "ascii"
    if not encoding or "ascii" in encoding.lower():
        encoding = sys.getdefaultencoding()

    # GH#3360, save the reported defencoding at import time
    # MPL backends may change it. Make available for debugging.
    if not _initial_defencoding:
        _initial_defencoding = sys.getdefaultencoding()

    return encoding


pc_encoding_doc = """
: str/unicode
    Defaults to the detected encoding of the console.
    Specifies the encoding to be used for strings returned by to_string,
    these are generally strings meant to be displayed on the console.
"""

with cf.config_prefix("display"):
    cf.register_option(
        "encoding", detect_console_encoding(), pc_encoding_doc, validator=cf.is_text
    )
# NOTE: the module docstring ("Helpers for configuring locale settings. Name
# `localization` is chosen to avoid overlap with builtin `locale` module.")
# opens immediately above this block.
from __future__ import annotations

from contextlib import contextmanager
import locale
import platform
import re
import subprocess
from typing import TYPE_CHECKING

from pandas._config.config import options

if TYPE_CHECKING:
    from collections.abc import Generator


@contextmanager
def set_locale(
    new_locale: str | tuple[str, str], lc_var: int = locale.LC_ALL
) -> Generator[str | tuple[str, str], None, None]:
    """
    Context manager for temporarily setting a locale.

    Parameters
    ----------
    new_locale : str or tuple
        A string of the form ``<language_country>.<encoding>``. For example
        to set the current locale to US English with a UTF8 encoding, you
        would pass "en_US.UTF-8".
    lc_var : int, default `locale.LC_ALL`
        The category of the locale being set.

    Yields
    ------
    str or tuple
        The normalized "code.encoding" form when both parts are available
        from ``locale.getlocale()``, otherwise `new_locale` unchanged.

    Notes
    -----
    This is useful when you want to run a particular block of code under a
    particular locale, without globally setting the locale. This probably isn't
    thread-safe.
    """
    # getlocale is not always compliant with setlocale, use setlocale. GH#46595
    current_locale = locale.setlocale(lc_var)

    try:
        locale.setlocale(lc_var, new_locale)
        normalized_code, normalized_encoding = locale.getlocale()
        if normalized_code is not None and normalized_encoding is not None:
            yield f"{normalized_code}.{normalized_encoding}"
        else:
            yield new_locale
    finally:
        # Always restore the locale that was active on entry.
        locale.setlocale(lc_var, current_locale)


def can_set_locale(lc: str, lc_var: int = locale.LC_ALL) -> bool:
    """
    Check to see if we can set a locale, and subsequently get the locale,
    without raising an Exception.

    Parameters
    ----------
    lc : str
        The locale to attempt to set.
    lc_var : int, default `locale.LC_ALL`
        The category of the locale being set.

    Returns
    -------
    bool
        Whether the passed locale can be set
    """
    try:
        with set_locale(lc, lc_var=lc_var):
            pass
    except (ValueError, locale.Error):
        # locale.Error: horrible name for an Exception subclass
        return False
    else:
        return True


def _valid_locales(locales: list[str] | str, normalize: bool) -> list[str]:
    """
    Return a list of normalized locales that do not throw an ``Exception``
    when set.

    Parameters
    ----------
    locales : list of str
        Candidate locale names.
        NOTE(review): the annotation also admits a plain ``str``; iterating a
        string here yields single characters — confirm callers only pass lists.
    normalize : bool
        Whether to call ``locale.normalize`` on each locale.

    Returns
    -------
    valid_locales : list
        A list of valid locales.
    """
    return [
        loc
        for loc in (
            locale.normalize(loc.strip()) if normalize else loc.strip()
            for loc in locales
        )
        if can_set_locale(loc)
    ]


def get_locales(
    prefix: str | None = None,
    normalize: bool = True,
) -> list[str]:
    """
    Get all the locales that are available on the system.

    Parameters
    ----------
    prefix : str
        If not ``None`` then return only those locales with the prefix
        provided. For example to get all English language locales (those that
        start with ``"en"``), pass ``prefix="en"``.
    normalize : bool
        Call ``locale.normalize`` on the resulting list of available locales.
        If ``True``, only locales that can be set without throwing an
        ``Exception`` are returned.

    Returns
    -------
    locales : list of strings
        A list of locale strings that can be set with ``locale.setlocale()``.
        For example::

            locale.setlocale(locale.LC_ALL, locale_string)

        On error will return an empty list (no locale available, e.g. Windows)

    """
    if platform.system() in ("Linux", "Darwin"):
        raw_locales = subprocess.check_output(["locale", "-a"])
    else:
        # Other platforms e.g. windows platforms don't define "locale -a"
        #  Note: is_platform_windows causes circular import here
        return []

    try:
        # raw_locales is "\n" separated list of locales
        # it may contain non-decodable parts, so split
        # extract what we can and then rejoin.
        split_raw_locales = raw_locales.split(b"\n")
        out_locales = []
        for x in split_raw_locales:
            try:
                out_locales.append(str(x, encoding=options.display.encoding))
            except UnicodeError:
                # 'locale -a' is used to populate 'raw_locales' and on
                # Redhat 7 Linux (and maybe others) prints locale names
                # using windows-1252 encoding. Bug only triggered by
                # a few special characters and when there is an
                # extensive list of installed locales.
                out_locales.append(str(x, encoding="windows-1252"))

    except TypeError:
        # NOTE(review): if this fires before ``out_locales`` is bound, the
        # code below raises NameError rather than returning [] — confirm
        # whether swallowing TypeError here is still needed.
        pass

    if prefix is None:
        return _valid_locales(out_locales, normalize)

    # Filter by prefix before validating each candidate.
    pattern = re.compile(f"{prefix}.*")
    found = pattern.findall("\n".join(out_locales))
    return _valid_locales(found, normalize)
# np.ndarray[np.int64, ndim=1] +def is_lexsorted(list_of_arrays: list[npt.NDArray[np.int64]]) -> bool: ... +def groupsort_indexer( + index: np.ndarray, # const int64_t[:] + ngroups: int, +) -> tuple[ + np.ndarray, # ndarray[int64_t, ndim=1] + np.ndarray, # ndarray[int64_t, ndim=1] +]: ... +def kth_smallest( + arr: np.ndarray, # numeric[:] + k: int, +) -> Any: ... # numeric + +# ---------------------------------------------------------------------- +# Pairwise correlation/covariance + +def nancorr( + mat: npt.NDArray[np.float64], # const float64_t[:, :] + cov: bool = ..., + minp: int | None = ..., +) -> npt.NDArray[np.float64]: ... # ndarray[float64_t, ndim=2] +def nancorr_spearman( + mat: npt.NDArray[np.float64], # ndarray[float64_t, ndim=2] + minp: int = ..., +) -> npt.NDArray[np.float64]: ... # ndarray[float64_t, ndim=2] + +# ---------------------------------------------------------------------- + +def validate_limit(nobs: int | None, limit=...) -> int: ... +def get_fill_indexer( + mask: npt.NDArray[np.bool_], + limit: int | None = None, +) -> npt.NDArray[np.intp]: ... +def pad( + old: np.ndarray, # ndarray[numeric_object_t] + new: np.ndarray, # ndarray[numeric_object_t] + limit=..., +) -> npt.NDArray[np.intp]: ... # np.ndarray[np.intp, ndim=1] +def pad_inplace( + values: np.ndarray, # numeric_object_t[:] + mask: np.ndarray, # uint8_t[:] + limit=..., +) -> None: ... +def pad_2d_inplace( + values: np.ndarray, # numeric_object_t[:, :] + mask: np.ndarray, # const uint8_t[:, :] + limit=..., +) -> None: ... +def backfill( + old: np.ndarray, # ndarray[numeric_object_t] + new: np.ndarray, # ndarray[numeric_object_t] + limit=..., +) -> npt.NDArray[np.intp]: ... # np.ndarray[np.intp, ndim=1] +def backfill_inplace( + values: np.ndarray, # numeric_object_t[:] + mask: np.ndarray, # uint8_t[:] + limit=..., +) -> None: ... +def backfill_2d_inplace( + values: np.ndarray, # numeric_object_t[:, :] + mask: np.ndarray, # const uint8_t[:, :] + limit=..., +) -> None: ... 
+def is_monotonic( + arr: np.ndarray, # ndarray[numeric_object_t, ndim=1] + timelike: bool, +) -> tuple[bool, bool, bool]: ... + +# ---------------------------------------------------------------------- +# rank_1d, rank_2d +# ---------------------------------------------------------------------- + +def rank_1d( + values: np.ndarray, # ndarray[numeric_object_t, ndim=1] + labels: np.ndarray | None = ..., # const int64_t[:]=None + is_datetimelike: bool = ..., + ties_method=..., + ascending: bool = ..., + pct: bool = ..., + na_option=..., + mask: npt.NDArray[np.bool_] | None = ..., +) -> np.ndarray: ... # np.ndarray[float64_t, ndim=1] +def rank_2d( + in_arr: np.ndarray, # ndarray[numeric_object_t, ndim=2] + axis: int = ..., + is_datetimelike: bool = ..., + ties_method=..., + ascending: bool = ..., + na_option=..., + pct: bool = ..., +) -> np.ndarray: ... # np.ndarray[float64_t, ndim=1] +def diff_2d( + arr: np.ndarray, # ndarray[diff_t, ndim=2] + out: np.ndarray, # ndarray[out_t, ndim=2] + periods: int, + axis: int, + datetimelike: bool = ..., +) -> None: ... +def ensure_platform_int(arr: object) -> npt.NDArray[np.intp]: ... +def ensure_object(arr: object) -> npt.NDArray[np.object_]: ... +def ensure_float64(arr: object) -> npt.NDArray[np.float64]: ... +def ensure_int8(arr: object) -> npt.NDArray[np.int8]: ... +def ensure_int16(arr: object) -> npt.NDArray[np.int16]: ... +def ensure_int32(arr: object) -> npt.NDArray[np.int32]: ... +def ensure_int64(arr: object) -> npt.NDArray[np.int64]: ... +def ensure_uint64(arr: object) -> npt.NDArray[np.uint64]: ... +def take_1d_int8_int8( + values: np.ndarray, indexer: npt.NDArray[np.intp], out: np.ndarray, fill_value=... +) -> None: ... +def take_1d_int8_int32( + values: np.ndarray, indexer: npt.NDArray[np.intp], out: np.ndarray, fill_value=... +) -> None: ... +def take_1d_int8_int64( + values: np.ndarray, indexer: npt.NDArray[np.intp], out: np.ndarray, fill_value=... +) -> None: ... 
+def take_1d_int8_float64( + values: np.ndarray, indexer: npt.NDArray[np.intp], out: np.ndarray, fill_value=... +) -> None: ... +def take_1d_int16_int16( + values: np.ndarray, indexer: npt.NDArray[np.intp], out: np.ndarray, fill_value=... +) -> None: ... +def take_1d_int16_int32( + values: np.ndarray, indexer: npt.NDArray[np.intp], out: np.ndarray, fill_value=... +) -> None: ... +def take_1d_int16_int64( + values: np.ndarray, indexer: npt.NDArray[np.intp], out: np.ndarray, fill_value=... +) -> None: ... +def take_1d_int16_float64( + values: np.ndarray, indexer: npt.NDArray[np.intp], out: np.ndarray, fill_value=... +) -> None: ... +def take_1d_int32_int32( + values: np.ndarray, indexer: npt.NDArray[np.intp], out: np.ndarray, fill_value=... +) -> None: ... +def take_1d_int32_int64( + values: np.ndarray, indexer: npt.NDArray[np.intp], out: np.ndarray, fill_value=... +) -> None: ... +def take_1d_int32_float64( + values: np.ndarray, indexer: npt.NDArray[np.intp], out: np.ndarray, fill_value=... +) -> None: ... +def take_1d_int64_int64( + values: np.ndarray, indexer: npt.NDArray[np.intp], out: np.ndarray, fill_value=... +) -> None: ... +def take_1d_int64_float64( + values: np.ndarray, indexer: npt.NDArray[np.intp], out: np.ndarray, fill_value=... +) -> None: ... +def take_1d_float32_float32( + values: np.ndarray, indexer: npt.NDArray[np.intp], out: np.ndarray, fill_value=... +) -> None: ... +def take_1d_float32_float64( + values: np.ndarray, indexer: npt.NDArray[np.intp], out: np.ndarray, fill_value=... +) -> None: ... +def take_1d_float64_float64( + values: np.ndarray, indexer: npt.NDArray[np.intp], out: np.ndarray, fill_value=... +) -> None: ... +def take_1d_object_object( + values: np.ndarray, indexer: npt.NDArray[np.intp], out: np.ndarray, fill_value=... +) -> None: ... +def take_1d_bool_bool( + values: np.ndarray, indexer: npt.NDArray[np.intp], out: np.ndarray, fill_value=... +) -> None: ... 
+def take_1d_bool_object( + values: np.ndarray, indexer: npt.NDArray[np.intp], out: np.ndarray, fill_value=... +) -> None: ... +def take_2d_axis0_int8_int8( + values: np.ndarray, indexer: npt.NDArray[np.intp], out: np.ndarray, fill_value=... +) -> None: ... +def take_2d_axis0_int8_int32( + values: np.ndarray, indexer: npt.NDArray[np.intp], out: np.ndarray, fill_value=... +) -> None: ... +def take_2d_axis0_int8_int64( + values: np.ndarray, indexer: npt.NDArray[np.intp], out: np.ndarray, fill_value=... +) -> None: ... +def take_2d_axis0_int8_float64( + values: np.ndarray, indexer: npt.NDArray[np.intp], out: np.ndarray, fill_value=... +) -> None: ... +def take_2d_axis0_int16_int16( + values: np.ndarray, indexer: npt.NDArray[np.intp], out: np.ndarray, fill_value=... +) -> None: ... +def take_2d_axis0_int16_int32( + values: np.ndarray, indexer: npt.NDArray[np.intp], out: np.ndarray, fill_value=... +) -> None: ... +def take_2d_axis0_int16_int64( + values: np.ndarray, indexer: npt.NDArray[np.intp], out: np.ndarray, fill_value=... +) -> None: ... +def take_2d_axis0_int16_float64( + values: np.ndarray, indexer: npt.NDArray[np.intp], out: np.ndarray, fill_value=... +) -> None: ... +def take_2d_axis0_int32_int32( + values: np.ndarray, indexer: npt.NDArray[np.intp], out: np.ndarray, fill_value=... +) -> None: ... +def take_2d_axis0_int32_int64( + values: np.ndarray, indexer: npt.NDArray[np.intp], out: np.ndarray, fill_value=... +) -> None: ... +def take_2d_axis0_int32_float64( + values: np.ndarray, indexer: npt.NDArray[np.intp], out: np.ndarray, fill_value=... +) -> None: ... +def take_2d_axis0_int64_int64( + values: np.ndarray, indexer: npt.NDArray[np.intp], out: np.ndarray, fill_value=... +) -> None: ... +def take_2d_axis0_int64_float64( + values: np.ndarray, indexer: npt.NDArray[np.intp], out: np.ndarray, fill_value=... +) -> None: ... +def take_2d_axis0_float32_float32( + values: np.ndarray, indexer: npt.NDArray[np.intp], out: np.ndarray, fill_value=... +) -> None: ... 
+def take_2d_axis0_float32_float64( + values: np.ndarray, indexer: npt.NDArray[np.intp], out: np.ndarray, fill_value=... +) -> None: ... +def take_2d_axis0_float64_float64( + values: np.ndarray, indexer: npt.NDArray[np.intp], out: np.ndarray, fill_value=... +) -> None: ... +def take_2d_axis0_object_object( + values: np.ndarray, indexer: npt.NDArray[np.intp], out: np.ndarray, fill_value=... +) -> None: ... +def take_2d_axis0_bool_bool( + values: np.ndarray, indexer: npt.NDArray[np.intp], out: np.ndarray, fill_value=... +) -> None: ... +def take_2d_axis0_bool_object( + values: np.ndarray, indexer: npt.NDArray[np.intp], out: np.ndarray, fill_value=... +) -> None: ... +def take_2d_axis1_int8_int8( + values: np.ndarray, indexer: npt.NDArray[np.intp], out: np.ndarray, fill_value=... +) -> None: ... +def take_2d_axis1_int8_int32( + values: np.ndarray, indexer: npt.NDArray[np.intp], out: np.ndarray, fill_value=... +) -> None: ... +def take_2d_axis1_int8_int64( + values: np.ndarray, indexer: npt.NDArray[np.intp], out: np.ndarray, fill_value=... +) -> None: ... +def take_2d_axis1_int8_float64( + values: np.ndarray, indexer: npt.NDArray[np.intp], out: np.ndarray, fill_value=... +) -> None: ... +def take_2d_axis1_int16_int16( + values: np.ndarray, indexer: npt.NDArray[np.intp], out: np.ndarray, fill_value=... +) -> None: ... +def take_2d_axis1_int16_int32( + values: np.ndarray, indexer: npt.NDArray[np.intp], out: np.ndarray, fill_value=... +) -> None: ... +def take_2d_axis1_int16_int64( + values: np.ndarray, indexer: npt.NDArray[np.intp], out: np.ndarray, fill_value=... +) -> None: ... +def take_2d_axis1_int16_float64( + values: np.ndarray, indexer: npt.NDArray[np.intp], out: np.ndarray, fill_value=... +) -> None: ... +def take_2d_axis1_int32_int32( + values: np.ndarray, indexer: npt.NDArray[np.intp], out: np.ndarray, fill_value=... +) -> None: ... +def take_2d_axis1_int32_int64( + values: np.ndarray, indexer: npt.NDArray[np.intp], out: np.ndarray, fill_value=... 
+) -> None: ... +def take_2d_axis1_int32_float64( + values: np.ndarray, indexer: npt.NDArray[np.intp], out: np.ndarray, fill_value=... +) -> None: ... +def take_2d_axis1_int64_int64( + values: np.ndarray, indexer: npt.NDArray[np.intp], out: np.ndarray, fill_value=... +) -> None: ... +def take_2d_axis1_int64_float64( + values: np.ndarray, indexer: npt.NDArray[np.intp], out: np.ndarray, fill_value=... +) -> None: ... +def take_2d_axis1_float32_float32( + values: np.ndarray, indexer: npt.NDArray[np.intp], out: np.ndarray, fill_value=... +) -> None: ... +def take_2d_axis1_float32_float64( + values: np.ndarray, indexer: npt.NDArray[np.intp], out: np.ndarray, fill_value=... +) -> None: ... +def take_2d_axis1_float64_float64( + values: np.ndarray, indexer: npt.NDArray[np.intp], out: np.ndarray, fill_value=... +) -> None: ... +def take_2d_axis1_object_object( + values: np.ndarray, indexer: npt.NDArray[np.intp], out: np.ndarray, fill_value=... +) -> None: ... +def take_2d_axis1_bool_bool( + values: np.ndarray, indexer: npt.NDArray[np.intp], out: np.ndarray, fill_value=... +) -> None: ... +def take_2d_axis1_bool_object( + values: np.ndarray, indexer: npt.NDArray[np.intp], out: np.ndarray, fill_value=... +) -> None: ... +def take_2d_multi_int8_int8( + values: np.ndarray, + indexer: tuple[npt.NDArray[np.intp], npt.NDArray[np.intp]], + out: np.ndarray, + fill_value=..., +) -> None: ... +def take_2d_multi_int8_int32( + values: np.ndarray, + indexer: tuple[npt.NDArray[np.intp], npt.NDArray[np.intp]], + out: np.ndarray, + fill_value=..., +) -> None: ... +def take_2d_multi_int8_int64( + values: np.ndarray, + indexer: tuple[npt.NDArray[np.intp], npt.NDArray[np.intp]], + out: np.ndarray, + fill_value=..., +) -> None: ... +def take_2d_multi_int8_float64( + values: np.ndarray, + indexer: tuple[npt.NDArray[np.intp], npt.NDArray[np.intp]], + out: np.ndarray, + fill_value=..., +) -> None: ... 
+def take_2d_multi_int16_int16( + values: np.ndarray, + indexer: tuple[npt.NDArray[np.intp], npt.NDArray[np.intp]], + out: np.ndarray, + fill_value=..., +) -> None: ... +def take_2d_multi_int16_int32( + values: np.ndarray, + indexer: tuple[npt.NDArray[np.intp], npt.NDArray[np.intp]], + out: np.ndarray, + fill_value=..., +) -> None: ... +def take_2d_multi_int16_int64( + values: np.ndarray, + indexer: tuple[npt.NDArray[np.intp], npt.NDArray[np.intp]], + out: np.ndarray, + fill_value=..., +) -> None: ... +def take_2d_multi_int16_float64( + values: np.ndarray, + indexer: tuple[npt.NDArray[np.intp], npt.NDArray[np.intp]], + out: np.ndarray, + fill_value=..., +) -> None: ... +def take_2d_multi_int32_int32( + values: np.ndarray, + indexer: tuple[npt.NDArray[np.intp], npt.NDArray[np.intp]], + out: np.ndarray, + fill_value=..., +) -> None: ... +def take_2d_multi_int32_int64( + values: np.ndarray, + indexer: tuple[npt.NDArray[np.intp], npt.NDArray[np.intp]], + out: np.ndarray, + fill_value=..., +) -> None: ... +def take_2d_multi_int32_float64( + values: np.ndarray, + indexer: tuple[npt.NDArray[np.intp], npt.NDArray[np.intp]], + out: np.ndarray, + fill_value=..., +) -> None: ... +def take_2d_multi_int64_float64( + values: np.ndarray, + indexer: tuple[npt.NDArray[np.intp], npt.NDArray[np.intp]], + out: np.ndarray, + fill_value=..., +) -> None: ... +def take_2d_multi_float32_float32( + values: np.ndarray, + indexer: tuple[npt.NDArray[np.intp], npt.NDArray[np.intp]], + out: np.ndarray, + fill_value=..., +) -> None: ... +def take_2d_multi_float32_float64( + values: np.ndarray, + indexer: tuple[npt.NDArray[np.intp], npt.NDArray[np.intp]], + out: np.ndarray, + fill_value=..., +) -> None: ... +def take_2d_multi_float64_float64( + values: np.ndarray, + indexer: tuple[npt.NDArray[np.intp], npt.NDArray[np.intp]], + out: np.ndarray, + fill_value=..., +) -> None: ... 
+def take_2d_multi_object_object( + values: np.ndarray, + indexer: tuple[npt.NDArray[np.intp], npt.NDArray[np.intp]], + out: np.ndarray, + fill_value=..., +) -> None: ... +def take_2d_multi_bool_bool( + values: np.ndarray, + indexer: tuple[npt.NDArray[np.intp], npt.NDArray[np.intp]], + out: np.ndarray, + fill_value=..., +) -> None: ... +def take_2d_multi_bool_object( + values: np.ndarray, + indexer: tuple[npt.NDArray[np.intp], npt.NDArray[np.intp]], + out: np.ndarray, + fill_value=..., +) -> None: ... +def take_2d_multi_int64_int64( + values: np.ndarray, + indexer: tuple[npt.NDArray[np.intp], npt.NDArray[np.intp]], + out: np.ndarray, + fill_value=..., +) -> None: ... diff --git a/lib/python3.10/site-packages/pandas/_libs/missing.pyi b/lib/python3.10/site-packages/pandas/_libs/missing.pyi new file mode 100644 index 0000000000000000000000000000000000000000..282dcee3ed6cfdd5cd83e5c78a69c422831b4cac --- /dev/null +++ b/lib/python3.10/site-packages/pandas/_libs/missing.pyi @@ -0,0 +1,16 @@ +import numpy as np +from numpy import typing as npt + +class NAType: + def __new__(cls, *args, **kwargs): ... + +NA: NAType + +def is_matching_na( + left: object, right: object, nan_matches_none: bool = ... +) -> bool: ... +def isposinf_scalar(val: object) -> bool: ... +def isneginf_scalar(val: object) -> bool: ... +def checknull(val: object, inf_as_na: bool = ...) -> bool: ... +def isnaobj(arr: np.ndarray, inf_as_na: bool = ...) -> npt.NDArray[np.bool_]: ... +def is_numeric_na(values: np.ndarray) -> npt.NDArray[np.bool_]: ... 
diff --git a/lib/python3.10/site-packages/pandas/_typing.py b/lib/python3.10/site-packages/pandas/_typing.py new file mode 100644 index 0000000000000000000000000000000000000000..3df9a47a35fca32547947560a8df1cea1d1863c2 --- /dev/null +++ b/lib/python3.10/site-packages/pandas/_typing.py @@ -0,0 +1,525 @@ +from __future__ import annotations + +from collections.abc import ( + Hashable, + Iterator, + Mapping, + MutableMapping, + Sequence, +) +from datetime import ( + date, + datetime, + timedelta, + tzinfo, +) +from os import PathLike +import sys +from typing import ( + TYPE_CHECKING, + Any, + Callable, + Literal, + Optional, + Protocol, + Type as type_t, + TypeVar, + Union, + overload, +) + +import numpy as np + +# To prevent import cycles place any internal imports in the branch below +# and use a string literal forward reference to it in subsequent types +# https://mypy.readthedocs.io/en/latest/common_issues.html#import-cycles +if TYPE_CHECKING: + import numpy.typing as npt + + from pandas._libs import ( + NaTType, + Period, + Timedelta, + Timestamp, + ) + from pandas._libs.tslibs import BaseOffset + + from pandas.core.dtypes.dtypes import ExtensionDtype + + from pandas import Interval + from pandas.arrays import ( + DatetimeArray, + TimedeltaArray, + ) + from pandas.core.arrays.base import ExtensionArray + from pandas.core.frame import DataFrame + from pandas.core.generic import NDFrame + from pandas.core.groupby.generic import ( + DataFrameGroupBy, + GroupBy, + SeriesGroupBy, + ) + from pandas.core.indexes.base import Index + from pandas.core.internals import ( + ArrayManager, + BlockManager, + SingleArrayManager, + SingleBlockManager, + ) + from pandas.core.resample import Resampler + from pandas.core.series import Series + from pandas.core.window.rolling import BaseWindow + + from pandas.io.formats.format import EngFormatter + from pandas.tseries.holiday import AbstractHolidayCalendar + + ScalarLike_co = Union[ + int, + float, + complex, + str, + bytes, + 
np.generic, + ] + + # numpy compatible types + NumpyValueArrayLike = Union[ScalarLike_co, npt.ArrayLike] + # Name "npt._ArrayLikeInt_co" is not defined [name-defined] + NumpySorter = Optional[npt._ArrayLikeInt_co] # type: ignore[name-defined] + + from typing import SupportsIndex + + if sys.version_info >= (3, 10): + from typing import TypeGuard # pyright: ignore[reportUnusedImport] + else: + from typing_extensions import TypeGuard # pyright: ignore[reportUnusedImport] + + if sys.version_info >= (3, 11): + from typing import Self # pyright: ignore[reportUnusedImport] + else: + from typing_extensions import Self # pyright: ignore[reportUnusedImport] +else: + npt: Any = None + Self: Any = None + TypeGuard: Any = None + +HashableT = TypeVar("HashableT", bound=Hashable) +MutableMappingT = TypeVar("MutableMappingT", bound=MutableMapping) + +# array-like + +ArrayLike = Union["ExtensionArray", np.ndarray] +AnyArrayLike = Union[ArrayLike, "Index", "Series"] +TimeArrayLike = Union["DatetimeArray", "TimedeltaArray"] + +# list-like + +# from https://github.com/hauntsaninja/useful_types +# includes Sequence-like objects but excludes str and bytes +_T_co = TypeVar("_T_co", covariant=True) + + +class SequenceNotStr(Protocol[_T_co]): + @overload + def __getitem__(self, index: SupportsIndex, /) -> _T_co: + ... + + @overload + def __getitem__(self, index: slice, /) -> Sequence[_T_co]: + ... + + def __contains__(self, value: object, /) -> bool: + ... + + def __len__(self) -> int: + ... + + def __iter__(self) -> Iterator[_T_co]: + ... + + def index(self, value: Any, /, start: int = 0, stop: int = ...) -> int: + ... + + def count(self, value: Any, /) -> int: + ... + + def __reversed__(self) -> Iterator[_T_co]: + ... 
+ + +ListLike = Union[AnyArrayLike, SequenceNotStr, range] + +# scalars + +PythonScalar = Union[str, float, bool] +DatetimeLikeScalar = Union["Period", "Timestamp", "Timedelta"] +PandasScalar = Union["Period", "Timestamp", "Timedelta", "Interval"] +Scalar = Union[PythonScalar, PandasScalar, np.datetime64, np.timedelta64, date] +IntStrT = TypeVar("IntStrT", bound=Union[int, str]) + + +# timestamp and timedelta convertible types + +TimestampConvertibleTypes = Union[ + "Timestamp", date, np.datetime64, np.int64, float, str +] +TimestampNonexistent = Union[ + Literal["shift_forward", "shift_backward", "NaT", "raise"], timedelta +] +TimedeltaConvertibleTypes = Union[ + "Timedelta", timedelta, np.timedelta64, np.int64, float, str +] +Timezone = Union[str, tzinfo] + +ToTimestampHow = Literal["s", "e", "start", "end"] + +# NDFrameT is stricter and ensures that the same subclass of NDFrame always is +# used. E.g. `def func(a: NDFrameT) -> NDFrameT: ...` means that if a +# Series is passed into a function, a Series is always returned and if a DataFrame is +# passed in, a DataFrame is always returned. +NDFrameT = TypeVar("NDFrameT", bound="NDFrame") + +NumpyIndexT = TypeVar("NumpyIndexT", np.ndarray, "Index") + +AxisInt = int +Axis = Union[AxisInt, Literal["index", "columns", "rows"]] +IndexLabel = Union[Hashable, Sequence[Hashable]] +Level = Hashable +Shape = tuple[int, ...] 
+Suffixes = tuple[Optional[str], Optional[str]] +Ordered = Optional[bool] +JSONSerializable = Optional[Union[PythonScalar, list, dict]] +Frequency = Union[str, "BaseOffset"] +Axes = ListLike + +RandomState = Union[ + int, + np.ndarray, + np.random.Generator, + np.random.BitGenerator, + np.random.RandomState, +] + +# dtypes +NpDtype = Union[str, np.dtype, type_t[Union[str, complex, bool, object]]] +Dtype = Union["ExtensionDtype", NpDtype] +AstypeArg = Union["ExtensionDtype", "npt.DTypeLike"] +# DtypeArg specifies all allowable dtypes in a functions its dtype argument +DtypeArg = Union[Dtype, dict[Hashable, Dtype]] +DtypeObj = Union[np.dtype, "ExtensionDtype"] + +# converters +ConvertersArg = dict[Hashable, Callable[[Dtype], Dtype]] + +# parse_dates +ParseDatesArg = Union[ + bool, list[Hashable], list[list[Hashable]], dict[Hashable, list[Hashable]] +] + +# For functions like rename that convert one label to another +Renamer = Union[Mapping[Any, Hashable], Callable[[Any], Hashable]] + +# to maintain type information across generic functions and parametrization +T = TypeVar("T") + +# used in decorators to preserve the signature of the function it decorates +# see https://mypy.readthedocs.io/en/stable/generics.html#declaring-decorators +FuncType = Callable[..., Any] +F = TypeVar("F", bound=FuncType) + +# types of vectorized key functions for DataFrame::sort_values and +# DataFrame::sort_index, among others +ValueKeyFunc = Optional[Callable[["Series"], Union["Series", AnyArrayLike]]] +IndexKeyFunc = Optional[Callable[["Index"], Union["Index", AnyArrayLike]]] + +# types of `func` kwarg for DataFrame.aggregate and Series.aggregate +AggFuncTypeBase = Union[Callable, str] +AggFuncTypeDict = MutableMapping[ + Hashable, Union[AggFuncTypeBase, list[AggFuncTypeBase]] +] +AggFuncType = Union[ + AggFuncTypeBase, + list[AggFuncTypeBase], + AggFuncTypeDict, +] +AggObjType = Union[ + "Series", + "DataFrame", + "GroupBy", + "SeriesGroupBy", + "DataFrameGroupBy", + "BaseWindow", + 
PythonFuncType = Callable[[Any], Any]

# filenames and file-like-objects
AnyStr_co = TypeVar("AnyStr_co", str, bytes, covariant=True)
AnyStr_contra = TypeVar("AnyStr_contra", str, bytes, contravariant=True)


class BaseBuffer(Protocol):
    """Minimal structural interface for file-like objects accepted by pandas I/O."""

    @property
    def mode(self) -> str:
        # for _get_filepath_or_buffer
        ...

    def seek(self, __offset: int, __whence: int = ...) -> int:
        # with one argument: gzip.GzipFile, bz2.BZ2File
        # with two arguments: zip.ZipFile, read_sas
        ...

    def seekable(self) -> bool:
        # for bz2.BZ2File
        ...

    def tell(self) -> int:
        # for zip.ZipFile, read_stata, to_stata
        ...


class ReadBuffer(BaseBuffer, Protocol[AnyStr_co]):
    """Readable buffer yielding str or bytes (covariant in the element type)."""

    def read(self, __n: int = ...) -> AnyStr_co:
        # for BytesIOWrapper, gzip.GzipFile, bz2.BZ2File
        ...


class WriteBuffer(BaseBuffer, Protocol[AnyStr_contra]):
    """Writable buffer accepting str or bytes (contravariant in the element type)."""

    def write(self, __b: AnyStr_contra) -> Any:
        # for gzip.GzipFile, bz2.BZ2File
        ...

    def flush(self) -> Any:
        # for gzip.GzipFile, bz2.BZ2File
        ...


class ReadPickleBuffer(ReadBuffer[bytes], Protocol):
    """Readable bytes buffer that additionally supports readline (pickle I/O)."""

    def readline(self) -> bytes:
        ...


class WriteExcelBuffer(WriteBuffer[bytes], Protocol):
    """Writable bytes buffer that additionally supports truncate (Excel writers)."""

    def truncate(self, size: int | None = ...) -> int:
        ...


class ReadCsvBuffer(ReadBuffer[AnyStr_co], Protocol):
    """Readable buffer with the extras required by the CSV reader engines."""

    def __iter__(self) -> Iterator[AnyStr_co]:
        # for engine=python
        ...

    def fileno(self) -> int:
        # for _MMapWrapper
        ...

    def readline(self) -> AnyStr_co:
        # for engine=python
        ...

    @property
    def closed(self) -> bool:
        # for engine=pyarrow
        ...


FilePath = Union[str, "PathLike[str]"]

# for arbitrary kwargs passed during reading/writing files
StorageOptions = Optional[dict[str, Any]]


# compression keywords and compression
CompressionDict = dict[str, Any]
CompressionOptions = Optional[
    Union[Literal["infer", "gzip", "bz2", "zip", "xz", "zstd", "tar"], CompressionDict]
]

# types in DataFrameFormatter
FormattersType = Union[
    list[Callable], tuple[Callable, ...], Mapping[Union[str, int], Callable]
]
ColspaceType = Mapping[Hashable, Union[str, int]]
FloatFormatType = Union[str, Callable, "EngFormatter"]
ColspaceArgType = Union[
    str, int, Sequence[Union[str, int]], Mapping[Hashable, Union[str, int]]
]

# Arguments for fillna()
FillnaOptions = Literal["backfill", "bfill", "ffill", "pad"]
InterpolateOptions = Literal[
    "linear",
    "time",
    "index",
    "values",
    "nearest",
    "zero",
    "slinear",
    "quadratic",
    "cubic",
    "barycentric",
    "polynomial",
    "krogh",
    "piecewise_polynomial",
    "spline",
    "pchip",
    "akima",
    "cubicspline",
    "from_derivatives",
]

# internals
Manager = Union[
    "ArrayManager", "SingleArrayManager", "BlockManager", "SingleBlockManager"
]
SingleManager = Union["SingleArrayManager", "SingleBlockManager"]
Manager2D = Union["ArrayManager", "BlockManager"]

# indexing
# PositionalIndexer -> valid 1D positional indexer, e.g. can pass
# to ndarray.__getitem__
# ScalarIndexer is for a single value as the index
# SequenceIndexer is for list like or slices (but not tuples)
# PositionalIndexerTuple extends the PositionalIndexer for 2D arrays
# These are used in various __getitem__ overloads
# TODO(typing#684): add Ellipsis, see
# https://github.com/python/typing/issues/684#issuecomment-548203158
# https://bugs.python.org/issue41810
# Using List[int] here rather than Sequence[int] to disallow tuples.
+ScalarIndexer = Union[int, np.integer] +SequenceIndexer = Union[slice, list[int], np.ndarray] +PositionalIndexer = Union[ScalarIndexer, SequenceIndexer] +PositionalIndexerTuple = tuple[PositionalIndexer, PositionalIndexer] +PositionalIndexer2D = Union[PositionalIndexer, PositionalIndexerTuple] +if TYPE_CHECKING: + TakeIndexer = Union[Sequence[int], Sequence[np.integer], npt.NDArray[np.integer]] +else: + TakeIndexer = Any + +# Shared by functions such as drop and astype +IgnoreRaise = Literal["ignore", "raise"] + +# Windowing rank methods +WindowingRankType = Literal["average", "min", "max"] + +# read_csv engines +CSVEngine = Literal["c", "python", "pyarrow", "python-fwf"] + +# read_json engines +JSONEngine = Literal["ujson", "pyarrow"] + +# read_xml parsers +XMLParsers = Literal["lxml", "etree"] + +# read_html flavors +HTMLFlavors = Literal["lxml", "html5lib", "bs4"] + +# Interval closed type +IntervalLeftRight = Literal["left", "right"] +IntervalClosedType = Union[IntervalLeftRight, Literal["both", "neither"]] + +# datetime and NaTType +DatetimeNaTType = Union[datetime, "NaTType"] +DateTimeErrorChoices = Union[IgnoreRaise, Literal["coerce"]] + +# sort_index +SortKind = Literal["quicksort", "mergesort", "heapsort", "stable"] +NaPosition = Literal["first", "last"] + +# Arguments for nsmalles and n_largest +NsmallestNlargestKeep = Literal["first", "last", "all"] + +# quantile interpolation +QuantileInterpolation = Literal["linear", "lower", "higher", "midpoint", "nearest"] + +# plotting +PlottingOrientation = Literal["horizontal", "vertical"] + +# dropna +AnyAll = Literal["any", "all"] + +# merge +MergeHow = Literal["left", "right", "inner", "outer", "cross"] +MergeValidate = Literal[ + "one_to_one", + "1:1", + "one_to_many", + "1:m", + "many_to_one", + "m:1", + "many_to_many", + "m:m", +] + +# join +JoinHow = Literal["left", "right", "inner", "outer"] +JoinValidate = Literal[ + "one_to_one", + "1:1", + "one_to_many", + "1:m", + "many_to_one", + "m:1", + 
"many_to_many", + "m:m", +] + +# reindex +ReindexMethod = Union[FillnaOptions, Literal["nearest"]] + +MatplotlibColor = Union[str, Sequence[float]] +TimeGrouperOrigin = Union[ + "Timestamp", Literal["epoch", "start", "start_day", "end", "end_day"] +] +TimeAmbiguous = Union[Literal["infer", "NaT", "raise"], "npt.NDArray[np.bool_]"] +TimeNonexistent = Union[ + Literal["shift_forward", "shift_backward", "NaT", "raise"], timedelta +] +DropKeep = Literal["first", "last", False] +CorrelationMethod = Union[ + Literal["pearson", "kendall", "spearman"], Callable[[np.ndarray, np.ndarray], float] +] +AlignJoin = Literal["outer", "inner", "left", "right"] +DtypeBackend = Literal["pyarrow", "numpy_nullable"] + +TimeUnit = Literal["s", "ms", "us", "ns"] +OpenFileErrors = Literal[ + "strict", + "ignore", + "replace", + "surrogateescape", + "xmlcharrefreplace", + "backslashreplace", + "namereplace", +] + +# update +UpdateJoin = Literal["left"] + +# applymap +NaAction = Literal["ignore"] + +# from_dict +FromDictOrient = Literal["columns", "index", "tight"] + +# to_gbc +ToGbqIfexist = Literal["fail", "replace", "append"] + +# to_stata +ToStataByteorder = Literal[">", "<", "little", "big"] + +# ExcelWriter +ExcelWriterIfSheetExists = Literal["error", "new", "replace", "overlay"] + +# Offsets +OffsetCalendar = Union[np.busdaycalendar, "AbstractHolidayCalendar"] + +# read_csv: usecols +UsecolsArgType = Union[ + SequenceNotStr[Hashable], + range, + AnyArrayLike, + Callable[[HashableT], bool], + None, +] diff --git a/lib/python3.10/site-packages/pandas/_version.py b/lib/python3.10/site-packages/pandas/_version.py new file mode 100644 index 0000000000000000000000000000000000000000..f8a960630126d021d86f685a160b98cb5eada197 --- /dev/null +++ b/lib/python3.10/site-packages/pandas/_version.py @@ -0,0 +1,692 @@ +# This file helps to compute a version number in source trees obtained from +# git-archive tarball (such as those provided by githubs download-from-tag +# feature). 
Distribution tarballs (built by setup.py sdist) and build +# directories (produced by setup.py build) will contain a much shorter file +# that just contains the computed version number. + +# This file is released into the public domain. +# Generated by versioneer-0.28 +# https://github.com/python-versioneer/python-versioneer + +"""Git implementation of _version.py.""" + +import errno +import functools +import os +import re +import subprocess +import sys +from typing import Callable + + +def get_keywords(): + """Get the keywords needed to look up the version information.""" + # these strings will be replaced by git during git-archive. + # setup.py/versioneer.py will grep for the variable names, so they must + # each be defined on a line of their own. _version.py will just call + # get_keywords(). + git_refnames = "$Format:%d$" + git_full = "$Format:%H$" + git_date = "$Format:%ci$" + keywords = {"refnames": git_refnames, "full": git_full, "date": git_date} + return keywords + + +class VersioneerConfig: + """Container for Versioneer configuration parameters.""" + + +def get_config(): + """Create, populate and return the VersioneerConfig() object.""" + # these strings are filled in when 'setup.py versioneer' creates + # _version.py + cfg = VersioneerConfig() + cfg.VCS = "git" + cfg.style = "pep440" + cfg.tag_prefix = "v" + cfg.parentdir_prefix = "pandas-" + cfg.versionfile_source = "pandas/_version.py" + cfg.verbose = False + return cfg + + +class NotThisMethod(Exception): + """Exception raised if a method is not valid for the current scenario.""" + + +LONG_VERSION_PY: dict[str, str] = {} +HANDLERS: dict[str, dict[str, Callable]] = {} + + +def register_vcs_handler(vcs, method): # decorator + """Create decorator to mark a method as the handler of a VCS.""" + + def decorate(f): + """Store f in HANDLERS[vcs][method].""" + if vcs not in HANDLERS: + HANDLERS[vcs] = {} + HANDLERS[vcs][method] = f + return f + + return decorate + + +def run_command(commands, args, cwd=None, 
verbose=False, hide_stderr=False, env=None): + """Call the given command(s).""" + assert isinstance(commands, list) + process = None + + popen_kwargs = {} + if sys.platform == "win32": + # This hides the console window if pythonw.exe is used + startupinfo = subprocess.STARTUPINFO() + startupinfo.dwFlags |= subprocess.STARTF_USESHOWWINDOW + popen_kwargs["startupinfo"] = startupinfo + + for command in commands: + dispcmd = str([command] + args) + try: + # remember shell=False, so use git.cmd on windows, not just git + process = subprocess.Popen( + [command] + args, + cwd=cwd, + env=env, + stdout=subprocess.PIPE, + stderr=(subprocess.PIPE if hide_stderr else None), + **popen_kwargs, + ) + break + except OSError: + e = sys.exc_info()[1] + if e.errno == errno.ENOENT: + continue + if verbose: + print(f"unable to run {dispcmd}") + print(e) + return None, None + else: + if verbose: + print(f"unable to find command, tried {commands}") + return None, None + stdout = process.communicate()[0].strip().decode() + if process.returncode != 0: + if verbose: + print(f"unable to run {dispcmd} (error)") + print(f"stdout was {stdout}") + return None, process.returncode + return stdout, process.returncode + + +def versions_from_parentdir(parentdir_prefix, root, verbose): + """Try to determine the version from the parent directory name. + + Source tarballs conventionally unpack into a directory that includes both + the project name and a version string. 
We will also support searching up + two directory levels for an appropriately named parent directory + """ + rootdirs = [] + + for _ in range(3): + dirname = os.path.basename(root) + if dirname.startswith(parentdir_prefix): + return { + "version": dirname[len(parentdir_prefix) :], + "full-revisionid": None, + "dirty": False, + "error": None, + "date": None, + } + rootdirs.append(root) + root = os.path.dirname(root) # up a level + + if verbose: + print( + f"Tried directories {str(rootdirs)} \ + but none started with prefix {parentdir_prefix}" + ) + raise NotThisMethod("rootdir doesn't start with parentdir_prefix") + + +@register_vcs_handler("git", "get_keywords") +def git_get_keywords(versionfile_abs): + """Extract version information from the given file.""" + # the code embedded in _version.py can just fetch the value of these + # keywords. When used from setup.py, we don't want to import _version.py, + # so we do it with a regexp instead. This function is not used from + # _version.py. + keywords = {} + try: + with open(versionfile_abs, encoding="utf-8") as fobj: + for line in fobj: + if line.strip().startswith("git_refnames ="): + mo = re.search(r'=\s*"(.*)"', line) + if mo: + keywords["refnames"] = mo.group(1) + if line.strip().startswith("git_full ="): + mo = re.search(r'=\s*"(.*)"', line) + if mo: + keywords["full"] = mo.group(1) + if line.strip().startswith("git_date ="): + mo = re.search(r'=\s*"(.*)"', line) + if mo: + keywords["date"] = mo.group(1) + except OSError: + pass + return keywords + + +@register_vcs_handler("git", "keywords") +def git_versions_from_keywords(keywords, tag_prefix, verbose): + """Get version information from git keywords.""" + if "refnames" not in keywords: + raise NotThisMethod("Short version file found") + date = keywords.get("date") + if date is not None: + # Use only the last line. Previous lines may contain GPG signature + # information. 
+ date = date.splitlines()[-1] + + # git-2.2.0 added "%cI", which expands to an ISO-8601 -compliant + # datestamp. However we prefer "%ci" (which expands to an "ISO-8601 + # -like" string, which we must then edit to make compliant), because + # it's been around since git-1.5.3, and it's too difficult to + # discover which version we're using, or to work around using an + # older one. + date = date.strip().replace(" ", "T", 1).replace(" ", "", 1) + refnames = keywords["refnames"].strip() + if refnames.startswith("$Format"): + if verbose: + print("keywords are unexpanded, not using") + raise NotThisMethod("unexpanded keywords, not a git-archive tarball") + refs = {r.strip() for r in refnames.strip("()").split(",")} + # starting in git-1.8.3, tags are listed as "tag: foo-1.0" instead of + # just "foo-1.0". If we see a "tag: " prefix, prefer those. + TAG = "tag: " + tags = {r[len(TAG) :] for r in refs if r.startswith(TAG)} + if not tags: + # Either we're using git < 1.8.3, or there really are no tags. We use + # a heuristic: assume all version tags have a digit. The old git %d + # expansion behaves like git log --decorate=short and strips out the + # refs/heads/ and refs/tags/ prefixes that would let us distinguish + # between branches and tags. By ignoring refnames without digits, we + # filter out many common branch names like "release" and + # "stabilization", as well as "HEAD" and "master". + tags = {r for r in refs if re.search(r"\d", r)} + if verbose: + print(f"discarding '{','.join(refs - tags)}', no digits") + if verbose: + print(f"likely tags: {','.join(sorted(tags))}") + for ref in sorted(tags): + # sorting will prefer e.g. 
"2.0" over "2.0rc1" + if ref.startswith(tag_prefix): + r = ref[len(tag_prefix) :] + # Filter out refs that exactly match prefix or that don't start + # with a number once the prefix is stripped (mostly a concern + # when prefix is '') + if not re.match(r"\d", r): + continue + if verbose: + print(f"picking {r}") + return { + "version": r, + "full-revisionid": keywords["full"].strip(), + "dirty": False, + "error": None, + "date": date, + } + # no suitable tags, so version is "0+unknown", but full hex is still there + if verbose: + print("no suitable tags, using unknown + full revision id") + return { + "version": "0+unknown", + "full-revisionid": keywords["full"].strip(), + "dirty": False, + "error": "no suitable tags", + "date": None, + } + + +@register_vcs_handler("git", "pieces_from_vcs") +def git_pieces_from_vcs(tag_prefix, root, verbose, runner=run_command): + """Get version from 'git describe' in the root of the source tree. + + This only gets called if the git-archive 'subst' keywords were *not* + expanded, and _version.py hasn't already been rewritten with a short + version string, meaning we're inside a checked out source tree. + """ + GITS = ["git"] + if sys.platform == "win32": + GITS = ["git.cmd", "git.exe"] + + # GIT_DIR can interfere with correct operation of Versioneer. + # It may be intended to be passed to the Versioneer-versioned project, + # but that should not change where we get our version from. 
+ env = os.environ.copy() + env.pop("GIT_DIR", None) + runner = functools.partial(runner, env=env) + + _, rc = runner(GITS, ["rev-parse", "--git-dir"], cwd=root, hide_stderr=not verbose) + if rc != 0: + if verbose: + print(f"Directory {root} not under git control") + raise NotThisMethod("'git rev-parse --git-dir' returned error") + + # if there is a tag matching tag_prefix, this yields TAG-NUM-gHEX[-dirty] + # if there isn't one, this yields HEX[-dirty] (no NUM) + describe_out, rc = runner( + GITS, + [ + "describe", + "--tags", + "--dirty", + "--always", + "--long", + "--match", + f"{tag_prefix}[[:digit:]]*", + ], + cwd=root, + ) + # --long was added in git-1.5.5 + if describe_out is None: + raise NotThisMethod("'git describe' failed") + describe_out = describe_out.strip() + full_out, rc = runner(GITS, ["rev-parse", "HEAD"], cwd=root) + if full_out is None: + raise NotThisMethod("'git rev-parse' failed") + full_out = full_out.strip() + + pieces = {} + pieces["long"] = full_out + pieces["short"] = full_out[:7] # maybe improved later + pieces["error"] = None + + branch_name, rc = runner(GITS, ["rev-parse", "--abbrev-ref", "HEAD"], cwd=root) + # --abbrev-ref was added in git-1.6.3 + if rc != 0 or branch_name is None: + raise NotThisMethod("'git rev-parse --abbrev-ref' returned error") + branch_name = branch_name.strip() + + if branch_name == "HEAD": + # If we aren't exactly on a branch, pick a branch which represents + # the current commit. If all else fails, we are on a branchless + # commit. + branches, rc = runner(GITS, ["branch", "--contains"], cwd=root) + # --contains was added in git-1.5.4 + if rc != 0 or branches is None: + raise NotThisMethod("'git branch --contains' returned error") + branches = branches.split("\n") + + # Remove the first line if we're running detached + if "(" in branches[0]: + branches.pop(0) + + # Strip off the leading "* " from the list of branches. 
+ branches = [branch[2:] for branch in branches] + if "master" in branches: + branch_name = "master" + elif not branches: + branch_name = None + else: + # Pick the first branch that is returned. Good or bad. + branch_name = branches[0] + + pieces["branch"] = branch_name + + # parse describe_out. It will be like TAG-NUM-gHEX[-dirty] or HEX[-dirty] + # TAG might have hyphens. + git_describe = describe_out + + # look for -dirty suffix + dirty = git_describe.endswith("-dirty") + pieces["dirty"] = dirty + if dirty: + git_describe = git_describe[: git_describe.rindex("-dirty")] + + # now we have TAG-NUM-gHEX or HEX + + if "-" in git_describe: + # TAG-NUM-gHEX + mo = re.search(r"^(.+)-(\d+)-g([0-9a-f]+)$", git_describe) + if not mo: + # unparsable. Maybe git-describe is misbehaving? + pieces["error"] = f"unable to parse git-describe output: '{describe_out}'" + return pieces + + # tag + full_tag = mo.group(1) + if not full_tag.startswith(tag_prefix): + if verbose: + fmt = "tag '%s' doesn't start with prefix '%s'" + print(fmt % (full_tag, tag_prefix)) + pieces[ + "error" + ] = f"tag '{full_tag}' doesn't start with prefix '{tag_prefix}'" + return pieces + pieces["closest-tag"] = full_tag[len(tag_prefix) :] + + # distance: number of commits since tag + pieces["distance"] = int(mo.group(2)) + + # commit: short hex revision ID + pieces["short"] = mo.group(3) + + else: + # HEX: no tags + pieces["closest-tag"] = None + out, rc = runner(GITS, ["rev-list", "HEAD", "--left-right"], cwd=root) + pieces["distance"] = len(out.split()) # total number of commits + + # commit date: see ISO-8601 comment in git_versions_from_keywords() + date = runner(GITS, ["show", "-s", "--format=%ci", "HEAD"], cwd=root)[0].strip() + # Use only the last line. Previous lines may contain GPG signature + # information. 
+ date = date.splitlines()[-1] + pieces["date"] = date.strip().replace(" ", "T", 1).replace(" ", "", 1) + + return pieces + + +def plus_or_dot(pieces) -> str: + """Return a + if we don't already have one, else return a .""" + if "+" in pieces.get("closest-tag", ""): + return "." + return "+" + + +def render_pep440(pieces): + """Build up version string, with post-release "local version identifier". + + Our goal: TAG[+DISTANCE.gHEX[.dirty]] . Note that if you + get a tagged build and then dirty it, you'll get TAG+0.gHEX.dirty + + Exceptions: + 1: no tags. git_describe was just HEX. 0+untagged.DISTANCE.gHEX[.dirty] + """ + if pieces["closest-tag"]: + rendered = pieces["closest-tag"] + if pieces["distance"] or pieces["dirty"]: + rendered += plus_or_dot(pieces) + rendered += f"{pieces['distance']}.g{pieces['short']}" + if pieces["dirty"]: + rendered += ".dirty" + else: + # exception #1 + rendered = f"0+untagged.{pieces['distance']}.g{pieces['short']}" + if pieces["dirty"]: + rendered += ".dirty" + return rendered + + +def render_pep440_branch(pieces): + """TAG[[.dev0]+DISTANCE.gHEX[.dirty]] . + + The ".dev0" means not master branch. Note that .dev0 sorts backwards + (a feature branch will appear "older" than the master branch). + + Exceptions: + 1: no tags. 0[.dev0]+untagged.DISTANCE.gHEX[.dirty] + """ + if pieces["closest-tag"]: + rendered = pieces["closest-tag"] + if pieces["distance"] or pieces["dirty"]: + if pieces["branch"] != "master": + rendered += ".dev0" + rendered += plus_or_dot(pieces) + rendered += f"{pieces['distance']}.g{pieces['short']}" + if pieces["dirty"]: + rendered += ".dirty" + else: + # exception #1 + rendered = "0" + if pieces["branch"] != "master": + rendered += ".dev0" + rendered += f"+untagged.{pieces['distance']}.g{pieces['short']}" + if pieces["dirty"]: + rendered += ".dirty" + return rendered + + +def pep440_split_post(ver): + """Split pep440 version string at the post-release segment. 
+ + Returns the release segments before the post-release and the + post-release version number (or -1 if no post-release segment is present). + """ + vc = str.split(ver, ".post") + return vc[0], int(vc[1] or 0) if len(vc) == 2 else None + + +def render_pep440_pre(pieces): + """TAG[.postN.devDISTANCE] -- No -dirty. + + Exceptions: + 1: no tags. 0.post0.devDISTANCE + """ + if pieces["closest-tag"]: + if pieces["distance"]: + # update the post release segment + tag_version, post_version = pep440_split_post(pieces["closest-tag"]) + rendered = tag_version + if post_version is not None: + rendered += f".post{post_version + 1}.dev{pieces['distance']}" + else: + rendered += f".post0.dev{pieces['distance']}" + else: + # no commits, use the tag as the version + rendered = pieces["closest-tag"] + else: + # exception #1 + rendered = f"0.post0.dev{pieces['distance']}" + return rendered + + +def render_pep440_post(pieces): + """TAG[.postDISTANCE[.dev0]+gHEX] . + + The ".dev0" means dirty. Note that .dev0 sorts backwards + (a dirty tree will appear "older" than the corresponding clean one), + but you shouldn't be releasing software with -dirty anyways. + + Exceptions: + 1: no tags. 0.postDISTANCE[.dev0] + """ + if pieces["closest-tag"]: + rendered = pieces["closest-tag"] + if pieces["distance"] or pieces["dirty"]: + rendered += f".post{pieces['distance']}" + if pieces["dirty"]: + rendered += ".dev0" + rendered += plus_or_dot(pieces) + rendered += f"g{pieces['short']}" + else: + # exception #1 + rendered = f"0.post{pieces['distance']}" + if pieces["dirty"]: + rendered += ".dev0" + rendered += f"+g{pieces['short']}" + return rendered + + +def render_pep440_post_branch(pieces): + """TAG[.postDISTANCE[.dev0]+gHEX[.dirty]] . + + The ".dev0" means not master branch. + + Exceptions: + 1: no tags. 
0.postDISTANCE[.dev0]+gHEX[.dirty] + """ + if pieces["closest-tag"]: + rendered = pieces["closest-tag"] + if pieces["distance"] or pieces["dirty"]: + rendered += f".post{pieces['distance']}" + if pieces["branch"] != "master": + rendered += ".dev0" + rendered += plus_or_dot(pieces) + rendered += f"g{pieces['short']}" + if pieces["dirty"]: + rendered += ".dirty" + else: + # exception #1 + rendered = f"0.post{pieces['distance']}" + if pieces["branch"] != "master": + rendered += ".dev0" + rendered += f"+g{pieces['short']}" + if pieces["dirty"]: + rendered += ".dirty" + return rendered + + +def render_pep440_old(pieces): + """TAG[.postDISTANCE[.dev0]] . + + The ".dev0" means dirty. + + Exceptions: + 1: no tags. 0.postDISTANCE[.dev0] + """ + if pieces["closest-tag"]: + rendered = pieces["closest-tag"] + if pieces["distance"] or pieces["dirty"]: + rendered += f"0.post{pieces['distance']}" + if pieces["dirty"]: + rendered += ".dev0" + else: + # exception #1 + rendered = f"0.post{pieces['distance']}" + if pieces["dirty"]: + rendered += ".dev0" + return rendered + + +def render_git_describe(pieces): + """TAG[-DISTANCE-gHEX][-dirty]. + + Like 'git describe --tags --dirty --always'. + + Exceptions: + 1: no tags. HEX[-dirty] (note: no 'g' prefix) + """ + if pieces["closest-tag"]: + rendered = pieces["closest-tag"] + if pieces["distance"]: + rendered += f"-{pieces['distance']}-g{pieces['short']}" + else: + # exception #1 + rendered = pieces["short"] + if pieces["dirty"]: + rendered += "-dirty" + return rendered + + +def render_git_describe_long(pieces): + """TAG-DISTANCE-gHEX[-dirty]. + + Like 'git describe --tags --dirty --always -long'. + The distance/hash is unconditional. + + Exceptions: + 1: no tags. 
HEX[-dirty] (note: no 'g' prefix) + """ + if pieces["closest-tag"]: + rendered = pieces["closest-tag"] + rendered += f"-{pieces['distance']}-g{pieces['short']}" + else: + # exception #1 + rendered = pieces["short"] + if pieces["dirty"]: + rendered += "-dirty" + return rendered + + +def render(pieces, style): + """Render the given version pieces into the requested style.""" + if pieces["error"]: + return { + "version": "unknown", + "full-revisionid": pieces.get("long"), + "dirty": None, + "error": pieces["error"], + "date": None, + } + + if not style or style == "default": + style = "pep440" # the default + + if style == "pep440": + rendered = render_pep440(pieces) + elif style == "pep440-branch": + rendered = render_pep440_branch(pieces) + elif style == "pep440-pre": + rendered = render_pep440_pre(pieces) + elif style == "pep440-post": + rendered = render_pep440_post(pieces) + elif style == "pep440-post-branch": + rendered = render_pep440_post_branch(pieces) + elif style == "pep440-old": + rendered = render_pep440_old(pieces) + elif style == "git-describe": + rendered = render_git_describe(pieces) + elif style == "git-describe-long": + rendered = render_git_describe_long(pieces) + else: + raise ValueError(f"unknown style '{style}'") + + return { + "version": rendered, + "full-revisionid": pieces["long"], + "dirty": pieces["dirty"], + "error": None, + "date": pieces.get("date"), + } + + +def get_versions(): + """Get version information or return default if unable to do so.""" + # I am in _version.py, which lives at ROOT/VERSIONFILE_SOURCE. If we have + # __file__, we can work backwards from there to the root. Some + # py2exe/bbfreeze/non-CPython implementations don't do __file__, in which + # case we can only use expanded keywords. 
+ + cfg = get_config() + verbose = cfg.verbose + + try: + return git_versions_from_keywords(get_keywords(), cfg.tag_prefix, verbose) + except NotThisMethod: + pass + + try: + root = os.path.realpath(__file__) + # versionfile_source is the relative path from the top of the source + # tree (where the .git directory might live) to this file. Invert + # this to find the root from __file__. + for _ in cfg.versionfile_source.split("/"): + root = os.path.dirname(root) + except NameError: + return { + "version": "0+unknown", + "full-revisionid": None, + "dirty": None, + "error": "unable to find root of source tree", + "date": None, + } + + try: + pieces = git_pieces_from_vcs(cfg.tag_prefix, root, verbose) + return render(pieces, cfg.style) + except NotThisMethod: + pass + + try: + if cfg.parentdir_prefix: + return versions_from_parentdir(cfg.parentdir_prefix, root, verbose) + except NotThisMethod: + pass + + return { + "version": "0+unknown", + "full-revisionid": None, + "dirty": None, + "error": "unable to compute version", + "date": None, + } diff --git a/lib/python3.10/site-packages/pandas/_version_meson.py b/lib/python3.10/site-packages/pandas/_version_meson.py new file mode 100644 index 0000000000000000000000000000000000000000..973a8d3fdc4a7233dcfb068268a80e4484253212 --- /dev/null +++ b/lib/python3.10/site-packages/pandas/_version_meson.py @@ -0,0 +1,2 @@ +__version__="2.2.3" +__git_version__="0691c5cf90477d3503834d983f69350f250a6ff7" diff --git a/lib/python3.10/site-packages/pandas/arrays/__init__.py b/lib/python3.10/site-packages/pandas/arrays/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..a11755275d00e070bea6ab73a881b98d0b976551 --- /dev/null +++ b/lib/python3.10/site-packages/pandas/arrays/__init__.py @@ -0,0 +1,53 @@ +""" +All of pandas' ExtensionArrays. + +See :ref:`extending.extension-types` for more. 
+""" +from pandas.core.arrays import ( + ArrowExtensionArray, + ArrowStringArray, + BooleanArray, + Categorical, + DatetimeArray, + FloatingArray, + IntegerArray, + IntervalArray, + NumpyExtensionArray, + PeriodArray, + SparseArray, + StringArray, + TimedeltaArray, +) + +__all__ = [ + "ArrowExtensionArray", + "ArrowStringArray", + "BooleanArray", + "Categorical", + "DatetimeArray", + "FloatingArray", + "IntegerArray", + "IntervalArray", + "NumpyExtensionArray", + "PeriodArray", + "SparseArray", + "StringArray", + "TimedeltaArray", +] + + +def __getattr__(name: str) -> type[NumpyExtensionArray]: + if name == "PandasArray": + # GH#53694 + import warnings + + from pandas.util._exceptions import find_stack_level + + warnings.warn( + "PandasArray has been renamed NumpyExtensionArray. Use that " + "instead. This alias will be removed in a future version.", + FutureWarning, + stacklevel=find_stack_level(), + ) + return NumpyExtensionArray + raise AttributeError(f"module 'pandas.arrays' has no attribute '{name}'") diff --git a/lib/python3.10/site-packages/pandas/conftest.py b/lib/python3.10/site-packages/pandas/conftest.py new file mode 100644 index 0000000000000000000000000000000000000000..10134c90f8eeb861ba6368e6677e0d2120a09290 --- /dev/null +++ b/lib/python3.10/site-packages/pandas/conftest.py @@ -0,0 +1,1980 @@ +""" +This file is very long and growing, but it was decided to not split it yet, as +it's still manageable (2020-03-17, ~1.1k LoC). See gh-31989 + +Instead of splitting it was decided to define sections here: +- Configuration / Settings +- Autouse fixtures +- Common arguments +- Missing values & co. 
+- Classes +- Indices +- Series' +- DataFrames +- Operators & Operations +- Data sets/files +- Time zones +- Dtypes +- Misc +""" +from __future__ import annotations + +from collections import abc +from datetime import ( + date, + datetime, + time, + timedelta, + timezone, +) +from decimal import Decimal +import operator +import os +from typing import ( + TYPE_CHECKING, + Callable, +) + +from dateutil.tz import ( + tzlocal, + tzutc, +) +import hypothesis +from hypothesis import strategies as st +import numpy as np +import pytest +from pytz import ( + FixedOffset, + utc, +) + +from pandas._config.config import _get_option + +import pandas.util._test_decorators as td + +from pandas.core.dtypes.dtypes import ( + DatetimeTZDtype, + IntervalDtype, +) + +import pandas as pd +from pandas import ( + CategoricalIndex, + DataFrame, + Interval, + IntervalIndex, + Period, + RangeIndex, + Series, + Timedelta, + Timestamp, + date_range, + period_range, + timedelta_range, +) +import pandas._testing as tm +from pandas.core import ops +from pandas.core.indexes.api import ( + Index, + MultiIndex, +) +from pandas.util.version import Version + +if TYPE_CHECKING: + from collections.abc import ( + Hashable, + Iterator, + ) + +try: + import pyarrow as pa +except ImportError: + has_pyarrow = False +else: + del pa + has_pyarrow = True + +import zoneinfo + +try: + zoneinfo.ZoneInfo("UTC") +except zoneinfo.ZoneInfoNotFoundError: + zoneinfo = None # type: ignore[assignment] + + +# ---------------------------------------------------------------- +# Configuration / Settings +# ---------------------------------------------------------------- +# pytest + + +def pytest_addoption(parser) -> None: + parser.addoption( + "--no-strict-data-files", + action="store_false", + help="Don't fail if a test is skipped for missing data file.", + ) + + +def ignore_doctest_warning(item: pytest.Item, path: str, message: str) -> None: + """Ignore doctest warning. 
+ + Parameters + ---------- + item : pytest.Item + pytest test item. + path : str + Module path to Python object, e.g. "pandas.core.frame.DataFrame.append". A + warning will be filtered when item.name ends with in given path. So it is + sufficient to specify e.g. "DataFrame.append". + message : str + Message to be filtered. + """ + if item.name.endswith(path): + item.add_marker(pytest.mark.filterwarnings(f"ignore:{message}")) + + +def pytest_collection_modifyitems(items, config) -> None: + is_doctest = config.getoption("--doctest-modules") or config.getoption( + "--doctest-cython", default=False + ) + + # Warnings from doctests that can be ignored; place reason in comment above. + # Each entry specifies (path, message) - see the ignore_doctest_warning function + ignored_doctest_warnings = [ + ("is_int64_dtype", "is_int64_dtype is deprecated"), + ("is_interval_dtype", "is_interval_dtype is deprecated"), + ("is_period_dtype", "is_period_dtype is deprecated"), + ("is_datetime64tz_dtype", "is_datetime64tz_dtype is deprecated"), + ("is_categorical_dtype", "is_categorical_dtype is deprecated"), + ("is_sparse", "is_sparse is deprecated"), + ("DataFrameGroupBy.fillna", "DataFrameGroupBy.fillna is deprecated"), + ("NDFrame.replace", "The 'method' keyword"), + ("NDFrame.replace", "Series.replace without 'value'"), + ("NDFrame.clip", "Downcasting behavior in Series and DataFrame methods"), + ("Series.idxmin", "The behavior of Series.idxmin"), + ("Series.idxmax", "The behavior of Series.idxmax"), + ("SeriesGroupBy.fillna", "SeriesGroupBy.fillna is deprecated"), + ("SeriesGroupBy.idxmin", "The behavior of Series.idxmin"), + ("SeriesGroupBy.idxmax", "The behavior of Series.idxmax"), + # Docstring divides by zero to show behavior difference + ("missing.mask_zero_div_zero", "divide by zero encountered"), + ( + "to_pydatetime", + "The behavior of DatetimeProperties.to_pydatetime is deprecated", + ), + ( + "pandas.core.generic.NDFrame.bool", + "(Series|DataFrame).bool is now 
deprecated and will be removed " + "in future version of pandas", + ), + ( + "pandas.core.generic.NDFrame.first", + "first is deprecated and will be removed in a future version. " + "Please create a mask and filter using `.loc` instead", + ), + ( + "Resampler.fillna", + "DatetimeIndexResampler.fillna is deprecated", + ), + ( + "DataFrameGroupBy.fillna", + "DataFrameGroupBy.fillna with 'method' is deprecated", + ), + ( + "DataFrameGroupBy.fillna", + "DataFrame.fillna with 'method' is deprecated", + ), + ("read_parquet", "Passing a BlockManager to DataFrame is deprecated"), + ] + + if is_doctest: + for item in items: + for path, message in ignored_doctest_warnings: + ignore_doctest_warning(item, path, message) + + +hypothesis_health_checks = [hypothesis.HealthCheck.too_slow] +if Version(hypothesis.__version__) >= Version("6.83.2"): + hypothesis_health_checks.append(hypothesis.HealthCheck.differing_executors) + +# Hypothesis +hypothesis.settings.register_profile( + "ci", + # Hypothesis timing checks are tuned for scalars by default, so we bump + # them from 200ms to 500ms per test case as the global default. If this + # is too short for a specific test, (a) try to make it faster, and (b) + # if it really is slow add `@settings(deadline=...)` with a working value, + # or `deadline=None` to entirely disable timeouts for that test. + # 2022-02-09: Changed deadline from 500 -> None. 
Deadline leads to + # non-actionable, flaky CI failures (# GH 24641, 44969, 45118, 44969) + deadline=None, + suppress_health_check=tuple(hypothesis_health_checks), +) +hypothesis.settings.load_profile("ci") + +# Registering these strategies makes them globally available via st.from_type, +# which is use for offsets in tests/tseries/offsets/test_offsets_properties.py +for name in "MonthBegin MonthEnd BMonthBegin BMonthEnd".split(): + cls = getattr(pd.tseries.offsets, name) + st.register_type_strategy( + cls, st.builds(cls, n=st.integers(-99, 99), normalize=st.booleans()) + ) + +for name in "YearBegin YearEnd BYearBegin BYearEnd".split(): + cls = getattr(pd.tseries.offsets, name) + st.register_type_strategy( + cls, + st.builds( + cls, + n=st.integers(-5, 5), + normalize=st.booleans(), + month=st.integers(min_value=1, max_value=12), + ), + ) + +for name in "QuarterBegin QuarterEnd BQuarterBegin BQuarterEnd".split(): + cls = getattr(pd.tseries.offsets, name) + st.register_type_strategy( + cls, + st.builds( + cls, + n=st.integers(-24, 24), + normalize=st.booleans(), + startingMonth=st.integers(min_value=1, max_value=12), + ), + ) + + +# ---------------------------------------------------------------- +# Autouse fixtures +# ---------------------------------------------------------------- + + +# https://github.com/pytest-dev/pytest/issues/11873 +# Would like to avoid autouse=True, but cannot as of pytest 8.0.0 +@pytest.fixture(autouse=True) +def add_doctest_imports(doctest_namespace) -> None: + """ + Make `np` and `pd` names available for doctests. + """ + doctest_namespace["np"] = np + doctest_namespace["pd"] = pd + + +@pytest.fixture(autouse=True) +def configure_tests() -> None: + """ + Configure settings for all tests and test modules. 
+ """ + pd.set_option("chained_assignment", "raise") + + +# ---------------------------------------------------------------- +# Common arguments +# ---------------------------------------------------------------- +@pytest.fixture(params=[0, 1, "index", "columns"], ids=lambda x: f"axis={repr(x)}") +def axis(request): + """ + Fixture for returning the axis numbers of a DataFrame. + """ + return request.param + + +axis_frame = axis + + +@pytest.fixture(params=[1, "columns"], ids=lambda x: f"axis={repr(x)}") +def axis_1(request): + """ + Fixture for returning aliases of axis 1 of a DataFrame. + """ + return request.param + + +@pytest.fixture(params=[True, False, None]) +def observed(request): + """ + Pass in the observed keyword to groupby for [True, False] + This indicates whether categoricals should return values for + values which are not in the grouper [False / None], or only values which + appear in the grouper [True]. [None] is supported for future compatibility + if we decide to change the default (and would need to warn if this + parameter is not passed). + """ + return request.param + + +@pytest.fixture(params=[True, False, None]) +def ordered(request): + """ + Boolean 'ordered' parameter for Categorical. + """ + return request.param + + +@pytest.fixture(params=[True, False]) +def skipna(request): + """ + Boolean 'skipna' parameter. + """ + return request.param + + +@pytest.fixture(params=["first", "last", False]) +def keep(request): + """ + Valid values for the 'keep' parameter used in + .duplicated or .drop_duplicates + """ + return request.param + + +@pytest.fixture(params=["both", "neither", "left", "right"]) +def inclusive_endpoints_fixture(request): + """ + Fixture for trying all interval 'inclusive' parameters. + """ + return request.param + + +@pytest.fixture(params=["left", "right", "both", "neither"]) +def closed(request): + """ + Fixture for trying all interval closed parameters. 
+ """ + return request.param + + +@pytest.fixture(params=["left", "right", "both", "neither"]) +def other_closed(request): + """ + Secondary closed fixture to allow parametrizing over all pairs of closed. + """ + return request.param + + +@pytest.fixture( + params=[ + None, + "gzip", + "bz2", + "zip", + "xz", + "tar", + pytest.param("zstd", marks=td.skip_if_no("zstandard")), + ] +) +def compression(request): + """ + Fixture for trying common compression types in compression tests. + """ + return request.param + + +@pytest.fixture( + params=[ + "gzip", + "bz2", + "zip", + "xz", + "tar", + pytest.param("zstd", marks=td.skip_if_no("zstandard")), + ] +) +def compression_only(request): + """ + Fixture for trying common compression types in compression tests excluding + uncompressed case. + """ + return request.param + + +@pytest.fixture(params=[True, False]) +def writable(request): + """ + Fixture that an array is writable. + """ + return request.param + + +@pytest.fixture(params=["inner", "outer", "left", "right"]) +def join_type(request): + """ + Fixture for trying all types of join operations. + """ + return request.param + + +@pytest.fixture(params=["nlargest", "nsmallest"]) +def nselect_method(request): + """ + Fixture for trying all nselect methods. + """ + return request.param + + +# ---------------------------------------------------------------- +# Missing values & co. +# ---------------------------------------------------------------- +@pytest.fixture(params=tm.NULL_OBJECTS, ids=lambda x: type(x).__name__) +def nulls_fixture(request): + """ + Fixture for each null type in pandas. + """ + return request.param + + +nulls_fixture2 = nulls_fixture # Generate cartesian product of nulls_fixture + + +@pytest.fixture(params=[None, np.nan, pd.NaT]) +def unique_nulls_fixture(request): + """ + Fixture for each null type in pandas, each null type exactly once. 
+ """ + return request.param + + +# Generate cartesian product of unique_nulls_fixture: +unique_nulls_fixture2 = unique_nulls_fixture + + +@pytest.fixture(params=tm.NP_NAT_OBJECTS, ids=lambda x: type(x).__name__) +def np_nat_fixture(request): + """ + Fixture for each NaT type in numpy. + """ + return request.param + + +# Generate cartesian product of np_nat_fixture: +np_nat_fixture2 = np_nat_fixture + + +# ---------------------------------------------------------------- +# Classes +# ---------------------------------------------------------------- + + +@pytest.fixture(params=[DataFrame, Series]) +def frame_or_series(request): + """ + Fixture to parametrize over DataFrame and Series. + """ + return request.param + + +@pytest.fixture(params=[Index, Series], ids=["index", "series"]) +def index_or_series(request): + """ + Fixture to parametrize over Index and Series, made necessary by a mypy + bug, giving an error: + + List item 0 has incompatible type "Type[Series]"; expected "Type[PandasObject]" + + See GH#29725 + """ + return request.param + + +# Generate cartesian product of index_or_series fixture: +index_or_series2 = index_or_series + + +@pytest.fixture(params=[Index, Series, pd.array], ids=["index", "series", "array"]) +def index_or_series_or_array(request): + """ + Fixture to parametrize over Index, Series, and ExtensionArray + """ + return request.param + + +@pytest.fixture(params=[Index, Series, DataFrame, pd.array], ids=lambda x: x.__name__) +def box_with_array(request): + """ + Fixture to test behavior for Index, Series, DataFrame, and pandas Array + classes + """ + return request.param + + +box_with_array2 = box_with_array + + +@pytest.fixture +def dict_subclass() -> type[dict]: + """ + Fixture for a dictionary subclass. 
+ """ + + class TestSubDict(dict): + def __init__(self, *args, **kwargs) -> None: + dict.__init__(self, *args, **kwargs) + + return TestSubDict + + +@pytest.fixture +def non_dict_mapping_subclass() -> type[abc.Mapping]: + """ + Fixture for a non-mapping dictionary subclass. + """ + + class TestNonDictMapping(abc.Mapping): + def __init__(self, underlying_dict) -> None: + self._data = underlying_dict + + def __getitem__(self, key): + return self._data.__getitem__(key) + + def __iter__(self) -> Iterator: + return self._data.__iter__() + + def __len__(self) -> int: + return self._data.__len__() + + return TestNonDictMapping + + +# ---------------------------------------------------------------- +# Indices +# ---------------------------------------------------------------- +@pytest.fixture +def multiindex_year_month_day_dataframe_random_data(): + """ + DataFrame with 3 level MultiIndex (year, month, day) covering + first 100 business days from 2000-01-01 with random data + """ + tdf = DataFrame( + np.random.default_rng(2).standard_normal((100, 4)), + columns=Index(list("ABCD"), dtype=object), + index=date_range("2000-01-01", periods=100, freq="B"), + ) + ymd = tdf.groupby([lambda x: x.year, lambda x: x.month, lambda x: x.day]).sum() + # use int64 Index, to make sure things work + ymd.index = ymd.index.set_levels([lev.astype("i8") for lev in ymd.index.levels]) + ymd.index.set_names(["year", "month", "day"], inplace=True) + return ymd + + +@pytest.fixture +def lexsorted_two_level_string_multiindex() -> MultiIndex: + """ + 2-level MultiIndex, lexsorted, with string names. 
+ """ + return MultiIndex( + levels=[["foo", "bar", "baz", "qux"], ["one", "two", "three"]], + codes=[[0, 0, 0, 1, 1, 2, 2, 3, 3, 3], [0, 1, 2, 0, 1, 1, 2, 0, 1, 2]], + names=["first", "second"], + ) + + +@pytest.fixture +def multiindex_dataframe_random_data( + lexsorted_two_level_string_multiindex, +) -> DataFrame: + """DataFrame with 2 level MultiIndex with random data""" + index = lexsorted_two_level_string_multiindex + return DataFrame( + np.random.default_rng(2).standard_normal((10, 3)), + index=index, + columns=Index(["A", "B", "C"], name="exp"), + ) + + +def _create_multiindex(): + """ + MultiIndex used to test the general functionality of this object + """ + + # See Also: tests.multi.conftest.idx + major_axis = Index(["foo", "bar", "baz", "qux"]) + minor_axis = Index(["one", "two"]) + + major_codes = np.array([0, 0, 1, 2, 3, 3]) + minor_codes = np.array([0, 1, 0, 1, 0, 1]) + index_names = ["first", "second"] + return MultiIndex( + levels=[major_axis, minor_axis], + codes=[major_codes, minor_codes], + names=index_names, + verify_integrity=False, + ) + + +def _create_mi_with_dt64tz_level(): + """ + MultiIndex with a level that is a tzaware DatetimeIndex. 
+ """ + # GH#8367 round trip with pickle + return MultiIndex.from_product( + [[1, 2], ["a", "b"], date_range("20130101", periods=3, tz="US/Eastern")], + names=["one", "two", "three"], + ) + + +indices_dict = { + "string": Index([f"pandas_{i}" for i in range(100)]), + "datetime": date_range("2020-01-01", periods=100), + "datetime-tz": date_range("2020-01-01", periods=100, tz="US/Pacific"), + "period": period_range("2020-01-01", periods=100, freq="D"), + "timedelta": timedelta_range(start="1 day", periods=100, freq="D"), + "range": RangeIndex(100), + "int8": Index(np.arange(100), dtype="int8"), + "int16": Index(np.arange(100), dtype="int16"), + "int32": Index(np.arange(100), dtype="int32"), + "int64": Index(np.arange(100), dtype="int64"), + "uint8": Index(np.arange(100), dtype="uint8"), + "uint16": Index(np.arange(100), dtype="uint16"), + "uint32": Index(np.arange(100), dtype="uint32"), + "uint64": Index(np.arange(100), dtype="uint64"), + "float32": Index(np.arange(100), dtype="float32"), + "float64": Index(np.arange(100), dtype="float64"), + "bool-object": Index([True, False] * 5, dtype=object), + "bool-dtype": Index([True, False] * 5, dtype=bool), + "complex64": Index( + np.arange(100, dtype="complex64") + 1.0j * np.arange(100, dtype="complex64") + ), + "complex128": Index( + np.arange(100, dtype="complex128") + 1.0j * np.arange(100, dtype="complex128") + ), + "categorical": CategoricalIndex(list("abcd") * 25), + "interval": IntervalIndex.from_breaks(np.linspace(0, 100, num=101)), + "empty": Index([]), + "tuples": MultiIndex.from_tuples(zip(["foo", "bar", "baz"], [1, 2, 3])), + "mi-with-dt64tz-level": _create_mi_with_dt64tz_level(), + "multi": _create_multiindex(), + "repeats": Index([0, 0, 1, 1, 2, 2]), + "nullable_int": Index(np.arange(100), dtype="Int64"), + "nullable_uint": Index(np.arange(100), dtype="UInt16"), + "nullable_float": Index(np.arange(100), dtype="Float32"), + "nullable_bool": Index(np.arange(100).astype(bool), dtype="boolean"), + "string-python": 
Index( + pd.array([f"pandas_{i}" for i in range(100)], dtype="string[python]") + ), +} +if has_pyarrow: + idx = Index(pd.array([f"pandas_{i}" for i in range(100)], dtype="string[pyarrow]")) + indices_dict["string-pyarrow"] = idx + + +@pytest.fixture(params=indices_dict.keys()) +def index(request): + """ + Fixture for many "simple" kinds of indices. + + These indices are unlikely to cover corner cases, e.g. + - no names + - no NaTs/NaNs + - no values near implementation bounds + - ... + """ + # copy to avoid mutation, e.g. setting .name + return indices_dict[request.param].copy() + + +# Needed to generate cartesian product of indices +index_fixture2 = index + + +@pytest.fixture( + params=[ + key for key, value in indices_dict.items() if not isinstance(value, MultiIndex) + ] +) +def index_flat(request): + """ + index fixture, but excluding MultiIndex cases. + """ + key = request.param + return indices_dict[key].copy() + + +# Alias so we can test with cartesian product of index_flat +index_flat2 = index_flat + + +@pytest.fixture( + params=[ + key + for key, value in indices_dict.items() + if not ( + key.startswith(("int", "uint", "float")) + or key in ["range", "empty", "repeats", "bool-dtype"] + ) + and not isinstance(value, MultiIndex) + ] +) +def index_with_missing(request): + """ + Fixture for indices with missing values. + + Integer-dtype and empty cases are excluded because they cannot hold missing + values. + + MultiIndex is excluded because isna() is not defined for MultiIndex. + """ + + # GH 35538. 
Use deep copy to avoid illusive bug on np-dev + # GHA pipeline that writes into indices_dict despite copy + ind = indices_dict[request.param].copy(deep=True) + vals = ind.values.copy() + if request.param in ["tuples", "mi-with-dt64tz-level", "multi"]: + # For setting missing values in the top level of MultiIndex + vals = ind.tolist() + vals[0] = (None,) + vals[0][1:] + vals[-1] = (None,) + vals[-1][1:] + return MultiIndex.from_tuples(vals) + else: + vals[0] = None + vals[-1] = None + return type(ind)(vals) + + +# ---------------------------------------------------------------- +# Series' +# ---------------------------------------------------------------- +@pytest.fixture +def string_series() -> Series: + """ + Fixture for Series of floats with Index of unique strings + """ + return Series( + np.arange(30, dtype=np.float64) * 1.1, + index=Index([f"i_{i}" for i in range(30)], dtype=object), + name="series", + ) + + +@pytest.fixture +def object_series() -> Series: + """ + Fixture for Series of dtype object with Index of unique strings + """ + data = [f"foo_{i}" for i in range(30)] + index = Index([f"bar_{i}" for i in range(30)], dtype=object) + return Series(data, index=index, name="objects", dtype=object) + + +@pytest.fixture +def datetime_series() -> Series: + """ + Fixture for Series of floats with DatetimeIndex + """ + return Series( + np.random.default_rng(2).standard_normal(30), + index=date_range("2000-01-01", periods=30, freq="B"), + name="ts", + ) + + +def _create_series(index): + """Helper for the _series dict""" + size = len(index) + data = np.random.default_rng(2).standard_normal(size) + return Series(data, index=index, name="a", copy=False) + + +_series = { + f"series-with-{index_id}-index": _create_series(index) + for index_id, index in indices_dict.items() +} + + +@pytest.fixture +def series_with_simple_index(index) -> Series: + """ + Fixture for tests on series with changing types of indices. 
+ """ + return _create_series(index) + + +_narrow_series = { + f"{dtype.__name__}-series": Series( + range(30), index=[f"i-{i}" for i in range(30)], name="a", dtype=dtype + ) + for dtype in tm.NARROW_NP_DTYPES +} + + +_index_or_series_objs = {**indices_dict, **_series, **_narrow_series} + + +@pytest.fixture(params=_index_or_series_objs.keys()) +def index_or_series_obj(request): + """ + Fixture for tests on indexes, series and series with a narrow dtype + copy to avoid mutation, e.g. setting .name + """ + return _index_or_series_objs[request.param].copy(deep=True) + + +_typ_objects_series = { + f"{dtype.__name__}-series": Series(dtype) for dtype in tm.PYTHON_DATA_TYPES +} + + +_index_or_series_memory_objs = { + **indices_dict, + **_series, + **_narrow_series, + **_typ_objects_series, +} + + +@pytest.fixture(params=_index_or_series_memory_objs.keys()) +def index_or_series_memory_obj(request): + """ + Fixture for tests on indexes, series, series with a narrow dtype and + series with empty objects type + copy to avoid mutation, e.g. setting .name + """ + return _index_or_series_memory_objs[request.param].copy(deep=True) + + +# ---------------------------------------------------------------- +# DataFrames +# ---------------------------------------------------------------- +@pytest.fixture +def int_frame() -> DataFrame: + """ + Fixture for DataFrame of ints with index of unique strings + + Columns are ['A', 'B', 'C', 'D'] + """ + return DataFrame( + np.ones((30, 4), dtype=np.int64), + index=Index([f"foo_{i}" for i in range(30)], dtype=object), + columns=Index(list("ABCD"), dtype=object), + ) + + +@pytest.fixture +def float_frame() -> DataFrame: + """ + Fixture for DataFrame of floats with index of unique strings + + Columns are ['A', 'B', 'C', 'D']. 
+ """ + return DataFrame( + np.random.default_rng(2).standard_normal((30, 4)), + index=Index([f"foo_{i}" for i in range(30)]), + columns=Index(list("ABCD")), + ) + + +@pytest.fixture +def rand_series_with_duplicate_datetimeindex() -> Series: + """ + Fixture for Series with a DatetimeIndex that has duplicates. + """ + dates = [ + datetime(2000, 1, 2), + datetime(2000, 1, 2), + datetime(2000, 1, 2), + datetime(2000, 1, 3), + datetime(2000, 1, 3), + datetime(2000, 1, 3), + datetime(2000, 1, 4), + datetime(2000, 1, 4), + datetime(2000, 1, 4), + datetime(2000, 1, 5), + ] + + return Series(np.random.default_rng(2).standard_normal(len(dates)), index=dates) + + +# ---------------------------------------------------------------- +# Scalars +# ---------------------------------------------------------------- +@pytest.fixture( + params=[ + (Interval(left=0, right=5), IntervalDtype("int64", "right")), + (Interval(left=0.1, right=0.5), IntervalDtype("float64", "right")), + (Period("2012-01", freq="M"), "period[M]"), + (Period("2012-02-01", freq="D"), "period[D]"), + ( + Timestamp("2011-01-01", tz="US/Eastern"), + DatetimeTZDtype(unit="s", tz="US/Eastern"), + ), + (Timedelta(seconds=500), "timedelta64[ns]"), + ] +) +def ea_scalar_and_dtype(request): + return request.param + + +# ---------------------------------------------------------------- +# Operators & Operations +# ---------------------------------------------------------------- + + +@pytest.fixture(params=tm.arithmetic_dunder_methods) +def all_arithmetic_operators(request): + """ + Fixture for dunder names for common arithmetic operations. 
+ """ + return request.param + + +@pytest.fixture( + params=[ + operator.add, + ops.radd, + operator.sub, + ops.rsub, + operator.mul, + ops.rmul, + operator.truediv, + ops.rtruediv, + operator.floordiv, + ops.rfloordiv, + operator.mod, + ops.rmod, + operator.pow, + ops.rpow, + operator.eq, + operator.ne, + operator.lt, + operator.le, + operator.gt, + operator.ge, + operator.and_, + ops.rand_, + operator.xor, + ops.rxor, + operator.or_, + ops.ror_, + ] +) +def all_binary_operators(request): + """ + Fixture for operator and roperator arithmetic, comparison, and logical ops. + """ + return request.param + + +@pytest.fixture( + params=[ + operator.add, + ops.radd, + operator.sub, + ops.rsub, + operator.mul, + ops.rmul, + operator.truediv, + ops.rtruediv, + operator.floordiv, + ops.rfloordiv, + operator.mod, + ops.rmod, + operator.pow, + ops.rpow, + ] +) +def all_arithmetic_functions(request): + """ + Fixture for operator and roperator arithmetic functions. + + Notes + ----- + This includes divmod and rdivmod, whereas all_arithmetic_operators + does not. + """ + return request.param + + +_all_numeric_reductions = [ + "count", + "sum", + "max", + "min", + "mean", + "prod", + "std", + "var", + "median", + "kurt", + "skew", + "sem", +] + + +@pytest.fixture(params=_all_numeric_reductions) +def all_numeric_reductions(request): + """ + Fixture for numeric reduction names. + """ + return request.param + + +_all_boolean_reductions = ["all", "any"] + + +@pytest.fixture(params=_all_boolean_reductions) +def all_boolean_reductions(request): + """ + Fixture for boolean reduction names. + """ + return request.param + + +_all_reductions = _all_numeric_reductions + _all_boolean_reductions + + +@pytest.fixture(params=_all_reductions) +def all_reductions(request): + """ + Fixture for all (boolean + numeric) reduction names. 
+ """ + return request.param + + +@pytest.fixture( + params=[ + operator.eq, + operator.ne, + operator.gt, + operator.ge, + operator.lt, + operator.le, + ] +) +def comparison_op(request): + """ + Fixture for operator module comparison functions. + """ + return request.param + + +@pytest.fixture(params=["__le__", "__lt__", "__ge__", "__gt__"]) +def compare_operators_no_eq_ne(request): + """ + Fixture for dunder names for compare operations except == and != + + * >= + * > + * < + * <= + """ + return request.param + + +@pytest.fixture( + params=["__and__", "__rand__", "__or__", "__ror__", "__xor__", "__rxor__"] +) +def all_logical_operators(request): + """ + Fixture for dunder names for common logical operations + + * | + * & + * ^ + """ + return request.param + + +_all_numeric_accumulations = ["cumsum", "cumprod", "cummin", "cummax"] + + +@pytest.fixture(params=_all_numeric_accumulations) +def all_numeric_accumulations(request): + """ + Fixture for numeric accumulation names + """ + return request.param + + +# ---------------------------------------------------------------- +# Data sets/files +# ---------------------------------------------------------------- +@pytest.fixture +def strict_data_files(pytestconfig): + """ + Returns the configuration for the test setting `--no-strict-data-files`. + """ + return pytestconfig.getoption("--no-strict-data-files") + + +@pytest.fixture +def datapath(strict_data_files: str) -> Callable[..., str]: + """ + Get the path to a data file. + + Parameters + ---------- + path : str + Path to the file, relative to ``pandas/tests/`` + + Returns + ------- + path including ``pandas/tests``. + + Raises + ------ + ValueError + If the path doesn't exist and the --no-strict-data-files option is not set. 
+ """ + BASE_PATH = os.path.join(os.path.dirname(__file__), "tests") + + def deco(*args): + path = os.path.join(BASE_PATH, *args) + if not os.path.exists(path): + if strict_data_files: + raise ValueError( + f"Could not find file {path} and --no-strict-data-files is not set." + ) + pytest.skip(f"Could not find {path}.") + return path + + return deco + + +# ---------------------------------------------------------------- +# Time zones +# ---------------------------------------------------------------- +TIMEZONES = [ + None, + "UTC", + "US/Eastern", + "Asia/Tokyo", + "dateutil/US/Pacific", + "dateutil/Asia/Singapore", + "+01:15", + "-02:15", + "UTC+01:15", + "UTC-02:15", + tzutc(), + tzlocal(), + FixedOffset(300), + FixedOffset(0), + FixedOffset(-300), + timezone.utc, + timezone(timedelta(hours=1)), + timezone(timedelta(hours=-1), name="foo"), +] +if zoneinfo is not None: + TIMEZONES.extend( + [ + zoneinfo.ZoneInfo("US/Pacific"), # type: ignore[list-item] + zoneinfo.ZoneInfo("UTC"), # type: ignore[list-item] + ] + ) +TIMEZONE_IDS = [repr(i) for i in TIMEZONES] + + +@td.parametrize_fixture_doc(str(TIMEZONE_IDS)) +@pytest.fixture(params=TIMEZONES, ids=TIMEZONE_IDS) +def tz_naive_fixture(request): + """ + Fixture for trying timezones including default (None): {0} + """ + return request.param + + +@td.parametrize_fixture_doc(str(TIMEZONE_IDS[1:])) +@pytest.fixture(params=TIMEZONES[1:], ids=TIMEZONE_IDS[1:]) +def tz_aware_fixture(request): + """ + Fixture for trying explicit timezones: {0} + """ + return request.param + + +# Generate cartesian product of tz_aware_fixture: +tz_aware_fixture2 = tz_aware_fixture + + +_UTCS = ["utc", "dateutil/UTC", utc, tzutc(), timezone.utc] +if zoneinfo is not None: + _UTCS.append(zoneinfo.ZoneInfo("UTC")) + + +@pytest.fixture(params=_UTCS) +def utc_fixture(request): + """ + Fixture to provide variants of UTC timezone strings and tzinfo objects. 
+ """ + return request.param + + +utc_fixture2 = utc_fixture + + +@pytest.fixture(params=["s", "ms", "us", "ns"]) +def unit(request): + """ + datetime64 units we support. + """ + return request.param + + +unit2 = unit + + +# ---------------------------------------------------------------- +# Dtypes +# ---------------------------------------------------------------- +@pytest.fixture(params=tm.STRING_DTYPES) +def string_dtype(request): + """ + Parametrized fixture for string dtypes. + + * str + * 'str' + * 'U' + """ + return request.param + + +@pytest.fixture( + params=[ + "string[python]", + pytest.param("string[pyarrow]", marks=td.skip_if_no("pyarrow")), + ] +) +def nullable_string_dtype(request): + """ + Parametrized fixture for string dtypes. + + * 'string[python]' + * 'string[pyarrow]' + """ + return request.param + + +@pytest.fixture( + params=[ + "python", + pytest.param("pyarrow", marks=td.skip_if_no("pyarrow")), + pytest.param("pyarrow_numpy", marks=td.skip_if_no("pyarrow")), + ] +) +def string_storage(request): + """ + Parametrized fixture for pd.options.mode.string_storage. + + * 'python' + * 'pyarrow' + * 'pyarrow_numpy' + """ + return request.param + + +@pytest.fixture( + params=[ + "numpy_nullable", + pytest.param("pyarrow", marks=td.skip_if_no("pyarrow")), + ] +) +def dtype_backend(request): + """ + Parametrized fixture for pd.options.mode.string_storage. + + * 'python' + * 'pyarrow' + """ + return request.param + + +# Alias so we can test with cartesian product of string_storage +string_storage2 = string_storage + + +@pytest.fixture(params=tm.BYTES_DTYPES) +def bytes_dtype(request): + """ + Parametrized fixture for bytes dtypes. + + * bytes + * 'bytes' + """ + return request.param + + +@pytest.fixture(params=tm.OBJECT_DTYPES) +def object_dtype(request): + """ + Parametrized fixture for object dtypes. 
+ + * object + * 'object' + """ + return request.param + + +@pytest.fixture( + params=[ + "object", + "string[python]", + pytest.param("string[pyarrow]", marks=td.skip_if_no("pyarrow")), + pytest.param("string[pyarrow_numpy]", marks=td.skip_if_no("pyarrow")), + ] +) +def any_string_dtype(request): + """ + Parametrized fixture for string dtypes. + * 'object' + * 'string[python]' + * 'string[pyarrow]' + """ + return request.param + + +@pytest.fixture(params=tm.DATETIME64_DTYPES) +def datetime64_dtype(request): + """ + Parametrized fixture for datetime64 dtypes. + + * 'datetime64[ns]' + * 'M8[ns]' + """ + return request.param + + +@pytest.fixture(params=tm.TIMEDELTA64_DTYPES) +def timedelta64_dtype(request): + """ + Parametrized fixture for timedelta64 dtypes. + + * 'timedelta64[ns]' + * 'm8[ns]' + """ + return request.param + + +@pytest.fixture +def fixed_now_ts() -> Timestamp: + """ + Fixture emits fixed Timestamp.now() + """ + return Timestamp( # pyright: ignore[reportGeneralTypeIssues] + year=2021, month=1, day=1, hour=12, minute=4, second=13, microsecond=22 + ) + + +@pytest.fixture(params=tm.FLOAT_NUMPY_DTYPES) +def float_numpy_dtype(request): + """ + Parameterized fixture for float dtypes. + + * float + * 'float32' + * 'float64' + """ + return request.param + + +@pytest.fixture(params=tm.FLOAT_EA_DTYPES) +def float_ea_dtype(request): + """ + Parameterized fixture for float dtypes. + + * 'Float32' + * 'Float64' + """ + return request.param + + +@pytest.fixture(params=tm.ALL_FLOAT_DTYPES) +def any_float_dtype(request): + """ + Parameterized fixture for float dtypes. + + * float + * 'float32' + * 'float64' + * 'Float32' + * 'Float64' + """ + return request.param + + +@pytest.fixture(params=tm.COMPLEX_DTYPES) +def complex_dtype(request): + """ + Parameterized fixture for complex dtypes. 

    * complex
    * 'complex64'
    * 'complex128'
    """
    return request.param


@pytest.fixture(params=tm.COMPLEX_FLOAT_DTYPES)
def complex_or_float_dtype(request):
    """
    Parameterized fixture for complex and numpy float dtypes.

    * complex
    * 'complex64'
    * 'complex128'
    * float
    * 'float32'
    * 'float64'
    """
    return request.param


@pytest.fixture(params=tm.SIGNED_INT_NUMPY_DTYPES)
def any_signed_int_numpy_dtype(request):
    """
    Parameterized fixture for signed integer dtypes.

    * int
    * 'int8'
    * 'int16'
    * 'int32'
    * 'int64'
    """
    return request.param


@pytest.fixture(params=tm.UNSIGNED_INT_NUMPY_DTYPES)
def any_unsigned_int_numpy_dtype(request):
    """
    Parameterized fixture for unsigned integer dtypes.

    * 'uint8'
    * 'uint16'
    * 'uint32'
    * 'uint64'
    """
    return request.param


@pytest.fixture(params=tm.ALL_INT_NUMPY_DTYPES)
def any_int_numpy_dtype(request):
    """
    Parameterized fixture for any integer dtype.

    * int
    * 'int8'
    * 'uint8'
    * 'int16'
    * 'uint16'
    * 'int32'
    * 'uint32'
    * 'int64'
    * 'uint64'
    """
    return request.param


@pytest.fixture(params=tm.ALL_INT_EA_DTYPES)
def any_int_ea_dtype(request):
    """
    Parameterized fixture for any nullable integer dtype.

    * 'UInt8'
    * 'Int8'
    * 'UInt16'
    * 'Int16'
    * 'UInt32'
    * 'Int32'
    * 'UInt64'
    * 'Int64'
    """
    return request.param


@pytest.fixture(params=tm.ALL_INT_DTYPES)
def any_int_dtype(request):
    """
    Parameterized fixture for any integer dtype, numpy or nullable (EA).

    * int
    * 'int8'
    * 'uint8'
    * 'int16'
    * 'uint16'
    * 'int32'
    * 'uint32'
    * 'int64'
    * 'uint64'
    * 'UInt8'
    * 'Int8'
    * 'UInt16'
    * 'Int16'
    * 'UInt32'
    * 'Int32'
    * 'UInt64'
    * 'Int64'
    """
    return request.param


@pytest.fixture(params=tm.ALL_INT_EA_DTYPES + tm.FLOAT_EA_DTYPES)
def any_numeric_ea_dtype(request):
    """
    Parameterized fixture for any nullable integer dtype and
    any float ea dtypes.
+ + * 'UInt8' + * 'Int8' + * 'UInt16' + * 'Int16' + * 'UInt32' + * 'Int32' + * 'UInt64' + * 'Int64' + * 'Float32' + * 'Float64' + """ + return request.param + + +# Unsupported operand types for + ("List[Union[str, ExtensionDtype, dtype[Any], +# Type[object]]]" and "List[str]") +@pytest.fixture( + params=tm.ALL_INT_EA_DTYPES + + tm.FLOAT_EA_DTYPES + + tm.ALL_INT_PYARROW_DTYPES_STR_REPR + + tm.FLOAT_PYARROW_DTYPES_STR_REPR # type: ignore[operator] +) +def any_numeric_ea_and_arrow_dtype(request): + """ + Parameterized fixture for any nullable integer dtype and + any float ea dtypes. + + * 'UInt8' + * 'Int8' + * 'UInt16' + * 'Int16' + * 'UInt32' + * 'Int32' + * 'UInt64' + * 'Int64' + * 'Float32' + * 'Float64' + * 'uint8[pyarrow]' + * 'int8[pyarrow]' + * 'uint16[pyarrow]' + * 'int16[pyarrow]' + * 'uint32[pyarrow]' + * 'int32[pyarrow]' + * 'uint64[pyarrow]' + * 'int64[pyarrow]' + * 'float32[pyarrow]' + * 'float64[pyarrow]' + """ + return request.param + + +@pytest.fixture(params=tm.SIGNED_INT_EA_DTYPES) +def any_signed_int_ea_dtype(request): + """ + Parameterized fixture for any signed nullable integer dtype. + + * 'Int8' + * 'Int16' + * 'Int32' + * 'Int64' + """ + return request.param + + +@pytest.fixture(params=tm.ALL_REAL_NUMPY_DTYPES) +def any_real_numpy_dtype(request): + """ + Parameterized fixture for any (purely) real numeric dtype. + + * int + * 'int8' + * 'uint8' + * 'int16' + * 'uint16' + * 'int32' + * 'uint32' + * 'int64' + * 'uint64' + * float + * 'float32' + * 'float64' + """ + return request.param + + +@pytest.fixture(params=tm.ALL_REAL_DTYPES) +def any_real_numeric_dtype(request): + """ + Parameterized fixture for any (purely) real numeric dtype. + + * int + * 'int8' + * 'uint8' + * 'int16' + * 'uint16' + * 'int32' + * 'uint32' + * 'int64' + * 'uint64' + * float + * 'float32' + * 'float64' + + and associated ea dtypes. 
+ """ + return request.param + + +@pytest.fixture(params=tm.ALL_NUMPY_DTYPES) +def any_numpy_dtype(request): + """ + Parameterized fixture for all numpy dtypes. + + * bool + * 'bool' + * int + * 'int8' + * 'uint8' + * 'int16' + * 'uint16' + * 'int32' + * 'uint32' + * 'int64' + * 'uint64' + * float + * 'float32' + * 'float64' + * complex + * 'complex64' + * 'complex128' + * str + * 'str' + * 'U' + * bytes + * 'bytes' + * 'datetime64[ns]' + * 'M8[ns]' + * 'timedelta64[ns]' + * 'm8[ns]' + * object + * 'object' + """ + return request.param + + +@pytest.fixture(params=tm.ALL_REAL_NULLABLE_DTYPES) +def any_real_nullable_dtype(request): + """ + Parameterized fixture for all real dtypes that can hold NA. + + * float + * 'float32' + * 'float64' + * 'Float32' + * 'Float64' + * 'UInt8' + * 'UInt16' + * 'UInt32' + * 'UInt64' + * 'Int8' + * 'Int16' + * 'Int32' + * 'Int64' + * 'uint8[pyarrow]' + * 'uint16[pyarrow]' + * 'uint32[pyarrow]' + * 'uint64[pyarrow]' + * 'int8[pyarrow]' + * 'int16[pyarrow]' + * 'int32[pyarrow]' + * 'int64[pyarrow]' + * 'float[pyarrow]' + * 'double[pyarrow]' + """ + return request.param + + +@pytest.fixture(params=tm.ALL_NUMERIC_DTYPES) +def any_numeric_dtype(request): + """ + Parameterized fixture for all numeric dtypes. 
+ + * int + * 'int8' + * 'uint8' + * 'int16' + * 'uint16' + * 'int32' + * 'uint32' + * 'int64' + * 'uint64' + * float + * 'float32' + * 'float64' + * complex + * 'complex64' + * 'complex128' + * 'UInt8' + * 'Int8' + * 'UInt16' + * 'Int16' + * 'UInt32' + * 'Int32' + * 'UInt64' + * 'Int64' + * 'Float32' + * 'Float64' + """ + return request.param + + +# categoricals are handled separately +_any_skipna_inferred_dtype = [ + ("string", ["a", np.nan, "c"]), + ("string", ["a", pd.NA, "c"]), + ("mixed", ["a", pd.NaT, "c"]), # pd.NaT not considered valid by is_string_array + ("bytes", [b"a", np.nan, b"c"]), + ("empty", [np.nan, np.nan, np.nan]), + ("empty", []), + ("mixed-integer", ["a", np.nan, 2]), + ("mixed", ["a", np.nan, 2.0]), + ("floating", [1.0, np.nan, 2.0]), + ("integer", [1, np.nan, 2]), + ("mixed-integer-float", [1, np.nan, 2.0]), + ("decimal", [Decimal(1), np.nan, Decimal(2)]), + ("boolean", [True, np.nan, False]), + ("boolean", [True, pd.NA, False]), + ("datetime64", [np.datetime64("2013-01-01"), np.nan, np.datetime64("2018-01-01")]), + ("datetime", [Timestamp("20130101"), np.nan, Timestamp("20180101")]), + ("date", [date(2013, 1, 1), np.nan, date(2018, 1, 1)]), + ("complex", [1 + 1j, np.nan, 2 + 2j]), + # The following dtype is commented out due to GH 23554 + # ('timedelta64', [np.timedelta64(1, 'D'), + # np.nan, np.timedelta64(2, 'D')]), + ("timedelta", [timedelta(1), np.nan, timedelta(2)]), + ("time", [time(1), np.nan, time(2)]), + ("period", [Period(2013), pd.NaT, Period(2018)]), + ("interval", [Interval(0, 1), np.nan, Interval(0, 2)]), +] +ids, _ = zip(*_any_skipna_inferred_dtype) # use inferred type as fixture-id + + +@pytest.fixture(params=_any_skipna_inferred_dtype, ids=ids) +def any_skipna_inferred_dtype(request): + """ + Fixture for all inferred dtypes from _libs.lib.infer_dtype + + The covered (inferred) types are: + * 'string' + * 'empty' + * 'bytes' + * 'mixed' + * 'mixed-integer' + * 'mixed-integer-float' + * 'floating' + * 'integer' + * 'decimal' 
+ * 'boolean' + * 'datetime64' + * 'datetime' + * 'date' + * 'timedelta' + * 'time' + * 'period' + * 'interval' + + Returns + ------- + inferred_dtype : str + The string for the inferred dtype from _libs.lib.infer_dtype + values : np.ndarray + An array of object dtype that will be inferred to have + `inferred_dtype` + + Examples + -------- + >>> from pandas._libs import lib + >>> + >>> def test_something(any_skipna_inferred_dtype): + ... inferred_dtype, values = any_skipna_inferred_dtype + ... # will pass + ... assert lib.infer_dtype(values, skipna=True) == inferred_dtype + """ + inferred_dtype, values = request.param + values = np.array(values, dtype=object) # object dtype to avoid casting + + # correctness of inference tested in tests/dtypes/test_inference.py + return inferred_dtype, values + + +# ---------------------------------------------------------------- +# Misc +# ---------------------------------------------------------------- +@pytest.fixture +def ip(): + """ + Get an instance of IPython.InteractiveShell. + + Will raise a skip if IPython is not installed. + """ + pytest.importorskip("IPython", minversion="6.0.0") + from IPython.core.interactiveshell import InteractiveShell + + # GH#35711 make sure sqlite history file handle is not leaked + from traitlets.config import Config # isort:skip + + c = Config() + c.HistoryManager.hist_file = ":memory:" + + return InteractiveShell(config=c) + + +@pytest.fixture(params=["bsr", "coo", "csc", "csr", "dia", "dok", "lil"]) +def spmatrix(request): + """ + Yields scipy sparse matrix classes. + """ + sparse = pytest.importorskip("scipy.sparse") + + return getattr(sparse, request.param + "_matrix") + + +@pytest.fixture( + params=[ + getattr(pd.offsets, o) + for o in pd.offsets.__all__ + if issubclass(getattr(pd.offsets, o), pd.offsets.Tick) and o != "Tick" + ] +) +def tick_classes(request): + """ + Fixture for Tick based datetime offsets available for a time series. 
+ """ + return request.param + + +@pytest.fixture(params=[None, lambda x: x]) +def sort_by_key(request): + """ + Simple fixture for testing keys in sorting methods. + Tests None (no key) and the identity key. + """ + return request.param + + +@pytest.fixture( + params=[ + ("foo", None, None), + ("Egon", "Venkman", None), + ("NCC1701D", "NCC1701D", "NCC1701D"), + # possibly-matching NAs + (np.nan, np.nan, np.nan), + (np.nan, pd.NaT, None), + (np.nan, pd.NA, None), + (pd.NA, pd.NA, pd.NA), + ] +) +def names(request) -> tuple[Hashable, Hashable, Hashable]: + """ + A 3-tuple of names, the first two for operands, the last for a result. + """ + return request.param + + +@pytest.fixture(params=[tm.setitem, tm.loc, tm.iloc]) +def indexer_sli(request): + """ + Parametrize over __setitem__, loc.__setitem__, iloc.__setitem__ + """ + return request.param + + +@pytest.fixture(params=[tm.loc, tm.iloc]) +def indexer_li(request): + """ + Parametrize over loc.__getitem__, iloc.__getitem__ + """ + return request.param + + +@pytest.fixture(params=[tm.setitem, tm.iloc]) +def indexer_si(request): + """ + Parametrize over __setitem__, iloc.__setitem__ + """ + return request.param + + +@pytest.fixture(params=[tm.setitem, tm.loc]) +def indexer_sl(request): + """ + Parametrize over __setitem__, loc.__setitem__ + """ + return request.param + + +@pytest.fixture(params=[tm.at, tm.loc]) +def indexer_al(request): + """ + Parametrize over at.__setitem__, loc.__setitem__ + """ + return request.param + + +@pytest.fixture(params=[tm.iat, tm.iloc]) +def indexer_ial(request): + """ + Parametrize over iat.__setitem__, iloc.__setitem__ + """ + return request.param + + +@pytest.fixture +def using_array_manager() -> bool: + """ + Fixture to check if the array manager is being used. + """ + return _get_option("mode.data_manager", silent=True) == "array" + + +@pytest.fixture +def using_copy_on_write() -> bool: + """ + Fixture to check if Copy-on-Write is enabled. 
+ """ + return ( + pd.options.mode.copy_on_write is True + and _get_option("mode.data_manager", silent=True) == "block" + ) + + +@pytest.fixture +def warn_copy_on_write() -> bool: + """ + Fixture to check if Copy-on-Write is in warning mode. + """ + return ( + pd.options.mode.copy_on_write == "warn" + and _get_option("mode.data_manager", silent=True) == "block" + ) + + +@pytest.fixture +def using_infer_string() -> bool: + """ + Fixture to check if infer string option is enabled. + """ + return pd.options.future.infer_string is True + + +warsaws = ["Europe/Warsaw", "dateutil/Europe/Warsaw"] +if zoneinfo is not None: + warsaws.append(zoneinfo.ZoneInfo("Europe/Warsaw")) # type: ignore[arg-type] + + +@pytest.fixture(params=warsaws) +def warsaw(request) -> str: + """ + tzinfo for Europe/Warsaw using pytz, dateutil, or zoneinfo. + """ + return request.param + + +@pytest.fixture() +def arrow_string_storage(): + return ("pyarrow", "pyarrow_numpy") diff --git a/lib/python3.10/site-packages/pandas/pyproject.toml b/lib/python3.10/site-packages/pandas/pyproject.toml new file mode 100644 index 0000000000000000000000000000000000000000..238abd85dcdb1cc6d3bf83ceee3fc2d5a6933650 --- /dev/null +++ b/lib/python3.10/site-packages/pandas/pyproject.toml @@ -0,0 +1,811 @@ +[build-system] +# Minimum requirements for the build system to execute. +# See https://github.com/scipy/scipy/pull/12940 for the AIX issue. 
+requires = [ + "meson-python==0.13.1", + "meson==1.2.1", + "wheel", + "Cython~=3.0.5", # Note: sync with setup.py, environment.yml and asv.conf.json + # Force numpy higher than 2.0, so that built wheels are compatible + # with both numpy 1 and 2 + "numpy>=2.0", + "versioneer[toml]" +] + +build-backend = "mesonpy" + +[project] +name = 'pandas' +dynamic = [ + 'version' +] +description = 'Powerful data structures for data analysis, time series, and statistics' +readme = 'README.md' +authors = [ + { name = 'The Pandas Development Team', email='pandas-dev@python.org' }, +] +license = {file = 'LICENSE'} +requires-python = '>=3.9' +dependencies = [ + "numpy>=1.22.4; python_version<'3.11'", + "numpy>=1.23.2; python_version=='3.11'", + "numpy>=1.26.0; python_version>='3.12'", + "python-dateutil>=2.8.2", + "pytz>=2020.1", + "tzdata>=2022.7" +] +classifiers = [ + 'Development Status :: 5 - Production/Stable', + 'Environment :: Console', + 'Intended Audience :: Science/Research', + 'License :: OSI Approved :: BSD License', + 'Operating System :: OS Independent', + 'Programming Language :: Cython', + 'Programming Language :: Python', + 'Programming Language :: Python :: 3', + 'Programming Language :: Python :: 3 :: Only', + 'Programming Language :: Python :: 3.9', + 'Programming Language :: Python :: 3.10', + 'Programming Language :: Python :: 3.11', + 'Programming Language :: Python :: 3.12', + 'Topic :: Scientific/Engineering' +] + +[project.urls] +homepage = 'https://pandas.pydata.org' +documentation = 'https://pandas.pydata.org/docs/' +repository = 'https://github.com/pandas-dev/pandas' + +[project.entry-points."pandas_plotting_backends"] +matplotlib = "pandas:plotting._matplotlib" + +[project.optional-dependencies] +test = ['hypothesis>=6.46.1', 'pytest>=7.3.2', 'pytest-xdist>=2.2.0'] +pyarrow = ['pyarrow>=10.0.1'] +performance = ['bottleneck>=1.3.6', 'numba>=0.56.4', 'numexpr>=2.8.4'] +computation = ['scipy>=1.10.0', 'xarray>=2022.12.0'] +fss = ['fsspec>=2022.11.0'] +aws 
= ['s3fs>=2022.11.0'] +gcp = ['gcsfs>=2022.11.0', 'pandas-gbq>=0.19.0'] +excel = ['odfpy>=1.4.1', 'openpyxl>=3.1.0', 'python-calamine>=0.1.7', 'pyxlsb>=1.0.10', 'xlrd>=2.0.1', 'xlsxwriter>=3.0.5'] +parquet = ['pyarrow>=10.0.1'] +feather = ['pyarrow>=10.0.1'] +hdf5 = [# blosc only available on conda (https://github.com/Blosc/python-blosc/issues/297) + #'blosc>=1.20.1', + 'tables>=3.8.0'] +spss = ['pyreadstat>=1.2.0'] +postgresql = ['SQLAlchemy>=2.0.0', 'psycopg2>=2.9.6', 'adbc-driver-postgresql>=0.8.0'] +mysql = ['SQLAlchemy>=2.0.0', 'pymysql>=1.0.2'] +sql-other = ['SQLAlchemy>=2.0.0', 'adbc-driver-postgresql>=0.8.0', 'adbc-driver-sqlite>=0.8.0'] +html = ['beautifulsoup4>=4.11.2', 'html5lib>=1.1', 'lxml>=4.9.2'] +xml = ['lxml>=4.9.2'] +plot = ['matplotlib>=3.6.3'] +output-formatting = ['jinja2>=3.1.2', 'tabulate>=0.9.0'] +clipboard = ['PyQt5>=5.15.9', 'qtpy>=2.3.0'] +compression = ['zstandard>=0.19.0'] +consortium-standard = ['dataframe-api-compat>=0.1.7'] +all = ['adbc-driver-postgresql>=0.8.0', + 'adbc-driver-sqlite>=0.8.0', + 'beautifulsoup4>=4.11.2', + # blosc only available on conda (https://github.com/Blosc/python-blosc/issues/297) + #'blosc>=1.21.3', + 'bottleneck>=1.3.6', + 'dataframe-api-compat>=0.1.7', + 'fastparquet>=2022.12.0', + 'fsspec>=2022.11.0', + 'gcsfs>=2022.11.0', + 'html5lib>=1.1', + 'hypothesis>=6.46.1', + 'jinja2>=3.1.2', + 'lxml>=4.9.2', + 'matplotlib>=3.6.3', + 'numba>=0.56.4', + 'numexpr>=2.8.4', + 'odfpy>=1.4.1', + 'openpyxl>=3.1.0', + 'pandas-gbq>=0.19.0', + 'psycopg2>=2.9.6', + 'pyarrow>=10.0.1', + 'pymysql>=1.0.2', + 'PyQt5>=5.15.9', + 'pyreadstat>=1.2.0', + 'pytest>=7.3.2', + 'pytest-xdist>=2.2.0', + 'python-calamine>=0.1.7', + 'pyxlsb>=1.0.10', + 'qtpy>=2.3.0', + 'scipy>=1.10.0', + 's3fs>=2022.11.0', + 'SQLAlchemy>=2.0.0', + 'tables>=3.8.0', + 'tabulate>=0.9.0', + 'xarray>=2022.12.0', + 'xlrd>=2.0.1', + 'xlsxwriter>=3.0.5', + 'zstandard>=0.19.0'] + +# TODO: Remove after setuptools support is dropped. 
+[tool.setuptools] +include-package-data = true + +[tool.setuptools.packages.find] +include = ["pandas", "pandas.*"] +namespaces = false + +[tool.setuptools.exclude-package-data] +"*" = ["*.c", "*.h"] + +# See the docstring in versioneer.py for instructions. Note that you must +# re-run 'versioneer.py setup' after changing this section, and commit the +# resulting files. +[tool.versioneer] +VCS = "git" +style = "pep440" +versionfile_source = "pandas/_version.py" +versionfile_build = "pandas/_version.py" +tag_prefix = "v" +parentdir_prefix = "pandas-" + +[tool.meson-python.args] +setup = ['--vsenv'] # For Windows + +[tool.cibuildwheel] +skip = "cp36-* cp37-* cp38-* pp* *_i686 *_ppc64le *_s390x" +build-verbosity = "3" +environment = {LDFLAGS="-Wl,--strip-all"} +# pytz 2024.2 causing some failures +test-requires = "hypothesis>=6.46.1 pytest>=7.3.2 pytest-xdist>=2.2.0 pytz<2024.2" +test-command = """ + PANDAS_CI='1' python -c 'import pandas as pd; \ + pd.test(extra_args=["-m not clipboard and not single_cpu and not slow and not network and not db", "-n 2", "--no-strict-data-files"]); \ + pd.test(extra_args=["-m not clipboard and single_cpu and not slow and not network and not db", "--no-strict-data-files"]);' \ + """ +free-threaded-support = true +before-build = "PACKAGE_DIR={package} bash {package}/scripts/cibw_before_build.sh" + +[tool.cibuildwheel.windows] +before-build = "pip install delvewheel && bash {package}/scripts/cibw_before_build.sh" +repair-wheel-command = "delvewheel repair -w {dest_dir} {wheel}" + +[[tool.cibuildwheel.overrides]] +select = "*-manylinux_aarch64*" +test-command = """ + PANDAS_CI='1' python -c 'import pandas as pd; \ + pd.test(extra_args=["-m not clipboard and not single_cpu and not slow and not network and not db and not fails_arm_wheels", "-n 2", "--no-strict-data-files"]); \ + pd.test(extra_args=["-m not clipboard and single_cpu and not slow and not network and not db", "--no-strict-data-files"]);' \ + """ + 
+[[tool.cibuildwheel.overrides]] +select = "*-musllinux*" +before-test = "apk update && apk add musl-locales" + +[[tool.cibuildwheel.overrides]] +select = "*-win*" +# We test separately for Windows, since we use +# the windowsservercore docker image to check if any dlls are +# missing from the wheel +test-command = "" + +[[tool.cibuildwheel.overrides]] +# Don't strip wheels on macOS. +# macOS doesn't support stripping wheels with linker +# https://github.com/MacPython/numpy-wheels/pull/87#issuecomment-624878264 +select = "*-macosx*" +environment = {CFLAGS="-g0"} + +[tool.black] +target-version = ['py39', 'py310'] +required-version = '23.11.0' +exclude = ''' +( + asv_bench/env + | \.egg + | \.git + | \.hg + | \.mypy_cache + | \.nox + | \.tox + | \.venv + | _build + | buck-out + | build + | dist + | setup.py +) +''' + +[tool.ruff] +line-length = 88 +target-version = "py310" +fix = true +unfixable = [] +typing-modules = ["pandas._typing"] + +select = [ + # pyflakes + "F", + # pycodestyle + "E", "W", + # flake8-2020 + "YTT", + # flake8-bugbear + "B", + # flake8-quotes + "Q", + # flake8-debugger + "T10", + # flake8-gettext + "INT", + # pylint + "PL", + # misc lints + "PIE", + # flake8-pyi + "PYI", + # tidy imports + "TID", + # implicit string concatenation + "ISC", + # type-checking imports + "TCH", + # comprehensions + "C4", + # pygrep-hooks + "PGH", + # Ruff-specific rules + "RUF", + # flake8-bandit: exec-builtin + "S102", + # numpy-legacy-random + "NPY002", + # Perflint + "PERF", + # flynt + "FLY", + # flake8-logging-format + "G", + # flake8-future-annotations + "FA", +] + +ignore = [ + ### Intentionally disabled + # space before : (needed for how black formats slicing) + "E203", + # module level import not at top of file + "E402", + # do not assign a lambda expression, use a def + "E731", + # line break before binary operator + # "W503", # not yet implemented + # line break after binary operator + # "W504", # not yet implemented + # controversial + "B006", + # 
controversial + "B007", + # controversial + "B008", + # setattr is used to side-step mypy + "B009", + # getattr is used to side-step mypy + "B010", + # tests use assert False + "B011", + # tests use comparisons but not their returned value + "B015", + # false positives + "B019", + # Loop control variable overrides iterable it iterates + "B020", + # Function definition does not bind loop variable + "B023", + # Functions defined inside a loop must not use variables redefined in the loop + # "B301", # not yet implemented + # Only works with python >=3.10 + "B905", + # Too many arguments to function call + "PLR0913", + # Too many returns + "PLR0911", + # Too many branches + "PLR0912", + # Too many statements + "PLR0915", + # Redefined loop name + "PLW2901", + # Global statements are discouraged + "PLW0603", + # Docstrings should not be included in stubs + "PYI021", + # Use `typing.NamedTuple` instead of `collections.namedtuple` + "PYI024", + # No builtin `eval()` allowed + "PGH001", + # compare-to-empty-string + "PLC1901", + # while int | float can be shortened to float, the former is more explicit + "PYI041", + # incorrect-dict-iterator, flags valid Series.items usage + "PERF102", + # try-except-in-loop, becomes useless in Python 3.11 + "PERF203", + + + ### TODO: Enable gradually + # Useless statement + "B018", + # Within an except clause, raise exceptions with ... 
+ "B904", + # Magic number + "PLR2004", + # comparison-with-itself + "PLR0124", + # Consider `elif` instead of `else` then `if` to remove indentation level + "PLR5501", + # collection-literal-concatenation + "RUF005", + # pairwise-over-zipped (>=PY310 only) + "RUF007", + # explicit-f-string-type-conversion + "RUF010", + # mutable-class-default + "RUF012" +] + +exclude = [ + "doc/sphinxext/*.py", + "doc/build/*.py", + "doc/temp/*.py", + ".eggs/*.py", + # vendored files + "pandas/util/version/*", + "pandas/io/clipboard/__init__.py", + # exclude asv benchmark environments from linting + "env", +] + +[tool.ruff.per-file-ignores] +# relative imports allowed for asv_bench +"asv_bench/*" = ["TID", "NPY002"] +# to be enabled gradually +"pandas/core/*" = ["PLR5501"] +"pandas/tests/*" = ["B028", "FLY"] +"scripts/*" = ["B028"] +# Keep this one enabled +"pandas/_typing.py" = ["TCH"] + +[tool.pylint.messages_control] +max-line-length = 88 +disable = [ + # intentionally turned off + "bad-mcs-classmethod-argument", + "broad-except", + "c-extension-no-member", + "comparison-with-itself", + "consider-using-enumerate", + "import-error", + "import-outside-toplevel", + "invalid-name", + "invalid-unary-operand-type", + "line-too-long", + "no-else-continue", + "no-else-raise", + "no-else-return", + "no-member", + "no-name-in-module", + "not-an-iterable", + "overridden-final-method", + "pointless-statement", + "redundant-keyword-arg", + "singleton-comparison", + "too-many-ancestors", + "too-many-arguments", + "too-many-boolean-expressions", + "too-many-branches", + "too-many-function-args", + "too-many-instance-attributes", + "too-many-locals", + "too-many-nested-blocks", + "too-many-public-methods", + "too-many-return-statements", + "too-many-statements", + "unexpected-keyword-arg", + "ungrouped-imports", + "unsubscriptable-object", + "unsupported-assignment-operation", + "unsupported-membership-test", + "unused-import", + "use-dict-literal", + "use-implicit-booleaness-not-comparison", 
+ "use-implicit-booleaness-not-len", + "wrong-import-order", + "wrong-import-position", + "redefined-loop-name", + + # misc + "abstract-class-instantiated", + "no-value-for-parameter", + "undefined-variable", + "unpacking-non-sequence", + "used-before-assignment", + + # pylint type "C": convention, for programming standard violation + "missing-class-docstring", + "missing-function-docstring", + "missing-module-docstring", + "superfluous-parens", + "too-many-lines", + "unidiomatic-typecheck", + "unnecessary-dunder-call", + "unnecessary-lambda-assignment", + + # pylint type "R": refactor, for bad code smell + "consider-using-with", + "cyclic-import", + "duplicate-code", + "inconsistent-return-statements", + "redefined-argument-from-local", + "too-few-public-methods", + + # pylint type "W": warning, for python specific problems + "abstract-method", + "arguments-differ", + "arguments-out-of-order", + "arguments-renamed", + "attribute-defined-outside-init", + "broad-exception-raised", + "comparison-with-callable", + "dangerous-default-value", + "deprecated-module", + "eval-used", + "expression-not-assigned", + "fixme", + "global-statement", + "invalid-overridden-method", + "keyword-arg-before-vararg", + "possibly-unused-variable", + "protected-access", + "raise-missing-from", + "redefined-builtin", + "redefined-outer-name", + "self-cls-assignment", + "signature-differs", + "super-init-not-called", + "try-except-raise", + "unnecessary-lambda", + "unused-argument", + "unused-variable", + "using-constant-test" +] + +[tool.pytest.ini_options] +# sync minversion with pyproject.toml & install.rst +minversion = "7.3.2" +addopts = "--strict-markers --strict-config --capture=no --durations=30 --junitxml=test-data.xml" +empty_parameter_set_mark = "fail_at_collect" +xfail_strict = true +testpaths = "pandas" +doctest_optionflags = [ + "NORMALIZE_WHITESPACE", + "IGNORE_EXCEPTION_DETAIL", + "ELLIPSIS", +] +filterwarnings = [ + "error:::pandas", + "error::ResourceWarning", + 
"error::pytest.PytestUnraisableExceptionWarning", + # TODO(PY311-minimum): Specify EncodingWarning + # Ignore 3rd party EncodingWarning but raise on pandas' + "ignore:.*encoding.* argument not specified", + "error:.*encoding.* argument not specified::pandas", + "ignore:.*ssl.SSLSocket:pytest.PytestUnraisableExceptionWarning", + "ignore:.*ssl.SSLSocket:ResourceWarning", + # GH 44844: Can remove once minimum matplotlib version >= 3.7 + "ignore:.*FileIO:pytest.PytestUnraisableExceptionWarning", + "ignore:.*BufferedRandom:ResourceWarning", + "ignore::ResourceWarning:asyncio", + # From plotting doctests + "ignore:More than 20 figures have been opened:RuntimeWarning", + # Will be fixed in numba 0.56: https://github.com/numba/numba/issues/7758 + "ignore:`np.MachAr` is deprecated:DeprecationWarning:numba", + "ignore:.*urllib3:DeprecationWarning:botocore", + "ignore:Setuptools is replacing distutils.:UserWarning:_distutils_hack", + # https://github.com/PyTables/PyTables/issues/822 + "ignore:a closed node found in the registry:UserWarning:tables", + "ignore:`np.object` is a deprecated:DeprecationWarning:tables", + "ignore:tostring:DeprecationWarning:tables", + "ignore:distutils Version classes are deprecated:DeprecationWarning:pandas_datareader", + "ignore:distutils Version classes are deprecated:DeprecationWarning:numexpr", + "ignore:distutils Version classes are deprecated:DeprecationWarning:fastparquet", + "ignore:distutils Version classes are deprecated:DeprecationWarning:fsspec", + # Can be removed once https://github.com/numpy/numpy/pull/24794 is merged + "ignore:.*In the future `np.long` will be defined as.*:FutureWarning", +] +junit_family = "xunit2" +markers = [ + "single_cpu: tests that should run on a single cpu only", + "slow: mark a test as slow", + "network: mark a test as network", + "db: tests requiring a database (mysql or postgres)", + "clipboard: mark a pd.read_clipboard test", + "arm_slow: mark a test as slow for arm64 architecture", + "skip_ubsan: Tests 
known to fail UBSAN check", + # TODO: someone should investigate this ... + # these tests only fail in the wheel builder and don't fail in regular + # ARM CI + "fails_arm_wheels: Tests that fail in the ARM wheel build only", +] + +[tool.mypy] +# Import discovery +mypy_path = "typings" +files = ["pandas", "typings"] +namespace_packages = false +explicit_package_bases = false +ignore_missing_imports = true +follow_imports = "normal" +follow_imports_for_stubs = false +no_site_packages = false +no_silence_site_packages = false +# Platform configuration +python_version = "3.11" +platform = "linux-64" +# Disallow dynamic typing +disallow_any_unimported = false # TODO +disallow_any_expr = false # TODO +disallow_any_decorated = false # TODO +disallow_any_explicit = false # TODO +disallow_any_generics = false # TODO +disallow_subclassing_any = false # TODO +# Untyped definitions and calls +disallow_untyped_calls = true +disallow_untyped_defs = true +disallow_incomplete_defs = true +check_untyped_defs = true +disallow_untyped_decorators = true +# None and Optional handling +no_implicit_optional = true +strict_optional = true +# Configuring warnings +warn_redundant_casts = true +warn_unused_ignores = true +warn_no_return = true +warn_return_any = false # TODO +warn_unreachable = false # GH#27396 +# Suppressing errors +ignore_errors = false +enable_error_code = "ignore-without-code" +# Miscellaneous strictness flags +allow_untyped_globals = false +allow_redefinition = false +local_partial_types = false +implicit_reexport = true +strict_equality = true +# Configuring error messages +show_error_context = false +show_column_numbers = false +show_error_codes = true + +[[tool.mypy.overrides]] +module = [ + "pandas._config.config", # TODO + "pandas._libs.*", + "pandas._testing.*", # TODO + "pandas.arrays", # TODO + "pandas.compat.numpy.function", # TODO + "pandas.compat._optional", # TODO + "pandas.compat.compressors", # TODO + "pandas.compat.pickle_compat", # TODO + 
"pandas.core._numba.executor", # TODO + "pandas.core.array_algos.datetimelike_accumulations", # TODO + "pandas.core.array_algos.masked_accumulations", # TODO + "pandas.core.array_algos.masked_reductions", # TODO + "pandas.core.array_algos.putmask", # TODO + "pandas.core.array_algos.quantile", # TODO + "pandas.core.array_algos.replace", # TODO + "pandas.core.array_algos.take", # TODO + "pandas.core.arrays.*", # TODO + "pandas.core.computation.*", # TODO + "pandas.core.dtypes.astype", # TODO + "pandas.core.dtypes.cast", # TODO + "pandas.core.dtypes.common", # TODO + "pandas.core.dtypes.concat", # TODO + "pandas.core.dtypes.dtypes", # TODO + "pandas.core.dtypes.generic", # TODO + "pandas.core.dtypes.inference", # TODO + "pandas.core.dtypes.missing", # TODO + "pandas.core.groupby.categorical", # TODO + "pandas.core.groupby.generic", # TODO + "pandas.core.groupby.grouper", # TODO + "pandas.core.groupby.groupby", # TODO + "pandas.core.groupby.ops", # TODO + "pandas.core.indexers.*", # TODO + "pandas.core.indexes.*", # TODO + "pandas.core.interchange.column", # TODO + "pandas.core.interchange.dataframe_protocol", # TODO + "pandas.core.interchange.from_dataframe", # TODO + "pandas.core.internals.*", # TODO + "pandas.core.methods.*", # TODO + "pandas.core.ops.array_ops", # TODO + "pandas.core.ops.common", # TODO + "pandas.core.ops.invalid", # TODO + "pandas.core.ops.mask_ops", # TODO + "pandas.core.ops.missing", # TODO + "pandas.core.reshape.*", # TODO + "pandas.core.strings.*", # TODO + "pandas.core.tools.*", # TODO + "pandas.core.window.common", # TODO + "pandas.core.window.ewm", # TODO + "pandas.core.window.expanding", # TODO + "pandas.core.window.numba_", # TODO + "pandas.core.window.online", # TODO + "pandas.core.window.rolling", # TODO + "pandas.core.accessor", # TODO + "pandas.core.algorithms", # TODO + "pandas.core.apply", # TODO + "pandas.core.arraylike", # TODO + "pandas.core.base", # TODO + "pandas.core.common", # TODO + "pandas.core.config_init", # TODO + 
"pandas.core.construction", # TODO + "pandas.core.flags", # TODO + "pandas.core.frame", # TODO + "pandas.core.generic", # TODO + "pandas.core.indexing", # TODO + "pandas.core.missing", # TODO + "pandas.core.nanops", # TODO + "pandas.core.resample", # TODO + "pandas.core.roperator", # TODO + "pandas.core.sample", # TODO + "pandas.core.series", # TODO + "pandas.core.sorting", # TODO + "pandas.errors", # TODO + "pandas.io.clipboard", # TODO + "pandas.io.excel._base", # TODO + "pandas.io.excel._odfreader", # TODO + "pandas.io.excel._odswriter", # TODO + "pandas.io.excel._openpyxl", # TODO + "pandas.io.excel._pyxlsb", # TODO + "pandas.io.excel._xlrd", # TODO + "pandas.io.excel._xlsxwriter", # TODO + "pandas.io.formats.console", # TODO + "pandas.io.formats.css", # TODO + "pandas.io.formats.excel", # TODO + "pandas.io.formats.format", # TODO + "pandas.io.formats.info", # TODO + "pandas.io.formats.printing", # TODO + "pandas.io.formats.style", # TODO + "pandas.io.formats.style_render", # TODO + "pandas.io.formats.xml", # TODO + "pandas.io.json.*", # TODO + "pandas.io.parsers.*", # TODO + "pandas.io.sas.sas_xport", # TODO + "pandas.io.sas.sas7bdat", # TODO + "pandas.io.clipboards", # TODO + "pandas.io.common", # TODO + "pandas.io.gbq", # TODO + "pandas.io.html", # TODO + "pandas.io.gbq", # TODO + "pandas.io.parquet", # TODO + "pandas.io.pytables", # TODO + "pandas.io.sql", # TODO + "pandas.io.stata", # TODO + "pandas.io.xml", # TODO + "pandas.plotting.*", # TODO + "pandas.tests.*", + "pandas.tseries.frequencies", # TODO + "pandas.tseries.holiday", # TODO + "pandas.util._decorators", # TODO + "pandas.util._doctools", # TODO + "pandas.util._print_versions", # TODO + "pandas.util._test_decorators", # TODO + "pandas.util._validators", # TODO + "pandas.util", # TODO + "pandas._version", + "pandas.conftest", + "pandas" +] +disallow_untyped_calls = false +disallow_untyped_defs = false +disallow_incomplete_defs = false + +[[tool.mypy.overrides]] +module = [ + "pandas.tests.*", + 
"pandas._version", + "pandas.io.clipboard", +] +check_untyped_defs = false + +[[tool.mypy.overrides]] +module = [ + "pandas.tests.apply.test_series_apply", + "pandas.tests.arithmetic.conftest", + "pandas.tests.arrays.sparse.test_combine_concat", + "pandas.tests.dtypes.test_common", + "pandas.tests.frame.methods.test_to_records", + "pandas.tests.groupby.test_rank", + "pandas.tests.groupby.transform.test_transform", + "pandas.tests.indexes.interval.test_interval", + "pandas.tests.indexing.test_categorical", + "pandas.tests.io.excel.test_writers", + "pandas.tests.reductions.test_reductions", + "pandas.tests.test_expressions", +] +ignore_errors = true + +# To be kept consistent with "Import Formatting" section in contributing.rst +[tool.isort] +known_pre_libs = "pandas._config" +known_pre_core = ["pandas._libs", "pandas._typing", "pandas.util._*", "pandas.compat", "pandas.errors"] +known_dtypes = "pandas.core.dtypes" +known_post_core = ["pandas.tseries", "pandas.io", "pandas.plotting"] +sections = ["FUTURE", "STDLIB", "THIRDPARTY" ,"PRE_LIBS" , "PRE_CORE", "DTYPES", "FIRSTPARTY", "POST_CORE", "LOCALFOLDER"] +profile = "black" +combine_as_imports = true +force_grid_wrap = 2 +force_sort_within_sections = true +skip_glob = "env" +skip = "pandas/__init__.py" + +[tool.pyright] +pythonVersion = "3.11" +typeCheckingMode = "basic" +useLibraryCodeForTypes = false +include = ["pandas", "typings"] +exclude = ["pandas/tests", "pandas/io/clipboard", "pandas/util/version", "pandas/core/_numba/extensions.py"] +# enable subset of "strict" +reportDuplicateImport = true +reportInconsistentConstructor = true +reportInvalidStubStatement = true +reportOverlappingOverload = true +reportPropertyTypeMismatch = true +reportUntypedClassDecorator = true +reportUntypedFunctionDecorator = true +reportUntypedNamedTuple = true +reportUnusedImport = true +disableBytesTypePromotions = true +# disable subset of "basic" +reportGeneralTypeIssues = false +reportMissingModuleSource = false 
+reportOptionalCall = false +reportOptionalIterable = false +reportOptionalMemberAccess = false +reportOptionalOperand = false +reportOptionalSubscript = false +reportPrivateImportUsage = false +reportUnboundVariable = false + +[tool.coverage.run] +branch = true +omit = ["pandas/_typing.py", "pandas/_version.py"] +plugins = ["Cython.Coverage"] +source = ["pandas"] + +[tool.coverage.report] +ignore_errors = false +show_missing = true +omit = ["pandas/_version.py"] +exclude_lines = [ + # Have to re-enable the standard pragma + "pragma: no cover", + # Don't complain about missing debug-only code:s + "def __repr__", + "if self.debug", + # Don't complain if tests don't hit defensive assertion code: + "raise AssertionError", + "raise NotImplementedError", + "AbstractMethodError", + # Don't complain if non-runnable code isn't run: + "if 0:", + "if __name__ == .__main__.:", + "if TYPE_CHECKING:", +] + +[tool.coverage.html] +directory = "coverage_html_report" + +[tool.codespell] +ignore-words-list = "blocs, coo, hist, nd, sav, ser, recuse, nin, timere, expec, expecs" +ignore-regex = 'https://([\w/\.])+' diff --git a/lib/python3.10/site-packages/pandas/testing.py b/lib/python3.10/site-packages/pandas/testing.py new file mode 100644 index 0000000000000000000000000000000000000000..841b55df48556561904b9144a05f747d889ea621 --- /dev/null +++ b/lib/python3.10/site-packages/pandas/testing.py @@ -0,0 +1,18 @@ +""" +Public testing utility functions. 
+""" + + +from pandas._testing import ( + assert_extension_array_equal, + assert_frame_equal, + assert_index_equal, + assert_series_equal, +) + +__all__ = [ + "assert_extension_array_equal", + "assert_frame_equal", + "assert_series_equal", + "assert_index_equal", +] diff --git a/lib/python3.10/site-packages/pandas/util/__init__.py b/lib/python3.10/site-packages/pandas/util/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..82b3aa56c653cd1241872c67e9d9016df04a6c5a --- /dev/null +++ b/lib/python3.10/site-packages/pandas/util/__init__.py @@ -0,0 +1,29 @@ +def __getattr__(key: str): + # These imports need to be lazy to avoid circular import errors + if key == "hash_array": + from pandas.core.util.hashing import hash_array + + return hash_array + if key == "hash_pandas_object": + from pandas.core.util.hashing import hash_pandas_object + + return hash_pandas_object + if key == "Appender": + from pandas.util._decorators import Appender + + return Appender + if key == "Substitution": + from pandas.util._decorators import Substitution + + return Substitution + + if key == "cache_readonly": + from pandas.util._decorators import cache_readonly + + return cache_readonly + + raise AttributeError(f"module 'pandas.util' has no attribute '{key}'") + + +def capitalize_first_letter(s): + return s[:1].upper() + s[1:] diff --git a/lib/python3.10/site-packages/pandas/util/_decorators.py b/lib/python3.10/site-packages/pandas/util/_decorators.py new file mode 100644 index 0000000000000000000000000000000000000000..4e8189e72c427d75da0367532edfe04d0a7581e8 --- /dev/null +++ b/lib/python3.10/site-packages/pandas/util/_decorators.py @@ -0,0 +1,508 @@ +from __future__ import annotations + +from functools import wraps +import inspect +from textwrap import dedent +from typing import ( + TYPE_CHECKING, + Any, + Callable, + cast, +) +import warnings + +from pandas._libs.properties import cache_readonly +from pandas._typing import ( + F, + T, +) +from 
pandas.util._exceptions import find_stack_level + +if TYPE_CHECKING: + from collections.abc import Mapping + + +def deprecate( + name: str, + alternative: Callable[..., Any], + version: str, + alt_name: str | None = None, + klass: type[Warning] | None = None, + stacklevel: int = 2, + msg: str | None = None, +) -> Callable[[F], F]: + """ + Return a new function that emits a deprecation warning on use. + + To use this method for a deprecated function, another function + `alternative` with the same signature must exist. The deprecated + function will emit a deprecation warning, and in the docstring + it will contain the deprecation directive with the provided version + so it can be detected for future removal. + + Parameters + ---------- + name : str + Name of function to deprecate. + alternative : func + Function to use instead. + version : str + Version of pandas in which the method has been deprecated. + alt_name : str, optional + Name to use in preference of alternative.__name__. + klass : Warning, default FutureWarning + stacklevel : int, default 2 + msg : str + The message to display in the warning. + Default is '{name} is deprecated. Use {alt_name} instead.' + """ + alt_name = alt_name or alternative.__name__ + klass = klass or FutureWarning + warning_msg = msg or f"{name} is deprecated, use {alt_name} instead." + + @wraps(alternative) + def wrapper(*args, **kwargs) -> Callable[..., Any]: + warnings.warn(warning_msg, klass, stacklevel=stacklevel) + return alternative(*args, **kwargs) + + # adding deprecated directive to the docstring + msg = msg or f"Use `{alt_name}` instead." + doc_error_msg = ( + "deprecate needs a correctly formatted docstring in " + "the target function (should have a one liner short " + "summary, and opening quotes should be in their own " + f"line). Found:\n{alternative.__doc__}" + ) + + # when python is running in optimized mode (i.e. 
`-OO`), docstrings are + # removed, so we check that a docstring with correct formatting is used + # but we allow empty docstrings + if alternative.__doc__: + if alternative.__doc__.count("\n") < 3: + raise AssertionError(doc_error_msg) + empty1, summary, empty2, doc_string = alternative.__doc__.split("\n", 3) + if empty1 or empty2 and not summary: + raise AssertionError(doc_error_msg) + wrapper.__doc__ = dedent( + f""" + {summary.strip()} + + .. deprecated:: {version} + {msg} + + {dedent(doc_string)}""" + ) + # error: Incompatible return value type (got "Callable[[VarArg(Any), KwArg(Any)], + # Callable[...,Any]]", expected "Callable[[F], F]") + return wrapper # type: ignore[return-value] + + +def deprecate_kwarg( + old_arg_name: str, + new_arg_name: str | None, + mapping: Mapping[Any, Any] | Callable[[Any], Any] | None = None, + stacklevel: int = 2, +) -> Callable[[F], F]: + """ + Decorator to deprecate a keyword argument of a function. + + Parameters + ---------- + old_arg_name : str + Name of argument in function to deprecate + new_arg_name : str or None + Name of preferred argument in function. Use None to raise warning that + ``old_arg_name`` keyword is deprecated. + mapping : dict or callable + If mapping is present, use it to translate old arguments to + new arguments. A callable must do its own value checking; + values not found in a dict will be forwarded unchanged. + + Examples + -------- + The following deprecates 'cols', using 'columns' instead + + >>> @deprecate_kwarg(old_arg_name='cols', new_arg_name='columns') + ... def f(columns=''): + ... print(columns) + ... 
+ >>> f(columns='should work ok') + should work ok + + >>> f(cols='should raise warning') # doctest: +SKIP + FutureWarning: cols is deprecated, use columns instead + warnings.warn(msg, FutureWarning) + should raise warning + + >>> f(cols='should error', columns="can\'t pass do both") # doctest: +SKIP + TypeError: Can only specify 'cols' or 'columns', not both + + >>> @deprecate_kwarg('old', 'new', {'yes': True, 'no': False}) + ... def f(new=False): + ... print('yes!' if new else 'no!') + ... + >>> f(old='yes') # doctest: +SKIP + FutureWarning: old='yes' is deprecated, use new=True instead + warnings.warn(msg, FutureWarning) + yes! + + To raise a warning that a keyword will be removed entirely in the future + + >>> @deprecate_kwarg(old_arg_name='cols', new_arg_name=None) + ... def f(cols='', another_param=''): + ... print(cols) + ... + >>> f(cols='should raise warning') # doctest: +SKIP + FutureWarning: the 'cols' keyword is deprecated and will be removed in a + future version please takes steps to stop use of 'cols' + should raise warning + >>> f(another_param='should not raise warning') # doctest: +SKIP + should not raise warning + + >>> f(cols='should raise warning', another_param='') # doctest: +SKIP + FutureWarning: the 'cols' keyword is deprecated and will be removed in a + future version please takes steps to stop use of 'cols' + should raise warning + """ + if mapping is not None and not hasattr(mapping, "get") and not callable(mapping): + raise TypeError( + "mapping from old to new argument values must be dict or callable!" + ) + + def _deprecate_kwarg(func: F) -> F: + @wraps(func) + def wrapper(*args, **kwargs) -> Callable[..., Any]: + old_arg_value = kwargs.pop(old_arg_name, None) + + if old_arg_value is not None: + if new_arg_name is None: + msg = ( + f"the {repr(old_arg_name)} keyword is deprecated and " + "will be removed in a future version. 
Please take " + f"steps to stop the use of {repr(old_arg_name)}" + ) + warnings.warn(msg, FutureWarning, stacklevel=stacklevel) + kwargs[old_arg_name] = old_arg_value + return func(*args, **kwargs) + + elif mapping is not None: + if callable(mapping): + new_arg_value = mapping(old_arg_value) + else: + new_arg_value = mapping.get(old_arg_value, old_arg_value) + msg = ( + f"the {old_arg_name}={repr(old_arg_value)} keyword is " + "deprecated, use " + f"{new_arg_name}={repr(new_arg_value)} instead." + ) + else: + new_arg_value = old_arg_value + msg = ( + f"the {repr(old_arg_name)} keyword is deprecated, " + f"use {repr(new_arg_name)} instead." + ) + + warnings.warn(msg, FutureWarning, stacklevel=stacklevel) + if kwargs.get(new_arg_name) is not None: + msg = ( + f"Can only specify {repr(old_arg_name)} " + f"or {repr(new_arg_name)}, not both." + ) + raise TypeError(msg) + kwargs[new_arg_name] = new_arg_value + return func(*args, **kwargs) + + return cast(F, wrapper) + + return _deprecate_kwarg + + +def _format_argument_list(allow_args: list[str]) -> str: + """ + Convert the allow_args argument (either string or integer) of + `deprecate_nonkeyword_arguments` function to a string describing + it to be inserted into warning message. + + Parameters + ---------- + allowed_args : list, tuple or int + The `allowed_args` argument for `deprecate_nonkeyword_arguments`, + but None value is not allowed. + + Returns + ------- + str + The substring describing the argument list in best way to be + inserted to the warning message. 
+ + Examples + -------- + `format_argument_list([])` -> '' + `format_argument_list(['a'])` -> "except for the arguments 'a'" + `format_argument_list(['a', 'b'])` -> "except for the arguments 'a' and 'b'" + `format_argument_list(['a', 'b', 'c'])` -> + "except for the arguments 'a', 'b' and 'c'" + """ + if "self" in allow_args: + allow_args.remove("self") + if not allow_args: + return "" + elif len(allow_args) == 1: + return f" except for the argument '{allow_args[0]}'" + else: + last = allow_args[-1] + args = ", ".join(["'" + x + "'" for x in allow_args[:-1]]) + return f" except for the arguments {args} and '{last}'" + + +def future_version_msg(version: str | None) -> str: + """Specify which version of pandas the deprecation will take place in.""" + if version is None: + return "In a future version of pandas" + else: + return f"Starting with pandas version {version}" + + +def deprecate_nonkeyword_arguments( + version: str | None, + allowed_args: list[str] | None = None, + name: str | None = None, +) -> Callable[[F], F]: + """ + Decorator to deprecate a use of non-keyword arguments of a function. + + Parameters + ---------- + version : str, optional + The version in which positional arguments will become + keyword-only. If None, then the warning message won't + specify any particular version. + + allowed_args : list, optional + In case of list, it must be the list of names of some + first arguments of the decorated functions that are + OK to be given as positional arguments. In case of None value, + defaults to list of all arguments not having the + default value. + + name : str, optional + The specific name of the function to show in the warning + message. If None, then the Qualified name of the function + is used. 
+ """ + + def decorate(func): + old_sig = inspect.signature(func) + + if allowed_args is not None: + allow_args = allowed_args + else: + allow_args = [ + p.name + for p in old_sig.parameters.values() + if p.kind in (p.POSITIONAL_ONLY, p.POSITIONAL_OR_KEYWORD) + and p.default is p.empty + ] + + new_params = [ + p.replace(kind=p.KEYWORD_ONLY) + if ( + p.kind in (p.POSITIONAL_ONLY, p.POSITIONAL_OR_KEYWORD) + and p.name not in allow_args + ) + else p + for p in old_sig.parameters.values() + ] + new_params.sort(key=lambda p: p.kind) + new_sig = old_sig.replace(parameters=new_params) + + num_allow_args = len(allow_args) + msg = ( + f"{future_version_msg(version)} all arguments of " + f"{name or func.__qualname__}{{arguments}} will be keyword-only." + ) + + @wraps(func) + def wrapper(*args, **kwargs): + if len(args) > num_allow_args: + warnings.warn( + msg.format(arguments=_format_argument_list(allow_args)), + FutureWarning, + stacklevel=find_stack_level(), + ) + return func(*args, **kwargs) + + # error: "Callable[[VarArg(Any), KwArg(Any)], Any]" has no + # attribute "__signature__" + wrapper.__signature__ = new_sig # type: ignore[attr-defined] + return wrapper + + return decorate + + +def doc(*docstrings: None | str | Callable, **params) -> Callable[[F], F]: + """ + A decorator to take docstring templates, concatenate them and perform string + substitution on them. + + This decorator will add a variable "_docstring_components" to the wrapped + callable to keep track the original docstring template for potential usage. + If it should be consider as a template, it will be saved as a string. + Otherwise, it will be saved as callable, and later user __doc__ and dedent + to get docstring. + + Parameters + ---------- + *docstrings : None, str, or callable + The string / docstring / docstring template to be appended in order + after default docstring under callable. + **params + The string which would be used to format docstring template. 
+ """ + + def decorator(decorated: F) -> F: + # collecting docstring and docstring templates + docstring_components: list[str | Callable] = [] + if decorated.__doc__: + docstring_components.append(dedent(decorated.__doc__)) + + for docstring in docstrings: + if docstring is None: + continue + if hasattr(docstring, "_docstring_components"): + docstring_components.extend( + docstring._docstring_components # pyright: ignore[reportGeneralTypeIssues] + ) + elif isinstance(docstring, str) or docstring.__doc__: + docstring_components.append(docstring) + + params_applied = [ + component.format(**params) + if isinstance(component, str) and len(params) > 0 + else component + for component in docstring_components + ] + + decorated.__doc__ = "".join( + [ + component + if isinstance(component, str) + else dedent(component.__doc__ or "") + for component in params_applied + ] + ) + + # error: "F" has no attribute "_docstring_components" + decorated._docstring_components = ( # type: ignore[attr-defined] + docstring_components + ) + return decorated + + return decorator + + +# Substitution and Appender are derived from matplotlib.docstring (1.1.0) +# module https://matplotlib.org/users/license.html + + +class Substitution: + """ + A decorator to take a function's docstring and perform string + substitution on it. + + This decorator should be robust even if func.__doc__ is None + (for example, if -OO was passed to the interpreter) + + Usage: construct a docstring.Substitution with a sequence or + dictionary suitable for performing substitution; then + decorate a suitable function with the constructed object. e.g. + + sub_author_name = Substitution(author='Jason') + + @sub_author_name + def some_function(x): + "%(author)s wrote this function" + + # note that some_function.__doc__ is now "Jason wrote this function" + + One can also use positional arguments. 
+ + sub_first_last_names = Substitution('Edgar Allen', 'Poe') + + @sub_first_last_names + def some_function(x): + "%s %s wrote the Raven" + """ + + def __init__(self, *args, **kwargs) -> None: + if args and kwargs: + raise AssertionError("Only positional or keyword args are allowed") + + self.params = args or kwargs + + def __call__(self, func: F) -> F: + func.__doc__ = func.__doc__ and func.__doc__ % self.params + return func + + def update(self, *args, **kwargs) -> None: + """ + Update self.params with supplied args. + """ + if isinstance(self.params, dict): + self.params.update(*args, **kwargs) + + +class Appender: + """ + A function decorator that will append an addendum to the docstring + of the target function. + + This decorator should be robust even if func.__doc__ is None + (for example, if -OO was passed to the interpreter). + + Usage: construct a docstring.Appender with a string to be joined to + the original docstring. An optional 'join' parameter may be supplied + which will be used to join the docstring and addendum. e.g. 
+ + add_copyright = Appender("Copyright (c) 2009", join='\n') + + @add_copyright + def my_dog(has='fleas'): + "This docstring will have a copyright below" + pass + """ + + addendum: str | None + + def __init__(self, addendum: str | None, join: str = "", indents: int = 0) -> None: + if indents > 0: + self.addendum = indent(addendum, indents=indents) + else: + self.addendum = addendum + self.join = join + + def __call__(self, func: T) -> T: + func.__doc__ = func.__doc__ if func.__doc__ else "" + self.addendum = self.addendum if self.addendum else "" + docitems = [func.__doc__, self.addendum] + func.__doc__ = dedent(self.join.join(docitems)) + return func + + +def indent(text: str | None, indents: int = 1) -> str: + if not text or not isinstance(text, str): + return "" + jointext = "".join(["\n"] + [" "] * indents) + return jointext.join(text.split("\n")) + + +__all__ = [ + "Appender", + "cache_readonly", + "deprecate", + "deprecate_kwarg", + "deprecate_nonkeyword_arguments", + "doc", + "future_version_msg", + "Substitution", +] diff --git a/lib/python3.10/site-packages/pandas/util/_doctools.py b/lib/python3.10/site-packages/pandas/util/_doctools.py new file mode 100644 index 0000000000000000000000000000000000000000..12619abf4baaf336dfd3d5ae78a9bc2133f310c0 --- /dev/null +++ b/lib/python3.10/site-packages/pandas/util/_doctools.py @@ -0,0 +1,202 @@ +from __future__ import annotations + +from typing import TYPE_CHECKING + +import numpy as np + +import pandas as pd + +if TYPE_CHECKING: + from collections.abc import Iterable + + +class TablePlotter: + """ + Layout some DataFrames in vertical/horizontal layout for explanation. 
+ Used in merging.rst + """ + + def __init__( + self, + cell_width: float = 0.37, + cell_height: float = 0.25, + font_size: float = 7.5, + ) -> None: + self.cell_width = cell_width + self.cell_height = cell_height + self.font_size = font_size + + def _shape(self, df: pd.DataFrame) -> tuple[int, int]: + """ + Calculate table shape considering index levels. + """ + row, col = df.shape + return row + df.columns.nlevels, col + df.index.nlevels + + def _get_cells(self, left, right, vertical) -> tuple[int, int]: + """ + Calculate appropriate figure size based on left and right data. + """ + if vertical: + # calculate required number of cells + vcells = max(sum(self._shape(df)[0] for df in left), self._shape(right)[0]) + hcells = max(self._shape(df)[1] for df in left) + self._shape(right)[1] + else: + vcells = max([self._shape(df)[0] for df in left] + [self._shape(right)[0]]) + hcells = sum([self._shape(df)[1] for df in left] + [self._shape(right)[1]]) + return hcells, vcells + + def plot(self, left, right, labels: Iterable[str] = (), vertical: bool = True): + """ + Plot left / right DataFrames in specified layout. + + Parameters + ---------- + left : list of DataFrames before operation is applied + right : DataFrame of operation result + labels : list of str to be drawn as titles of left DataFrames + vertical : bool, default True + If True, use vertical layout. If False, use horizontal layout. 
+ """ + from matplotlib import gridspec + import matplotlib.pyplot as plt + + if not isinstance(left, list): + left = [left] + left = [self._conv(df) for df in left] + right = self._conv(right) + + hcells, vcells = self._get_cells(left, right, vertical) + + if vertical: + figsize = self.cell_width * hcells, self.cell_height * vcells + else: + # include margin for titles + figsize = self.cell_width * hcells, self.cell_height * vcells + fig = plt.figure(figsize=figsize) + + if vertical: + gs = gridspec.GridSpec(len(left), hcells) + # left + max_left_cols = max(self._shape(df)[1] for df in left) + max_left_rows = max(self._shape(df)[0] for df in left) + for i, (_left, _label) in enumerate(zip(left, labels)): + ax = fig.add_subplot(gs[i, 0:max_left_cols]) + self._make_table(ax, _left, title=_label, height=1.0 / max_left_rows) + # right + ax = plt.subplot(gs[:, max_left_cols:]) + self._make_table(ax, right, title="Result", height=1.05 / vcells) + fig.subplots_adjust(top=0.9, bottom=0.05, left=0.05, right=0.95) + else: + max_rows = max(self._shape(df)[0] for df in left + [right]) + height = 1.0 / np.max(max_rows) + gs = gridspec.GridSpec(1, hcells) + # left + i = 0 + for df, _label in zip(left, labels): + sp = self._shape(df) + ax = fig.add_subplot(gs[0, i : i + sp[1]]) + self._make_table(ax, df, title=_label, height=height) + i += sp[1] + # right + ax = plt.subplot(gs[0, i:]) + self._make_table(ax, right, title="Result", height=height) + fig.subplots_adjust(top=0.85, bottom=0.05, left=0.05, right=0.95) + + return fig + + def _conv(self, data): + """ + Convert each input to appropriate for table outplot. 
+ """ + if isinstance(data, pd.Series): + if data.name is None: + data = data.to_frame(name="") + else: + data = data.to_frame() + data = data.fillna("NaN") + return data + + def _insert_index(self, data): + # insert is destructive + data = data.copy() + idx_nlevels = data.index.nlevels + if idx_nlevels == 1: + data.insert(0, "Index", data.index) + else: + for i in range(idx_nlevels): + data.insert(i, f"Index{i}", data.index._get_level_values(i)) + + col_nlevels = data.columns.nlevels + if col_nlevels > 1: + col = data.columns._get_level_values(0) + values = [ + data.columns._get_level_values(i)._values for i in range(1, col_nlevels) + ] + col_df = pd.DataFrame(values) + data.columns = col_df.columns + data = pd.concat([col_df, data]) + data.columns = col + return data + + def _make_table(self, ax, df, title: str, height: float | None = None) -> None: + if df is None: + ax.set_visible(False) + return + + from pandas import plotting + + idx_nlevels = df.index.nlevels + col_nlevels = df.columns.nlevels + # must be convert here to get index levels for colorization + df = self._insert_index(df) + tb = plotting.table(ax, df, loc=9) + tb.set_fontsize(self.font_size) + + if height is None: + height = 1.0 / (len(df) + 1) + + props = tb.properties() + for (r, c), cell in props["celld"].items(): + if c == -1: + cell.set_visible(False) + elif r < col_nlevels and c < idx_nlevels: + cell.set_visible(False) + elif r < col_nlevels or c < idx_nlevels: + cell.set_facecolor("#AAAAAA") + cell.set_height(height) + + ax.set_title(title, size=self.font_size) + ax.axis("off") + + +def main() -> None: + import matplotlib.pyplot as plt + + p = TablePlotter() + + df1 = pd.DataFrame({"A": [10, 11, 12], "B": [20, 21, 22], "C": [30, 31, 32]}) + df2 = pd.DataFrame({"A": [10, 12], "C": [30, 32]}) + + p.plot([df1, df2], pd.concat([df1, df2]), labels=["df1", "df2"], vertical=True) + plt.show() + + df3 = pd.DataFrame({"X": [10, 12], "Z": [30, 32]}) + + p.plot( + [df1, df3], pd.concat([df1, df3], 
axis=1), labels=["df1", "df2"], vertical=False + ) + plt.show() + + idx = pd.MultiIndex.from_tuples( + [(1, "A"), (1, "B"), (1, "C"), (2, "A"), (2, "B"), (2, "C")] + ) + column = pd.MultiIndex.from_tuples([(1, "A"), (1, "B")]) + df3 = pd.DataFrame({"v1": [1, 2, 3, 4, 5, 6], "v2": [5, 6, 7, 8, 9, 10]}, index=idx) + df3.columns = column + p.plot(df3, df3, labels=["df3"]) + plt.show() + + +if __name__ == "__main__": + main() diff --git a/lib/python3.10/site-packages/pandas/util/_exceptions.py b/lib/python3.10/site-packages/pandas/util/_exceptions.py new file mode 100644 index 0000000000000000000000000000000000000000..5f50838d373154868ff7414775763a1c66853c65 --- /dev/null +++ b/lib/python3.10/site-packages/pandas/util/_exceptions.py @@ -0,0 +1,103 @@ +from __future__ import annotations + +import contextlib +import inspect +import os +import re +from typing import TYPE_CHECKING +import warnings + +if TYPE_CHECKING: + from collections.abc import Generator + from types import FrameType + + +@contextlib.contextmanager +def rewrite_exception(old_name: str, new_name: str) -> Generator[None, None, None]: + """ + Rewrite the message of an exception. + """ + try: + yield + except Exception as err: + if not err.args: + raise + msg = str(err.args[0]) + msg = msg.replace(old_name, new_name) + args: tuple[str, ...] = (msg,) + if len(err.args) > 1: + args = args + err.args[1:] + err.args = args + raise + + +def find_stack_level() -> int: + """ + Find the first place in the stack that is not inside pandas + (tests notwithstanding). 
+ """ + + import pandas as pd + + pkg_dir = os.path.dirname(pd.__file__) + test_dir = os.path.join(pkg_dir, "tests") + + # https://stackoverflow.com/questions/17407119/python-inspect-stack-is-slow + frame: FrameType | None = inspect.currentframe() + try: + n = 0 + while frame: + filename = inspect.getfile(frame) + if filename.startswith(pkg_dir) and not filename.startswith(test_dir): + frame = frame.f_back + n += 1 + else: + break + finally: + # See note in + # https://docs.python.org/3/library/inspect.html#inspect.Traceback + del frame + return n + + +@contextlib.contextmanager +def rewrite_warning( + target_message: str, + target_category: type[Warning], + new_message: str, + new_category: type[Warning] | None = None, +) -> Generator[None, None, None]: + """ + Rewrite the message of a warning. + + Parameters + ---------- + target_message : str + Warning message to match. + target_category : Warning + Warning type to match. + new_message : str + New warning message to emit. + new_category : Warning or None, default None + New warning type to emit. When None, will be the same as target_category. 
+ """ + if new_category is None: + new_category = target_category + with warnings.catch_warnings(record=True) as record: + yield + if len(record) > 0: + match = re.compile(target_message) + for warning in record: + if warning.category is target_category and re.search( + match, str(warning.message) + ): + category = new_category + message: Warning | str = new_message + else: + category, message = warning.category, warning.message + warnings.warn_explicit( + message=message, + category=category, + filename=warning.filename, + lineno=warning.lineno, + ) diff --git a/lib/python3.10/site-packages/pandas/util/_print_versions.py b/lib/python3.10/site-packages/pandas/util/_print_versions.py new file mode 100644 index 0000000000000000000000000000000000000000..4ede5627c28b9a3eaf97f09f6a28642523ce5833 --- /dev/null +++ b/lib/python3.10/site-packages/pandas/util/_print_versions.py @@ -0,0 +1,158 @@ +from __future__ import annotations + +import codecs +import json +import locale +import os +import platform +import struct +import sys +from typing import TYPE_CHECKING + +if TYPE_CHECKING: + from pandas._typing import JSONSerializable + +from pandas.compat._optional import ( + VERSIONS, + get_version, + import_optional_dependency, +) + + +def _get_commit_hash() -> str | None: + """ + Use vendored versioneer code to get git hash, which handles + git worktree correctly. + """ + try: + from pandas._version_meson import ( # pyright: ignore [reportMissingImports] + __git_version__, + ) + + return __git_version__ + except ImportError: + from pandas._version import get_versions + + versions = get_versions() + return versions["full-revisionid"] + + +def _get_sys_info() -> dict[str, JSONSerializable]: + """ + Returns system information as a JSON serializable dictionary. 
+ """ + uname_result = platform.uname() + language_code, encoding = locale.getlocale() + return { + "commit": _get_commit_hash(), + "python": platform.python_version(), + "python-bits": struct.calcsize("P") * 8, + "OS": uname_result.system, + "OS-release": uname_result.release, + "Version": uname_result.version, + "machine": uname_result.machine, + "processor": uname_result.processor, + "byteorder": sys.byteorder, + "LC_ALL": os.environ.get("LC_ALL"), + "LANG": os.environ.get("LANG"), + "LOCALE": {"language-code": language_code, "encoding": encoding}, + } + + +def _get_dependency_info() -> dict[str, JSONSerializable]: + """ + Returns dependency information as a JSON serializable dictionary. + """ + deps = [ + "pandas", + # required + "numpy", + "pytz", + "dateutil", + # install / build, + "pip", + "Cython", + # docs + "sphinx", + # Other, not imported. + "IPython", + ] + # Optional dependencies + deps.extend(list(VERSIONS)) + + result: dict[str, JSONSerializable] = {} + for modname in deps: + try: + mod = import_optional_dependency(modname, errors="ignore") + except Exception: + # Dependency conflicts may cause a non ImportError + result[modname] = "N/A" + else: + result[modname] = get_version(mod) if mod else None + return result + + +def show_versions(as_json: str | bool = False) -> None: + """ + Provide useful information, important for bug reports. + + It comprises info about hosting operation system, pandas version, + and versions of other installed relative packages. + + Parameters + ---------- + as_json : str or bool, default False + * If False, outputs info in a human readable form to the console. + * If str, it will be considered as a path to a file. + Info will be written to that file in JSON format. + * If True, outputs info in JSON format to the console. 
+ + Examples + -------- + >>> pd.show_versions() # doctest: +SKIP + Your output may look something like this: + INSTALLED VERSIONS + ------------------ + commit : 37ea63d540fd27274cad6585082c91b1283f963d + python : 3.10.6.final.0 + python-bits : 64 + OS : Linux + OS-release : 5.10.102.1-microsoft-standard-WSL2 + Version : #1 SMP Wed Mar 2 00:30:59 UTC 2022 + machine : x86_64 + processor : x86_64 + byteorder : little + LC_ALL : None + LANG : en_GB.UTF-8 + LOCALE : en_GB.UTF-8 + pandas : 2.0.1 + numpy : 1.24.3 + ... + """ + sys_info = _get_sys_info() + deps = _get_dependency_info() + + if as_json: + j = {"system": sys_info, "dependencies": deps} + + if as_json is True: + sys.stdout.writelines(json.dumps(j, indent=2)) + else: + assert isinstance(as_json, str) # needed for mypy + with codecs.open(as_json, "wb", encoding="utf8") as f: + json.dump(j, f, indent=2) + + else: + assert isinstance(sys_info["LOCALE"], dict) # needed for mypy + language_code = sys_info["LOCALE"]["language-code"] + encoding = sys_info["LOCALE"]["encoding"] + sys_info["LOCALE"] = f"{language_code}.{encoding}" + + maxlen = max(len(x) for x in deps) + print("\nINSTALLED VERSIONS") + print("------------------") + for k, v in sys_info.items(): + print(f"{k:<{maxlen}}: {v}") + print("") + for k, v in deps.items(): + print(f"{k:<{maxlen}}: {v}") diff --git a/lib/python3.10/site-packages/pandas/util/_test_decorators.py b/lib/python3.10/site-packages/pandas/util/_test_decorators.py new file mode 100644 index 0000000000000000000000000000000000000000..2c1912bce856dd2694447d820ea2c5124be9c1a0 --- /dev/null +++ b/lib/python3.10/site-packages/pandas/util/_test_decorators.py @@ -0,0 +1,173 @@ +""" +This module provides decorator functions which can be applied to test objects +in order to skip those objects when certain conditions occur. A sample use case +is to detect if the platform is missing ``matplotlib``. 
If so, any test objects +which require ``matplotlib`` and decorated with ``@td.skip_if_no("matplotlib")`` +will be skipped by ``pytest`` during the execution of the test suite. + +To illustrate, after importing this module: + +import pandas.util._test_decorators as td + +The decorators can be applied to classes: + +@td.skip_if_no("package") +class Foo: + ... + +Or individual functions: + +@td.skip_if_no("package") +def test_foo(): + ... + +For more information, refer to the ``pytest`` documentation on ``skipif``. +""" +from __future__ import annotations + +import locale +from typing import ( + TYPE_CHECKING, + Callable, +) + +import pytest + +from pandas._config import get_option + +if TYPE_CHECKING: + from pandas._typing import F + +from pandas._config.config import _get_option + +from pandas.compat import ( + IS64, + is_platform_windows, +) +from pandas.compat._optional import import_optional_dependency + + +def skip_if_installed(package: str) -> pytest.MarkDecorator: + """ + Skip a test if a package is installed. + + Parameters + ---------- + package : str + The name of the package. + + Returns + ------- + pytest.MarkDecorator + a pytest.mark.skipif to use as either a test decorator or a + parametrization mark. + """ + return pytest.mark.skipif( + bool(import_optional_dependency(package, errors="ignore")), + reason=f"Skipping because {package} is installed.", + ) + + +def skip_if_no(package: str, min_version: str | None = None) -> pytest.MarkDecorator: + """ + Generic function to help skip tests when required packages are not + present on the testing system. + + This function returns a pytest mark with a skip condition that will be + evaluated during test collection. An attempt will be made to import the + specified ``package`` and optionally ensure it meets the ``min_version`` + + The mark can be used as either a decorator for a test class or to be + applied to parameters in pytest.mark.parametrize calls or parametrized + fixtures. 
Use pytest.importorskip if an imported moduled is later needed + or for test functions. + + If the import and version check are unsuccessful, then the test function + (or test case when used in conjunction with parametrization) will be + skipped. + + Parameters + ---------- + package: str + The name of the required package. + min_version: str or None, default None + Optional minimum version of the package. + + Returns + ------- + pytest.MarkDecorator + a pytest.mark.skipif to use as either a test decorator or a + parametrization mark. + """ + msg = f"Could not import '{package}'" + if min_version: + msg += f" satisfying a min_version of {min_version}" + return pytest.mark.skipif( + not bool( + import_optional_dependency( + package, errors="ignore", min_version=min_version + ) + ), + reason=msg, + ) + + +skip_if_32bit = pytest.mark.skipif(not IS64, reason="skipping for 32 bit") +skip_if_windows = pytest.mark.skipif(is_platform_windows(), reason="Running on Windows") +skip_if_not_us_locale = pytest.mark.skipif( + locale.getlocale()[0] != "en_US", + reason=f"Set local {locale.getlocale()[0]} is not en_US", +) + + +def parametrize_fixture_doc(*args) -> Callable[[F], F]: + """ + Intended for use as a decorator for parametrized fixture, + this function will wrap the decorated function with a pytest + ``parametrize_fixture_doc`` mark. That mark will format + initial fixture docstring by replacing placeholders {0}, {1} etc + with parameters passed as arguments. + + Parameters + ---------- + args: iterable + Positional arguments for docstring. 
+ + Returns + ------- + function + The decorated function wrapped within a pytest + ``parametrize_fixture_doc`` mark + """ + + def documented_fixture(fixture): + fixture.__doc__ = fixture.__doc__.format(*args) + return fixture + + return documented_fixture + + +def mark_array_manager_not_yet_implemented(request) -> None: + mark = pytest.mark.xfail(reason="Not yet implemented for ArrayManager") + request.applymarker(mark) + + +skip_array_manager_not_yet_implemented = pytest.mark.xfail( + _get_option("mode.data_manager", silent=True) == "array", + reason="Not yet implemented for ArrayManager", +) + +skip_array_manager_invalid_test = pytest.mark.skipif( + _get_option("mode.data_manager", silent=True) == "array", + reason="Test that relies on BlockManager internals or specific behaviour", +) + +skip_copy_on_write_not_yet_implemented = pytest.mark.xfail( + get_option("mode.copy_on_write") is True, + reason="Not yet implemented/adapted for Copy-on-Write mode", +) + +skip_copy_on_write_invalid_test = pytest.mark.skipif( + get_option("mode.copy_on_write") is True, + reason="Test not valid for Copy-on-Write mode", +) diff --git a/lib/python3.10/site-packages/pandas/util/_tester.py b/lib/python3.10/site-packages/pandas/util/_tester.py new file mode 100644 index 0000000000000000000000000000000000000000..7cfddef7ddff87275ebf31eb7ec10e65d26f8668 --- /dev/null +++ b/lib/python3.10/site-packages/pandas/util/_tester.py @@ -0,0 +1,53 @@ +""" +Entrypoint for testing from the top-level namespace. +""" +from __future__ import annotations + +import os +import sys + +from pandas.compat._optional import import_optional_dependency + +PKG = os.path.dirname(os.path.dirname(__file__)) + + +def test(extra_args: list[str] | None = None, run_doctests: bool = False) -> None: + """ + Run the pandas test suite using pytest. + + By default, runs with the marks -m "not slow and not network and not db" + + Parameters + ---------- + extra_args : list[str], default None + Extra marks to run the tests. 
+ run_doctests : bool, default False + Whether to only run the Python and Cython doctests. If you would like to run + both doctests/regular tests, just append "--doctest-modules"/"--doctest-cython" + to extra_args. + + Examples + -------- + >>> pd.test() # doctest: +SKIP + running: pytest... + """ + pytest = import_optional_dependency("pytest") + import_optional_dependency("hypothesis") + cmd = ["-m not slow and not network and not db"] + if extra_args: + if not isinstance(extra_args, list): + extra_args = [extra_args] + cmd = extra_args + if run_doctests: + cmd = [ + "--doctest-modules", + "--doctest-cython", + f"--ignore={os.path.join(PKG, 'tests')}", + ] + cmd += [PKG] + joined = " ".join(cmd) + print(f"running: pytest {joined}") + sys.exit(pytest.main(cmd)) + + +__all__ = ["test"] diff --git a/lib/python3.10/site-packages/pandas/util/_validators.py b/lib/python3.10/site-packages/pandas/util/_validators.py new file mode 100644 index 0000000000000000000000000000000000000000..cb0b4d549f49ea972d50c97986c60be64c021c3c --- /dev/null +++ b/lib/python3.10/site-packages/pandas/util/_validators.py @@ -0,0 +1,456 @@ +""" +Module that contains many useful utilities +for validating data or function arguments +""" +from __future__ import annotations + +from collections.abc import ( + Iterable, + Sequence, +) +from typing import ( + TypeVar, + overload, +) + +import numpy as np + +from pandas._libs import lib + +from pandas.core.dtypes.common import ( + is_bool, + is_integer, +) + +BoolishT = TypeVar("BoolishT", bool, int) +BoolishNoneT = TypeVar("BoolishNoneT", bool, int, None) + + +def _check_arg_length(fname, args, max_fname_arg_count, compat_args) -> None: + """ + Checks whether 'args' has length of at most 'compat_args'. Raises + a TypeError if that is not the case, similar to in Python when a + function is called with too many arguments. 
+ """ + if max_fname_arg_count < 0: + raise ValueError("'max_fname_arg_count' must be non-negative") + + if len(args) > len(compat_args): + max_arg_count = len(compat_args) + max_fname_arg_count + actual_arg_count = len(args) + max_fname_arg_count + argument = "argument" if max_arg_count == 1 else "arguments" + + raise TypeError( + f"{fname}() takes at most {max_arg_count} {argument} " + f"({actual_arg_count} given)" + ) + + +def _check_for_default_values(fname, arg_val_dict, compat_args) -> None: + """ + Check that the keys in `arg_val_dict` are mapped to their + default values as specified in `compat_args`. + + Note that this function is to be called only when it has been + checked that arg_val_dict.keys() is a subset of compat_args + """ + for key in arg_val_dict: + # try checking equality directly with '=' operator, + # as comparison may have been overridden for the left + # hand object + try: + v1 = arg_val_dict[key] + v2 = compat_args[key] + + # check for None-ness otherwise we could end up + # comparing a numpy array vs None + if (v1 is not None and v2 is None) or (v1 is None and v2 is not None): + match = False + else: + match = v1 == v2 + + if not is_bool(match): + raise ValueError("'match' is not a boolean") + + # could not compare them directly, so try comparison + # using the 'is' operator + except ValueError: + match = arg_val_dict[key] is compat_args[key] + + if not match: + raise ValueError( + f"the '{key}' parameter is not supported in " + f"the pandas implementation of {fname}()" + ) + + +def validate_args(fname, args, max_fname_arg_count, compat_args) -> None: + """ + Checks whether the length of the `*args` argument passed into a function + has at most `len(compat_args)` arguments and whether or not all of these + elements in `args` are set to their default values. 
+ + Parameters + ---------- + fname : str + The name of the function being passed the `*args` parameter + args : tuple + The `*args` parameter passed into a function + max_fname_arg_count : int + The maximum number of arguments that the function `fname` + can accept, excluding those in `args`. Used for displaying + appropriate error messages. Must be non-negative. + compat_args : dict + A dictionary of keys and their associated default values. + In order to accommodate buggy behaviour in some versions of `numpy`, + where a signature displayed keyword arguments but then passed those + arguments **positionally** internally when calling downstream + implementations, a dict ensures that the original + order of the keyword arguments is enforced. + + Raises + ------ + TypeError + If `args` contains more values than there are `compat_args` + ValueError + If `args` contains values that do not correspond to those + of the default values specified in `compat_args` + """ + _check_arg_length(fname, args, max_fname_arg_count, compat_args) + + # We do this so that we can provide a more informative + # error message about the parameters that we are not + # supporting in the pandas implementation of 'fname' + kwargs = dict(zip(compat_args, args)) + _check_for_default_values(fname, kwargs, compat_args) + + +def _check_for_invalid_keys(fname, kwargs, compat_args) -> None: + """ + Checks whether 'kwargs' contains any keys that are not + in 'compat_args' and raises a TypeError if there is one. + """ + # set(dict) --> set of the dictionary's keys + diff = set(kwargs) - set(compat_args) + + if diff: + bad_arg = next(iter(diff)) + raise TypeError(f"{fname}() got an unexpected keyword argument '{bad_arg}'") + + +def validate_kwargs(fname, kwargs, compat_args) -> None: + """ + Checks whether parameters passed to the **kwargs argument in a + function `fname` are valid parameters as specified in `*compat_args` + and whether or not they are set to their default values. 
+ + Parameters + ---------- + fname : str + The name of the function being passed the `**kwargs` parameter + kwargs : dict + The `**kwargs` parameter passed into `fname` + compat_args: dict + A dictionary of keys that `kwargs` is allowed to have and their + associated default values + + Raises + ------ + TypeError if `kwargs` contains keys not in `compat_args` + ValueError if `kwargs` contains keys in `compat_args` that do not + map to the default values specified in `compat_args` + """ + kwds = kwargs.copy() + _check_for_invalid_keys(fname, kwargs, compat_args) + _check_for_default_values(fname, kwds, compat_args) + + +def validate_args_and_kwargs( + fname, args, kwargs, max_fname_arg_count, compat_args +) -> None: + """ + Checks whether parameters passed to the *args and **kwargs argument in a + function `fname` are valid parameters as specified in `*compat_args` + and whether or not they are set to their default values. + + Parameters + ---------- + fname: str + The name of the function being passed the `**kwargs` parameter + args: tuple + The `*args` parameter passed into a function + kwargs: dict + The `**kwargs` parameter passed into `fname` + max_fname_arg_count: int + The minimum number of arguments that the function `fname` + requires, excluding those in `args`. Used for displaying + appropriate error messages. Must be non-negative. + compat_args: dict + A dictionary of keys that `kwargs` is allowed to + have and their associated default values. + + Raises + ------ + TypeError if `args` contains more values than there are + `compat_args` OR `kwargs` contains keys not in `compat_args` + ValueError if `args` contains values not at the default value (`None`) + `kwargs` contains keys in `compat_args` that do not map to the default + value as specified in `compat_args` + + See Also + -------- + validate_args : Purely args validation. + validate_kwargs : Purely kwargs validation. + + """ + # Check that the total number of arguments passed in (i.e. 
+ # args and kwargs) does not exceed the length of compat_args + _check_arg_length( + fname, args + tuple(kwargs.values()), max_fname_arg_count, compat_args + ) + + # Check there is no overlap with the positional and keyword + # arguments, similar to what is done in actual Python functions + args_dict = dict(zip(compat_args, args)) + + for key in args_dict: + if key in kwargs: + raise TypeError( + f"{fname}() got multiple values for keyword argument '{key}'" + ) + + kwargs.update(args_dict) + validate_kwargs(fname, kwargs, compat_args) + + +def validate_bool_kwarg( + value: BoolishNoneT, + arg_name: str, + none_allowed: bool = True, + int_allowed: bool = False, +) -> BoolishNoneT: + """ + Ensure that argument passed in arg_name can be interpreted as boolean. + + Parameters + ---------- + value : bool + Value to be validated. + arg_name : str + Name of the argument. To be reflected in the error message. + none_allowed : bool, default True + Whether to consider None to be a valid boolean. + int_allowed : bool, default False + Whether to consider integer value to be a valid boolean. + + Returns + ------- + value + The same value as input. + + Raises + ------ + ValueError + If the value is not a valid boolean. + """ + good_value = is_bool(value) + if none_allowed: + good_value = good_value or (value is None) + + if int_allowed: + good_value = good_value or isinstance(value, int) + + if not good_value: + raise ValueError( + f'For argument "{arg_name}" expected type bool, received ' + f"type {type(value).__name__}." + ) + return value # pyright: ignore[reportGeneralTypeIssues] + + +def validate_fillna_kwargs(value, method, validate_scalar_dict_value: bool = True): + """ + Validate the keyword arguments to 'fillna'. + + This checks that exactly one of 'value' and 'method' is specified. + If 'method' is specified, this validates that it's a valid method. + + Parameters + ---------- + value, method : object + The 'value' and 'method' keyword arguments for 'fillna'. 
+ validate_scalar_dict_value : bool, default True + Whether to validate that 'value' is a scalar or dict. Specifically, + validate that it is not a list or tuple. + + Returns + ------- + value, method : object + """ + from pandas.core.missing import clean_fill_method + + if value is None and method is None: + raise ValueError("Must specify a fill 'value' or 'method'.") + if value is None and method is not None: + method = clean_fill_method(method) + + elif value is not None and method is None: + if validate_scalar_dict_value and isinstance(value, (list, tuple)): + raise TypeError( + '"value" parameter must be a scalar or dict, but ' + f'you passed a "{type(value).__name__}"' + ) + + elif value is not None and method is not None: + raise ValueError("Cannot specify both 'value' and 'method'.") + + return value, method + + +def validate_percentile(q: float | Iterable[float]) -> np.ndarray: + """ + Validate percentiles (used by describe and quantile). + + This function checks if the given float or iterable of floats is a valid percentile + otherwise raises a ValueError. + + Parameters + ---------- + q: float or iterable of floats + A single percentile or an iterable of percentiles. + + Returns + ------- + ndarray + An ndarray of the percentiles if valid. + + Raises + ------ + ValueError if percentiles are not in given interval([0, 1]). + """ + q_arr = np.asarray(q) + # Don't change this to an f-string. The string formatting + # is too expensive for cases where we don't need it. + msg = "percentiles should all be in the interval [0, 1]" + if q_arr.ndim == 0: + if not 0 <= q_arr <= 1: + raise ValueError(msg) + else: + if not all(0 <= qs <= 1 for qs in q_arr): + raise ValueError(msg) + return q_arr + + +@overload +def validate_ascending(ascending: BoolishT) -> BoolishT: + ... + + +@overload +def validate_ascending(ascending: Sequence[BoolishT]) -> list[BoolishT]: + ... 
+ + +def validate_ascending( + ascending: bool | int | Sequence[BoolishT], +) -> bool | int | list[BoolishT]: + """Validate ``ascending`` kwargs for ``sort_index`` method.""" + kwargs = {"none_allowed": False, "int_allowed": True} + if not isinstance(ascending, Sequence): + return validate_bool_kwarg(ascending, "ascending", **kwargs) + + return [validate_bool_kwarg(item, "ascending", **kwargs) for item in ascending] + + +def validate_endpoints(closed: str | None) -> tuple[bool, bool]: + """ + Check that the `closed` argument is among [None, "left", "right"] + + Parameters + ---------- + closed : {None, "left", "right"} + + Returns + ------- + left_closed : bool + right_closed : bool + + Raises + ------ + ValueError : if argument is not among valid values + """ + left_closed = False + right_closed = False + + if closed is None: + left_closed = True + right_closed = True + elif closed == "left": + left_closed = True + elif closed == "right": + right_closed = True + else: + raise ValueError("Closed has to be either 'left', 'right' or None") + + return left_closed, right_closed + + +def validate_inclusive(inclusive: str | None) -> tuple[bool, bool]: + """ + Check that the `inclusive` argument is among {"both", "neither", "left", "right"}. 
+ + Parameters + ---------- + inclusive : {"both", "neither", "left", "right"} + + Returns + ------- + left_right_inclusive : tuple[bool, bool] + + Raises + ------ + ValueError : if argument is not among valid values + """ + left_right_inclusive: tuple[bool, bool] | None = None + + if isinstance(inclusive, str): + left_right_inclusive = { + "both": (True, True), + "left": (True, False), + "right": (False, True), + "neither": (False, False), + }.get(inclusive) + + if left_right_inclusive is None: + raise ValueError( + "Inclusive has to be either 'both', 'neither', 'left' or 'right'" + ) + + return left_right_inclusive + + +def validate_insert_loc(loc: int, length: int) -> int: + """ + Check that we have an integer between -length and length, inclusive. + + Standardize negative loc to within [0, length]. + + The exceptions we raise on failure match np.insert. + """ + if not is_integer(loc): + raise TypeError(f"loc must be an integer between -{length} and {length}") + + if loc < 0: + loc += length + if not 0 <= loc <= length: + raise IndexError(f"loc must be an integer between -{length} and {length}") + return loc # pyright: ignore[reportGeneralTypeIssues] + + +def check_dtype_backend(dtype_backend) -> None: + if dtype_backend is not lib.no_default: + if dtype_backend not in ["numpy_nullable", "pyarrow"]: + raise ValueError( + f"dtype_backend {dtype_backend} is invalid, only 'numpy_nullable' and " + f"'pyarrow' are allowed.", + ) diff --git a/lib/python3.10/site-packages/pandas/util/version/__init__.py b/lib/python3.10/site-packages/pandas/util/version/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..3a5efbbb09c1e5085bf0564d819be345edd8c827 --- /dev/null +++ b/lib/python3.10/site-packages/pandas/util/version/__init__.py @@ -0,0 +1,579 @@ +# Vendored from https://github.com/pypa/packaging/blob/main/packaging/_structures.py +# and https://github.com/pypa/packaging/blob/main/packaging/_structures.py +# changeset 
ae891fd74d6dd4c6063bb04f2faeadaac6fc6313 +# 04/30/2021 + +# This file is dual licensed under the terms of the Apache License, Version +# 2.0, and the BSD License. Licence at LICENSES/PACKAGING_LICENSE +from __future__ import annotations + +import collections +from collections.abc import Iterator +import itertools +import re +from typing import ( + Callable, + SupportsInt, + Tuple, + Union, +) +import warnings + +__all__ = ["parse", "Version", "LegacyVersion", "InvalidVersion", "VERSION_PATTERN"] + + +class InfinityType: + def __repr__(self) -> str: + return "Infinity" + + def __hash__(self) -> int: + return hash(repr(self)) + + def __lt__(self, other: object) -> bool: + return False + + def __le__(self, other: object) -> bool: + return False + + def __eq__(self, other: object) -> bool: + return isinstance(other, type(self)) + + def __ne__(self, other: object) -> bool: + return not isinstance(other, type(self)) + + def __gt__(self, other: object) -> bool: + return True + + def __ge__(self, other: object) -> bool: + return True + + def __neg__(self: object) -> NegativeInfinityType: + return NegativeInfinity + + +Infinity = InfinityType() + + +class NegativeInfinityType: + def __repr__(self) -> str: + return "-Infinity" + + def __hash__(self) -> int: + return hash(repr(self)) + + def __lt__(self, other: object) -> bool: + return True + + def __le__(self, other: object) -> bool: + return True + + def __eq__(self, other: object) -> bool: + return isinstance(other, type(self)) + + def __ne__(self, other: object) -> bool: + return not isinstance(other, type(self)) + + def __gt__(self, other: object) -> bool: + return False + + def __ge__(self, other: object) -> bool: + return False + + def __neg__(self: object) -> InfinityType: + return Infinity + + +NegativeInfinity = NegativeInfinityType() + + +InfiniteTypes = Union[InfinityType, NegativeInfinityType] +PrePostDevType = Union[InfiniteTypes, tuple[str, int]] +SubLocalType = Union[InfiniteTypes, int, str] +LocalType = 
Union[ + NegativeInfinityType, + tuple[ + Union[ + SubLocalType, + tuple[SubLocalType, str], + tuple[NegativeInfinityType, SubLocalType], + ], + ..., + ], +] +CmpKey = tuple[ + int, tuple[int, ...], PrePostDevType, PrePostDevType, PrePostDevType, LocalType +] +LegacyCmpKey = tuple[int, tuple[str, ...]] +VersionComparisonMethod = Callable[ + [Union[CmpKey, LegacyCmpKey], Union[CmpKey, LegacyCmpKey]], bool +] + +_Version = collections.namedtuple( + "_Version", ["epoch", "release", "dev", "pre", "post", "local"] +) + + +def parse(version: str) -> LegacyVersion | Version: + """ + Parse the given version string and return either a :class:`Version` object + or a :class:`LegacyVersion` object depending on if the given version is + a valid PEP 440 version or a legacy version. + """ + try: + return Version(version) + except InvalidVersion: + return LegacyVersion(version) + + +class InvalidVersion(ValueError): + """ + An invalid version was found, users should refer to PEP 440. + + Examples + -------- + >>> pd.util.version.Version('1.') + Traceback (most recent call last): + InvalidVersion: Invalid version: '1.' + """ + + +class _BaseVersion: + _key: CmpKey | LegacyCmpKey + + def __hash__(self) -> int: + return hash(self._key) + + # Please keep the duplicated `isinstance` check + # in the six comparisons hereunder + # unless you find a way to avoid adding overhead function calls. 
+ def __lt__(self, other: _BaseVersion) -> bool: + if not isinstance(other, _BaseVersion): + return NotImplemented + + return self._key < other._key + + def __le__(self, other: _BaseVersion) -> bool: + if not isinstance(other, _BaseVersion): + return NotImplemented + + return self._key <= other._key + + def __eq__(self, other: object) -> bool: + if not isinstance(other, _BaseVersion): + return NotImplemented + + return self._key == other._key + + def __ge__(self, other: _BaseVersion) -> bool: + if not isinstance(other, _BaseVersion): + return NotImplemented + + return self._key >= other._key + + def __gt__(self, other: _BaseVersion) -> bool: + if not isinstance(other, _BaseVersion): + return NotImplemented + + return self._key > other._key + + def __ne__(self, other: object) -> bool: + if not isinstance(other, _BaseVersion): + return NotImplemented + + return self._key != other._key + + +class LegacyVersion(_BaseVersion): + def __init__(self, version: str) -> None: + self._version = str(version) + self._key = _legacy_cmpkey(self._version) + + warnings.warn( + "Creating a LegacyVersion has been deprecated and will be " + "removed in the next major release.", + DeprecationWarning, + ) + + def __str__(self) -> str: + return self._version + + def __repr__(self) -> str: + return f"" + + @property + def public(self) -> str: + return self._version + + @property + def base_version(self) -> str: + return self._version + + @property + def epoch(self) -> int: + return -1 + + @property + def release(self) -> None: + return None + + @property + def pre(self) -> None: + return None + + @property + def post(self) -> None: + return None + + @property + def dev(self) -> None: + return None + + @property + def local(self) -> None: + return None + + @property + def is_prerelease(self) -> bool: + return False + + @property + def is_postrelease(self) -> bool: + return False + + @property + def is_devrelease(self) -> bool: + return False + + +_legacy_version_component_re = 
re.compile(r"(\d+ | [a-z]+ | \.| -)", re.VERBOSE) + +_legacy_version_replacement_map = { + "pre": "c", + "preview": "c", + "-": "final-", + "rc": "c", + "dev": "@", +} + + +def _parse_version_parts(s: str) -> Iterator[str]: + for part in _legacy_version_component_re.split(s): + mapped_part = _legacy_version_replacement_map.get(part, part) + + if not mapped_part or mapped_part == ".": + continue + + if mapped_part[:1] in "0123456789": + # pad for numeric comparison + yield mapped_part.zfill(8) + else: + yield "*" + mapped_part + + # ensure that alpha/beta/candidate are before final + yield "*final" + + +def _legacy_cmpkey(version: str) -> LegacyCmpKey: + # We hardcode an epoch of -1 here. A PEP 440 version can only have a epoch + # greater than or equal to 0. This will effectively put the LegacyVersion, + # which uses the defacto standard originally implemented by setuptools, + # as before all PEP 440 versions. + epoch = -1 + + # This scheme is taken from pkg_resources.parse_version setuptools prior to + # it's adoption of the packaging library. + parts: list[str] = [] + for part in _parse_version_parts(version.lower()): + if part.startswith("*"): + # remove "-" before a prerelease tag + if part < "*final": + while parts and parts[-1] == "*final-": + parts.pop() + + # remove trailing zeros from each series of numeric parts + while parts and parts[-1] == "00000000": + parts.pop() + + parts.append(part) + + return epoch, tuple(parts) + + +# Deliberately not anchored to the start and end of the string, to make it +# easier for 3rd party code to reuse +VERSION_PATTERN = r""" + v? + (?: + (?:(?P[0-9]+)!)? # epoch + (?P[0-9]+(?:\.[0-9]+)*) # release segment + (?P
<pre>                                          # pre-release
            [-_\.]?
            (?P<pre_l>(a|b|c|rc|alpha|beta|pre|preview))
            [-_\.]?
            (?P<pre_n>[0-9]+)?
        )?
        (?P<post>                                         # post release
            (?:-(?P<post_n1>[0-9]+))
            |
            (?:
                [-_\.]?
                (?P<post_l>post|rev|r)
                [-_\.]?
                (?P<post_n2>[0-9]+)?
            )
        )?
        (?P<dev>                                          # dev release
            [-_\.]?
            (?P<dev_l>dev)
            [-_\.]?
            (?P<dev_n>[0-9]+)?
        )?
    )
    (?:\+(?P<local>[a-z0-9]+(?:[-_\.][a-z0-9]+)*))?       # local version
"""
+
+
class Version(_BaseVersion):
    """A parsed PEP 440 version.

    Parsing splits the string into epoch, release tuple, pre/post/dev
    segments and an optional local segment; a normalized comparison key
    is built once in ``__init__`` so the rich comparisons inherited from
    ``_BaseVersion`` reduce to cheap tuple comparisons.

    Raises
    ------
    InvalidVersion
        If ``version`` is not a valid PEP 440 version string.
    """

    # VERSION_PATTERN is deliberately left unanchored so third parties can
    # embed it; anchor it here (allowing surrounding whitespace) so the
    # whole input must be a version.
    _regex = re.compile(r"^\s*" + VERSION_PATTERN + r"\s*$", re.VERBOSE | re.IGNORECASE)

    def __init__(self, version: str) -> None:
        # Validate the version and parse it into pieces
        match = self._regex.search(version)
        if not match:
            raise InvalidVersion(f"Invalid version: '{version}'")

        # Store the parsed out pieces of the version
        self._version = _Version(
            epoch=int(match.group("epoch")) if match.group("epoch") else 0,
            release=tuple(int(i) for i in match.group("release").split(".")),
            pre=_parse_letter_version(match.group("pre_l"), match.group("pre_n")),
            post=_parse_letter_version(
                match.group("post_l"), match.group("post_n1") or match.group("post_n2")
            ),
            dev=_parse_letter_version(match.group("dev_l"), match.group("dev_n")),
            local=_parse_local_version(match.group("local")),
        )

        # Generate a key which will be used for sorting
        self._key = _cmpkey(
            self._version.epoch,
            self._version.release,
            self._version.pre,
            self._version.post,
            self._version.dev,
            self._version.local,
        )

    def __repr__(self) -> str:
        # BUG FIX: the original returned a bare f"" (the angle-bracketed
        # text was lost in transit); restore the upstream packaging repr,
        # e.g. <Version('1.2.3')>.
        return f"<Version('{self}')>"

    def __str__(self) -> str:
        """Render the canonical (normalized) PEP 440 form."""
        parts = []

        # Epoch
        if self.epoch != 0:
            parts.append(f"{self.epoch}!")

        # Release segment
        parts.append(".".join([str(x) for x in self.release]))

        # Pre-release
        if self.pre is not None:
            parts.append("".join([str(x) for x in self.pre]))

        # Post-release
        if self.post is not None:
            parts.append(f".post{self.post}")

        # Development release
        if self.dev is not None:
            parts.append(f".dev{self.dev}")

        # Local version segment
        if self.local is not None:
            parts.append(f"+{self.local}")

        return "".join(parts)

    @property
    def epoch(self) -> int:
        _epoch: int = self._version.epoch
        return _epoch

    @property
    def release(self) -> tuple[int, ...]:
        _release: tuple[int, ...] = self._version.release
        return _release

    @property
    def pre(self) -> tuple[str, int] | None:
        _pre: tuple[str, int] | None = self._version.pre
        return _pre

    @property
    def post(self) -> int | None:
        # Stored as ("post", N); expose just the numeral.
        return self._version.post[1] if self._version.post else None

    @property
    def dev(self) -> int | None:
        # Stored as ("dev", N); expose just the numeral.
        return self._version.dev[1] if self._version.dev else None

    @property
    def local(self) -> str | None:
        # Re-join the parsed local segments with ".", the normalized separator.
        if self._version.local:
            return ".".join([str(x) for x in self._version.local])
        else:
            return None

    @property
    def public(self) -> str:
        # Public version is everything before the local segment's "+".
        return str(self).split("+", 1)[0]

    @property
    def base_version(self) -> str:
        # Epoch + release only: no pre/post/dev/local segments.
        parts = []

        # Epoch
        if self.epoch != 0:
            parts.append(f"{self.epoch}!")

        # Release segment
        parts.append(".".join([str(x) for x in self.release]))

        return "".join(parts)

    @property
    def is_prerelease(self) -> bool:
        return self.dev is not None or self.pre is not None

    @property
    def is_postrelease(self) -> bool:
        return self.post is not None

    @property
    def is_devrelease(self) -> bool:
        return self.dev is not None

    @property
    def major(self) -> int:
        # First release component; 0 when the release tuple is shorter.
        return self.release[0] if len(self.release) >= 1 else 0

    @property
    def minor(self) -> int:
        return self.release[1] if len(self.release) >= 2 else 0

    @property
    def micro(self) -> int:
        return self.release[2] if len(self.release) >= 3 else 0
+
+
+def _parse_letter_version(
+    letter: str, number: str | bytes | SupportsInt
+) -> tuple[str, int] | None:
+    if letter:
+        # We consider there to be an implicit 0 in a pre-release if there is
+        # not a numeral associated with it.
+        if number is None:
+            number = 0
+
+        # We normalize any letters to their lower case form
+        letter = letter.lower()
+
+        # We consider some words to be alternate spellings of other words and
+        # in those cases we want to normalize the spellings to our preferred
+        # spelling.
+        if letter == "alpha":
+            letter = "a"
+        elif letter == "beta":
+            letter = "b"
+        elif letter in ["c", "pre", "preview"]:
+            letter = "rc"
+        elif letter in ["rev", "r"]:
+            letter = "post"
+
+        return letter, int(number)
+    if not letter and number:
+        # We assume if we are given a number, but we are not given a letter
+        # then this is using the implicit post release syntax (e.g. 1.0-1)
+        letter = "post"
+
+        return letter, int(number)
+
+    return None
+
+
+_local_version_separators = re.compile(r"[\._-]")
+
+
+def _parse_local_version(local: str) -> LocalType | None:
+    """
+    Takes a string like abc.1.twelve and turns it into ("abc", 1, "twelve").
+    """
+    if local is not None:
+        return tuple(
+            part.lower() if not part.isdigit() else int(part)
+            for part in _local_version_separators.split(local)
+        )
+    return None
+
+
def _cmpkey(
    epoch: int,
    release: tuple[int, ...],
    pre: tuple[str, int] | None,
    post: tuple[str, int] | None,
    dev: tuple[str, int] | None,
    local: tuple[SubLocalType] | None,
) -> CmpKey:
    """Build the tuple sort key used by _BaseVersion's rich comparisons."""
    # Compare releases with trailing zeros stripped, so 1.0 == 1.0.0.
    trimmed = list(release)
    while trimmed and trimmed[-1] == 0:
        trimmed.pop()
    _release = tuple(trimmed)

    # "Trick" the sort so 1.0.dev0 comes before 1.0a0: when the ONLY
    # suffix present is dev, fill the pre slot with -Infinity. Otherwise
    # a missing pre-release sorts after any present one (Infinity) and
    # the normal rules handle the rest.
    _pre: PrePostDevType
    if pre is not None:
        _pre = pre
    elif post is None and dev is not None:
        _pre = NegativeInfinity
    else:
        _pre = Infinity

    # A missing post segment sorts before any present one.
    _post: PrePostDevType = NegativeInfinity if post is None else post

    # A missing dev segment sorts after any present one.
    _dev: PrePostDevType = Infinity if dev is None else dev

    _local: LocalType
    if local is None:
        # Versions without a local segment sort before those with one.
        _local = NegativeInfinity
    else:
        # PEP 440 local-segment ordering:
        # - numeric segments sort numerically and after alphanumeric ones
        # - alphanumeric segments sort lexicographically
        # - shorter versions sort before longer ones with the same prefix
        # Padding each segment into a 2-tuple encodes those rules.
        _local = tuple(
            (seg, "") if isinstance(seg, int) else (NegativeInfinity, seg)
            for seg in local
        )

    return epoch, _release, _pre, _post, _dev, _local
diff --git a/lib/python3.10/site-packages/pyctcdecode-0.5.0.dist-info/AUTHORS.md b/lib/python3.10/site-packages/pyctcdecode-0.5.0.dist-info/AUTHORS.md
new file mode 100644
index 0000000000000000000000000000000000000000..5ece1f45728130538957c964d2e2ad9cee029da0
--- /dev/null
+++ b/lib/python3.10/site-packages/pyctcdecode-0.5.0.dist-info/AUTHORS.md
@@ -0,0 +1,6 @@
+# Authors
+
+- Georg Kucsko (github: gkucsko)
+- Patrick O'Neill (github: poneill)
+- Mikey Shulman (github: mikeyshulman)
+- Jeremy Lopez (github: lopez86)
diff --git a/lib/python3.10/site-packages/pyctcdecode-0.5.0.dist-info/INSTALLER b/lib/python3.10/site-packages/pyctcdecode-0.5.0.dist-info/INSTALLER
new file mode 100644
index 0000000000000000000000000000000000000000..5c69047b2eb8235994febeeae1da4a82365a240a
--- /dev/null
+++ b/lib/python3.10/site-packages/pyctcdecode-0.5.0.dist-info/INSTALLER
@@ -0,0 +1 @@
+uv
\ No newline at end of file
diff --git a/lib/python3.10/site-packages/pyctcdecode-0.5.0.dist-info/RECORD b/lib/python3.10/site-packages/pyctcdecode-0.5.0.dist-info/RECORD
new file mode 100644
index 0000000000000000000000000000000000000000..0ee1d52a74083a0573d68b3b777e71fd5bf80f5a
--- /dev/null
+++ b/lib/python3.10/site-packages/pyctcdecode-0.5.0.dist-info/RECORD
@@ -0,0 +1,16 @@
+pyctcdecode-0.5.0.dist-info/AUTHORS.md,sha256=aGFXvJf-kONAwBoLv18dVB2_ylaxXUvU2_HukqIVhWs,158
+pyctcdecode-0.5.0.dist-info/INSTALLER,sha256=5hhM4Q4mYTT9z6QB6PGpUAW81PGNFrYrdXMj4oM_6ak,2
+pyctcdecode-0.5.0.dist-info/LICENSE,sha256=3CkxYE8cBhlOIBOwDLT0kR3j-RWZi6VShTTtinFEsK4,11364
+pyctcdecode-0.5.0.dist-info/METADATA,sha256=d9G4OMp77qZO-zITSEoNUXvLtyNof0datKPXQIQLHvc,20756
+pyctcdecode-0.5.0.dist-info/RECORD,,
+pyctcdecode-0.5.0.dist-info/REQUESTED,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+pyctcdecode-0.5.0.dist-info/WHEEL,sha256=bb2Ot9scclHKMOLDEHY6B2sicWOgugjFKaJsT7vwMQo,110
+pyctcdecode-0.5.0.dist-info/top_level.txt,sha256=mUwwjDhf8CXGDQ6l0MdbqUqTtFoAt5eHP-9k7Ig5Z1k,12
+pyctcdecode/__init__.py,sha256=Acx8eKR0temswYP_Z9SY8FrL0sZMSajaWicDiKYxkR4,208
+pyctcdecode/alphabet.py,sha256=uK_8yjE1EYm7pMkGKpKnqar622OUeyts76wHug7d9RA,6772
+pyctcdecode/constants.py,sha256=EPnE2QPOX1mEeHB3KikUYNmBMlLqD4ZhIZoUi9SHRcU,617
+pyctcdecode/decoder.py,sha256=vBL-7z5CjcBIz9z46mCOdx-Ky_cfVoLxmbebsXcItog,37821
+pyctcdecode/language_model.py,sha256=yzoYZ-CGLi2DHP895qwmsKx34sp_uk-TbYwwbNyVNJw,17866
+pyctcdecode/py.typed,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+pyctcdecode/tests/sample_data/bugs_bunny_kenlm.arpa,sha256=WlNs-K60dCdEDrITJG2Xvoen-Xrb_nI4P6IPUs6Zbs0,169
+pyctcdecode/tests/sample_data/libri_logits.json,sha256=e5pyEDXKxH0m69zoGK5WME-vuqVq_vGVSfRHHT2Ov6o,53612
diff --git a/lib/python3.10/site-packages/pyctcdecode-0.5.0.dist-info/REQUESTED b/lib/python3.10/site-packages/pyctcdecode-0.5.0.dist-info/REQUESTED
new file mode 100644
index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391
diff --git a/lib/python3.10/site-packages/pyctcdecode-0.5.0.dist-info/WHEEL b/lib/python3.10/site-packages/pyctcdecode-0.5.0.dist-info/WHEEL
new file mode 100644
index 0000000000000000000000000000000000000000..9d8f872bbf2275e6d1785238e90b0321f4b6f323
--- /dev/null
+++ b/lib/python3.10/site-packages/pyctcdecode-0.5.0.dist-info/WHEEL
@@ -0,0 +1,6 @@
+Wheel-Version: 1.0
+Generator: bdist_wheel (0.38.4)
+Root-Is-Purelib: true
+Tag: py2-none-any
+Tag: py3-none-any
+