diff --git a/.gitattributes b/.gitattributes index b7f8fe79de23ce5fd666d20fcc0042e19e1a14c0..2e63ff8ca94d552adc6ef365f5467f44e021c93a 100644 --- a/.gitattributes +++ b/.gitattributes @@ -807,3 +807,4 @@ parrot/lib/python3.10/site-packages/nvidia/cusparse/lib/libcusparse.so.12 filter parrot/lib/python3.10/site-packages/torch/__pycache__/_meta_registrations.cpython-310.pyc filter=lfs diff=lfs merge=lfs -text mplug_owl2/lib/python3.10/site-packages/nvidia/curand/lib/libcurand.so.10 filter=lfs diff=lfs merge=lfs -text videochat2/lib/python3.10/site-packages/tensorflow/python/_pywrap_py_exception_registry.so filter=lfs diff=lfs merge=lfs -text +videochat2/lib/python3.10/site-packages/tensorflow/python/_pywrap_tfcompile.so filter=lfs diff=lfs merge=lfs -text diff --git a/llava_next/share/terminfo/M/MtxOrb162 b/llava_next/share/terminfo/M/MtxOrb162 new file mode 100644 index 0000000000000000000000000000000000000000..e009b47da51b1249a96b066b7bdbfbcb083884ac Binary files /dev/null and b/llava_next/share/terminfo/M/MtxOrb162 differ diff --git a/llava_next/share/terminfo/q/qansi b/llava_next/share/terminfo/q/qansi new file mode 100644 index 0000000000000000000000000000000000000000..633429959ef4371a2ff58d482b244e9c4f832e9f Binary files /dev/null and b/llava_next/share/terminfo/q/qansi differ diff --git a/llava_next/share/terminfo/q/qvt103-w b/llava_next/share/terminfo/q/qvt103-w new file mode 100644 index 0000000000000000000000000000000000000000..5a30648924f7bc764d8c0909cba407e0bcfb8778 Binary files /dev/null and b/llava_next/share/terminfo/q/qvt103-w differ diff --git a/llava_next/share/terminfo/q/qvt119+-25 b/llava_next/share/terminfo/q/qvt119+-25 new file mode 100644 index 0000000000000000000000000000000000000000..7635a5a4bbd7e94f526541bffd0d3c5f17d7b902 Binary files /dev/null and b/llava_next/share/terminfo/q/qvt119+-25 differ diff --git a/parrot/lib/python3.10/site-packages/torch/fx/__pycache__/_compatibility.cpython-310.pyc 
b/parrot/lib/python3.10/site-packages/torch/fx/__pycache__/_compatibility.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..ecf5631eed62448efce4f11dd7f74f5c1808a82b Binary files /dev/null and b/parrot/lib/python3.10/site-packages/torch/fx/__pycache__/_compatibility.cpython-310.pyc differ diff --git a/parrot/lib/python3.10/site-packages/torch/fx/__pycache__/_lazy_graph_module.cpython-310.pyc b/parrot/lib/python3.10/site-packages/torch/fx/__pycache__/_lazy_graph_module.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..6006ead308217e8506bde5a0a55f5a7b024cc0cb Binary files /dev/null and b/parrot/lib/python3.10/site-packages/torch/fx/__pycache__/_lazy_graph_module.cpython-310.pyc differ diff --git a/parrot/lib/python3.10/site-packages/torch/fx/__pycache__/_symbolic_trace.cpython-310.pyc b/parrot/lib/python3.10/site-packages/torch/fx/__pycache__/_symbolic_trace.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..3e7861f74fe911f7ffa265fffa052c3bb243f62d Binary files /dev/null and b/parrot/lib/python3.10/site-packages/torch/fx/__pycache__/_symbolic_trace.cpython-310.pyc differ diff --git a/parrot/lib/python3.10/site-packages/torch/fx/__pycache__/_utils.cpython-310.pyc b/parrot/lib/python3.10/site-packages/torch/fx/__pycache__/_utils.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..b1c076f7606f26aa2a066e5b5d259286e12e67f7 Binary files /dev/null and b/parrot/lib/python3.10/site-packages/torch/fx/__pycache__/_utils.cpython-310.pyc differ diff --git a/parrot/lib/python3.10/site-packages/torch/fx/__pycache__/annotate.cpython-310.pyc b/parrot/lib/python3.10/site-packages/torch/fx/__pycache__/annotate.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..50cc448285d0b92a5ee2c2f413c29d0bb38dd2a7 Binary files /dev/null and b/parrot/lib/python3.10/site-packages/torch/fx/__pycache__/annotate.cpython-310.pyc differ 
diff --git a/parrot/lib/python3.10/site-packages/torch/fx/__pycache__/config.cpython-310.pyc b/parrot/lib/python3.10/site-packages/torch/fx/__pycache__/config.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..5d5740c2be46300158bb072ed9a47c1770f0f595 Binary files /dev/null and b/parrot/lib/python3.10/site-packages/torch/fx/__pycache__/config.cpython-310.pyc differ diff --git a/parrot/lib/python3.10/site-packages/torch/fx/__pycache__/graph.cpython-310.pyc b/parrot/lib/python3.10/site-packages/torch/fx/__pycache__/graph.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..2a6299f541cd523258d5a959e1c0bfac09c91d1a Binary files /dev/null and b/parrot/lib/python3.10/site-packages/torch/fx/__pycache__/graph.cpython-310.pyc differ diff --git a/parrot/lib/python3.10/site-packages/torch/fx/__pycache__/graph_module.cpython-310.pyc b/parrot/lib/python3.10/site-packages/torch/fx/__pycache__/graph_module.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..a95d378acf6a7ea2ca4a88a0a38a5e0eac67d0e5 Binary files /dev/null and b/parrot/lib/python3.10/site-packages/torch/fx/__pycache__/graph_module.cpython-310.pyc differ diff --git a/parrot/lib/python3.10/site-packages/torch/fx/__pycache__/immutable_collections.cpython-310.pyc b/parrot/lib/python3.10/site-packages/torch/fx/__pycache__/immutable_collections.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..8d2a6ea0d274496a164efbbb60cb74fe42ff0694 Binary files /dev/null and b/parrot/lib/python3.10/site-packages/torch/fx/__pycache__/immutable_collections.cpython-310.pyc differ diff --git a/parrot/lib/python3.10/site-packages/torch/fx/__pycache__/node.cpython-310.pyc b/parrot/lib/python3.10/site-packages/torch/fx/__pycache__/node.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..44a8bdf7156995d70ea86310e8e3d7981fa2c45a Binary files /dev/null and 
b/parrot/lib/python3.10/site-packages/torch/fx/__pycache__/node.cpython-310.pyc differ diff --git a/parrot/lib/python3.10/site-packages/torch/fx/__pycache__/operator_schemas.cpython-310.pyc b/parrot/lib/python3.10/site-packages/torch/fx/__pycache__/operator_schemas.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..0e1bb663346c9488bcf4bb649e66ce9f6e8659ad Binary files /dev/null and b/parrot/lib/python3.10/site-packages/torch/fx/__pycache__/operator_schemas.cpython-310.pyc differ diff --git a/parrot/lib/python3.10/site-packages/torch/fx/__pycache__/proxy.cpython-310.pyc b/parrot/lib/python3.10/site-packages/torch/fx/__pycache__/proxy.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..ff57514e991f9ab97c5eaf52382c5eca7cba8340 Binary files /dev/null and b/parrot/lib/python3.10/site-packages/torch/fx/__pycache__/proxy.cpython-310.pyc differ diff --git a/parrot/lib/python3.10/site-packages/torch/fx/__pycache__/subgraph_rewriter.cpython-310.pyc b/parrot/lib/python3.10/site-packages/torch/fx/__pycache__/subgraph_rewriter.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..a532b1772927a5b81d7ba8055c638420c2e4f2ec Binary files /dev/null and b/parrot/lib/python3.10/site-packages/torch/fx/__pycache__/subgraph_rewriter.cpython-310.pyc differ diff --git a/parrot/lib/python3.10/site-packages/torch/fx/__pycache__/tensor_type.cpython-310.pyc b/parrot/lib/python3.10/site-packages/torch/fx/__pycache__/tensor_type.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..f78174e0c34fabb23f33175876de86ffcbb818ca Binary files /dev/null and b/parrot/lib/python3.10/site-packages/torch/fx/__pycache__/tensor_type.cpython-310.pyc differ diff --git a/parrot/lib/python3.10/site-packages/torch/fx/experimental/__pycache__/accelerator_partitioner.cpython-310.pyc 
b/parrot/lib/python3.10/site-packages/torch/fx/experimental/__pycache__/accelerator_partitioner.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..3ca8243ddb337947239f2c649ae18069b3ff53d7 Binary files /dev/null and b/parrot/lib/python3.10/site-packages/torch/fx/experimental/__pycache__/accelerator_partitioner.cpython-310.pyc differ diff --git a/parrot/lib/python3.10/site-packages/torch/fx/experimental/__pycache__/meta_tracer.cpython-310.pyc b/parrot/lib/python3.10/site-packages/torch/fx/experimental/__pycache__/meta_tracer.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..8099670af3c4ff51dfbd075209688c312d872e2a Binary files /dev/null and b/parrot/lib/python3.10/site-packages/torch/fx/experimental/__pycache__/meta_tracer.cpython-310.pyc differ diff --git a/parrot/lib/python3.10/site-packages/torch/fx/experimental/migrate_gradual_types/__pycache__/__init__.cpython-310.pyc b/parrot/lib/python3.10/site-packages/torch/fx/experimental/migrate_gradual_types/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..e6df416b77f06851760ecf8677de8c88d3083117 Binary files /dev/null and b/parrot/lib/python3.10/site-packages/torch/fx/experimental/migrate_gradual_types/__pycache__/__init__.cpython-310.pyc differ diff --git a/parrot/lib/python3.10/site-packages/torch/fx/experimental/migrate_gradual_types/__pycache__/constraint.cpython-310.pyc b/parrot/lib/python3.10/site-packages/torch/fx/experimental/migrate_gradual_types/__pycache__/constraint.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..9db8b78584b93e5914a56ff8b535ca431fadcc3f Binary files /dev/null and b/parrot/lib/python3.10/site-packages/torch/fx/experimental/migrate_gradual_types/__pycache__/constraint.cpython-310.pyc differ diff --git a/parrot/lib/python3.10/site-packages/torch/fx/experimental/migrate_gradual_types/__pycache__/constraint_generator.cpython-310.pyc 
b/parrot/lib/python3.10/site-packages/torch/fx/experimental/migrate_gradual_types/__pycache__/constraint_generator.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..56132776f6e55f447051ffb0b0c21192477ec8e4 Binary files /dev/null and b/parrot/lib/python3.10/site-packages/torch/fx/experimental/migrate_gradual_types/__pycache__/constraint_generator.cpython-310.pyc differ diff --git a/parrot/lib/python3.10/site-packages/torch/fx/experimental/migrate_gradual_types/__pycache__/util.cpython-310.pyc b/parrot/lib/python3.10/site-packages/torch/fx/experimental/migrate_gradual_types/__pycache__/util.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..5ee985e974718fbd12fec20bb7d0b14fd24dfc53 Binary files /dev/null and b/parrot/lib/python3.10/site-packages/torch/fx/experimental/migrate_gradual_types/__pycache__/util.cpython-310.pyc differ diff --git a/parrot/lib/python3.10/site-packages/torch/fx/experimental/migrate_gradual_types/constraint.py b/parrot/lib/python3.10/site-packages/torch/fx/experimental/migrate_gradual_types/constraint.py new file mode 100644 index 0000000000000000000000000000000000000000..45038837cae608219e497fcfe06cb509ce7c4fc9 --- /dev/null +++ b/parrot/lib/python3.10/site-packages/torch/fx/experimental/migrate_gradual_types/constraint.py @@ -0,0 +1,558 @@ +# mypy: allow-untyped-defs +from torch.fx.experimental.migrate_gradual_types.operation import op_add, op_sub, op_mul, op_div, \ + op_mod, op_gt, op_lt, op_neq, op_eq +from torch.fx.tensor_type import TensorType, Dyn + + +class Constraint: + pass + + +class Conj(Constraint): + def __init__(self, conjuncts): + """ + :param conjuncts: Conjunction of constraints + """ + self.conjucts = conjuncts + + def __eq__(self, other): + if isinstance(other, Conj): + return self.conjucts == other.conjucts and self.conjucts == other.conjucts + else: + return False + + def __repr__(self): + return f'And({self.conjucts})' + + +class Disj(Constraint): + def 
__init__(self, disjuncts): + """ + :param disjuncts: Disjunction of constraints + """ + self.disjuncts = disjuncts + + def __eq__(self, other): + if isinstance(other, Disj): + return self.disjuncts == other.disjuncts and self.disjuncts == other.disjuncts + else: + return False + + def __repr__(self): + return f'Or({self.disjuncts})' + + +class Prod(Constraint): + def __init__(self, products): + """ + :param products: lists of dimensions to multiply + """ + self.products = products + + def __eq__(self, other): + if isinstance(other, Prod): + return self.products == other.products and self.products == other.products + else: + return False + + def __repr__(self): + return f'Product({self.products})' + + +class T(Constraint): + """ + True + """ + def __init__(self): + pass + + def __eq__(self, other): + return isinstance(other, T) + + def __repr__(self): + return 'True' + +class F(Constraint): + """ + False + """ + def __init__(self): + pass + + def __eq__(self, other): + return isinstance(other, F) + + def __repr__(self): + return 'False' + + +class BinaryConstraint(Constraint): + """ + Represents all binary operations + """ + def __init__(self, lhs, rhs, op): + """ + :param lhs: lhs of the constraint + :param rhs: rhs of the constraint + :param op: string representing the operation + """ + self.lhs = lhs + self.rhs = rhs + self.op = op + + def __eq__(self, other): + if isinstance(other, BinaryConstraint): + return self.lhs == other.lhs and self.rhs == other.rhs and self.op == other.op + else: + return False + + def __repr__(self): + return f'({self.lhs} {self.op} {self.rhs})' + + +class BinConstraintT(BinaryConstraint): + """ + Binary constraints about tensors + """ + def __init__(self, lhs, rhs, op): + assert (isinstance(lhs, (TVar, TensorType, int)) or lhs == Dyn) and \ + (isinstance(rhs, (TVar, TensorType, int)) or rhs == Dyn) + super().__init__(lhs, rhs, op) + + def __eq__(self, other): + return super().__eq__(other) + + +class BinConstraintD(BinaryConstraint): + 
""" + Binary constraints about dimensions + """ + def __init__(self, lhs, rhs, op): + assert is_algebraic_expression(lhs) or is_dim(lhs) or is_bool_expr(lhs) + assert is_algebraic_expression(rhs) or is_dim(rhs) or is_bool_expr(rhs) + + super().__init__(lhs, rhs, op) + + def __eq__(self, other): + return super().__eq__(other) + + + +class TGreatestUpperBound(Constraint): + """ + Greatest Upper bound for tensors with dynamic type + """ + def __init__(self, res, rhs1, rhs2): + """ + :param res: tensor variable that stores the result of the outout + :param rhs1: tensor or tensor variable + :param rhs2: tensor or tensor variabke + """ + self.res = res + self.rhs1 = rhs1 + self.rhs2 = rhs2 + + def __repr__(self): + return f'{self.res} = {self.rhs1}\u2294*{self.rhs2}' + + def __eq__(self, other): + if isinstance(other, TGreatestUpperBound): + return self.res == other.res and self.rhs1 == other.rhs1 and self.rhs2 == other.rhs2 + else: + return False + + +class DGreatestUpperBound(Constraint): + """ + Greatest Upper bound for dimensions + """ + def __init__(self, res, rhs1, rhs2): + """ + :param res: Dimension variable to store the result + :param rhs1: dimension variable 1 + :param rhs2: dimension variable 2 + """ + assert is_dim(res) + assert is_dim(rhs1) + assert is_dim(rhs2) + + self.res = res + self.rhs1 = rhs1 + self.rhs2 = rhs2 + + def __repr__(self): + return f'{self.res} = {self.rhs1}\u2294{self.rhs2}' + + def __eq__(self, other): + if isinstance(other, DGreatestUpperBound): + return self.res == other.res and self.rhs1 == other.rhs1 and self.rhs2 == other.rhs2 + else: + return False + + +class CanReshape(Constraint): + """ + can_reshape constraint + """ + def __init__(self, src, target): + """ + :param src: tensor variable + :param target: tensor + """ + self.src = src + self.target = target + + def __repr__(self): + return f'can-reshape({self.src}, {self.target})' + + def __eq__(self, other): + if isinstance(other, CanReshape): + return self.src == other.src and 
self.target == other.target + else: + return False + + +class IndexSelect(Constraint): + + def __init__(self, tensor_size, input_var, dim_replace, index, output): + """ + Args: + input_var: input to index_select + tensor_size: tensor size we are considering + dim_replace: the dimension of the output at "index" + index: location of the dimensions to replace in the input + output: variable to store the result + """ + assert isinstance(input_var, TVar) + assert isinstance(output, TVar) + assert isinstance(dim_replace, DVar) or dim_replace == Dyn + assert isinstance(index, int) + + self.input_var = input_var + self.tensor_size = tensor_size + self.dim_replace = dim_replace + self.index = index + self.output = output + + def __repr__(self): + + return f' {self.output} = ' \ + f'IndexSelect({self.input_var}, ' \ + f'tensor_size: {self.tensor_size}, ' \ + f'{self.dim_replace}, ' \ + f'{self.index})' + + def __eq__(self, other): + if isinstance(other, IndexSelect): + return self.tensor_size == other.tensor_size and \ + self.dim_replace == other.dim_replace and \ + self.index == other.index and \ + self.output == other.output and \ + self.input_var == other.input_var + else: + return False + + +class Transpose(Constraint): + + def __init__(self, tensor_size, input_var, index1, index2, output): + """ + Args: + tensor_size: current tensor size + input_var: variable to hold input + index1: dimension 1 + index2: dimension 2 + output: output that stores result + """ + assert isinstance(input_var, TVar) + assert isinstance(output, TVar) + assert isinstance(index1, int) + assert isinstance(index2, int) + + self.input_var = input_var + self.tensor_size = tensor_size + self.index1 = index1 + self.index2 = index2 + self.output = output + + def __repr__(self): + + return f' {self.output} = ' \ + f'Transpose({self.input_var}, ' \ + f'tensor_size: {self.tensor_size}, ' \ + f'{self.index1}, ' \ + f'{self.index2})' + + def __eq__(self, other): + if isinstance(other, Transpose): + return 
self.tensor_size == other.tensor_size and \ + self.index1 == other.index1 and \ + self.index2 == other.index2 and \ + self.output == other.output and \ + self.input_var == other.input_var + else: + return False + + +class GetItem(Constraint): + + def __init__(self, tensor_size, index, res, input_var): + """ + Constraint for getting item given a tensor size + :param tensor_size: actual number + :param index: actual number representing the index + :param res: dimension variable to carry the item we get + :param input_var: a tensor variable from which we will get item + """ + assert isinstance(res, DVar) + + self.res = res + self.tensor_size = tensor_size + self.index = index + self.input_var = input_var + + def __repr__(self): + return f' {self.res} = GetItem({self.input_var}, tensor_size: {self.tensor_size}, {self.index})' + + def __eq__(self, other): + if isinstance(other, GetItem): + return self.res == other.res and \ + self.tensor_size == other.tensor_size and \ + self.index == other.index and \ + self.input_var == other.input_var + else: + return False + +class GetItemTensor(Constraint): + + def __init__(self, tensor_size, index_tuple, res, input_var): + """ + Constraint for getting item given a tensor size + However, when the argument is a tuple, we will + expect a tensor + :param tensor_size: actual number representing the rank + :param index_tuple: tuple for indexing + :param res: tensor variable to carry the item we get + :param input_var: a tensor variable from which we will get item + """ + assert isinstance(res, TVar) + + self.res = res + self.tensor_size = tensor_size + self.index_tuple = index_tuple + self.input_var = input_var + + def __repr__(self): + return f' {self.res} = GetItemT({self.input_var}, tensor_size: {self.tensor_size}, {self.index_tuple})' + + def __eq__(self, other): + if isinstance(other, GetItemTensor): + return self.res == other.res and \ + self.tensor_size == other.tensor_size and \ + self.index_tuple == other.index_tuple and \ + 
self.input_var == other.input_var + else: + return False + +class CalcConv(Constraint): + + def __init__(self, conv_result, input_var, c_out, kernel, padding, stride, dilation, matching_constraint_vars): + """ + :param conv_result: the convolution result + :param input_var: input to convolution + :param c_out: output chanel type + :param kernel: kernel tuple + """ + self.conv_result = conv_result + self.input_var = input_var + self.c_out = c_out + self.kernel = kernel + self.padding = padding + self.stride = stride + self.dilation = dilation + self.matching_constraint = matching_constraint_vars + + def __repr__(self): + return f'{self.conv_result} =' \ + f' calc-conv({self.input_var},' \ + f' {self.c_out}, {self.kernel}, ' \ + f'{self.padding}, {self.stride},' \ + f' {self.dilation})' + + def __eq__(self, other): + if isinstance(other, CalcConv): + return self.conv_result == other.conv_result and self.input_var == other.input_var and \ + self.c_out == other.c_out and self.kernel == other.kernel and self.padding == other.padding \ + and self.stride == other.stride and self.dilation == other.dilation \ + and self.matching_constraint == other.matching_constraint + else: + return False + + +class CalcMaxPool(Constraint): + + def __init__(self, maxpool_result, input_var, kernel, padding, stride, dilation, matching_constraint_vars): + """ + :param maxpool_result: the result of maxpool + :param input_var: input to convolution + :param kernel: kernel tuple + """ + self.maxpool_result = maxpool_result + self.input_var = input_var + self.kernel = kernel + self.padding = padding + self.stride = stride + self.dilation = dilation + self.matching_constraint = matching_constraint_vars + + def __repr__(self): + return f'{self.maxpool_result} =' \ + f' calc-maxpool({self.input_var},' \ + f' {self.kernel}, ' \ + f'{self.padding}, {self.stride},' \ + f' {self.dilation})' + + def __eq__(self, other): + if isinstance(other, CalcMaxPool): + return self.maxpool_result == 
other.maxpool_result and self.input_var == other.input_var \ + and self.kernel == other.kernel and self.padding == other.padding \ + and self.stride == other.stride and self.dilation == other.dilation \ + and self.matching_constraint == other.matching_constraint + else: + return False + + +class ApplyBroadcasting(Constraint): + def __init__(self, res1, res2, input1, input2): + """ + :param res1: resulting tensor 1 + :param res2: resulting tensor 2 + :param input1: tensor variable 1 + :param input2: tensor variable 2 + """ + self.res1 = res1 + self.res2 = res2 + self.input1 = input1 + self.input2 = input2 + + def __eq__(self, other): + if isinstance(other, ApplyBroadcasting): + return self.res1 == other.res1 \ + and self.res2 == other.res2 \ + and self.input1 == other.input1 \ + and self.input2 == other.input2 + else: + return False + + def __repr__(self): + return f'{self.res1}, {self.res2} ='f' apply-broadcasting({self.input1},' f' {self.input2})' + + +class CalcProduct(Constraint): + """ + Given correct dimensions, calculate the product for flatten accounting for Dyn + """ + def __init__(self, start, end, flattened, dims_to_flatten): + """ + :param start: start index + :param end: end index + :param flattened: variable to store the product + :param dims_to_flatten: the type which we will flatten + """ + assert isinstance(dims_to_flatten, list) + assert isinstance(flattened, TVar) + assert isinstance(start, int) + assert isinstance(end, int) + + self.start = start + self.end = end + self.dims_to_flatten = dims_to_flatten + self.flattened = flattened + + def __eq__(self, other): + if isinstance(other, CalcProduct): + return self.start == other.start and self.end == other.end and \ + self.dims_to_flatten == other.dims_to_flatten and self.flattened == other.flattened + + else: + return False + + def __repr__(self): + return f'{self.flattened} = CalcProduct({self.start}, {self.end}, {self.dims_to_flatten})' + + +class TVar: + """ + Tensor variable with no tensor 
constructor + """ + def __init__(self, tvar): + """ + :param tvar: tensor variable + """ + self.tvar = tvar + + def __repr__(self): + return f'TV({self.tvar})' + + def __eq__(self, other): + if isinstance(other, TVar): + return self.tvar == other.tvar + else: + return False + + +class DVar: + """ + Dimension variable + """ + def __init__(self, c): + """ + :param c: character or number + """ + self.c = c + + def __repr__(self): + return f'DV({self.c})' + + def __eq__(self, other): + if isinstance(other, DVar): + return self.c == other.c + else: + return False + + +class BVar: + """ + Boolean variable + """ + def __init__(self, c): + """ + :param c: character or number + """ + self.c = c + + def __repr__(self): + return f'BV({self.c})' + + def __eq__(self, other): + if isinstance(other, BVar): + return self.c == other.c + else: + return False + + +def is_algebraic_expression(constraint): + if isinstance(constraint, BinConstraintD): + return constraint.op in [op_add, op_sub, op_div, op_mul, op_mod] + else: + return isinstance(constraint, Prod) + + +def is_bool_expr(constraint): + if isinstance(constraint, BinConstraintD): + return constraint.op in [op_gt, op_lt, op_neq, op_eq] + else: + return isinstance(constraint, (BVar, Conj, Disj)) + +def is_dim(d): + return isinstance(d, (DVar, int)) or d == Dyn diff --git a/parrot/lib/python3.10/site-packages/torch/fx/experimental/migrate_gradual_types/constraint_transformation.py b/parrot/lib/python3.10/site-packages/torch/fx/experimental/migrate_gradual_types/constraint_transformation.py new file mode 100644 index 0000000000000000000000000000000000000000..439e3d6195e654147f5f583b6b13fa9611757372 --- /dev/null +++ b/parrot/lib/python3.10/site-packages/torch/fx/experimental/migrate_gradual_types/constraint_transformation.py @@ -0,0 +1,1040 @@ +# mypy: ignore-errors +import copy +import itertools +from torch.fx.experimental.migrate_gradual_types.constraint_generator import BinConstraintT, MAX_TENSOR_RANK +from 
torch.fx.experimental.migrate_gradual_types.constraint import T, BinConstraintD, Conj, Constraint, DVar, TVar, \ + Transpose +from torch.fx.experimental.migrate_gradual_types.constraint import Disj, TGreatestUpperBound +from torch.fx.experimental.migrate_gradual_types.constraint import DGreatestUpperBound +from torch.fx.experimental.migrate_gradual_types.constraint import CalcConv, CalcMaxPool +from torch.fx.experimental.migrate_gradual_types.constraint import CalcProduct, CanReshape +from torch.fx.experimental.migrate_gradual_types.constraint import ApplyBroadcasting, Prod, F, GetItem, GetItemTensor, IndexSelect +from torch.fx.experimental.migrate_gradual_types.operation import op_eq, op_precision, op_leq, op_matching +from torch.fx.experimental.migrate_gradual_types.operation import op_consistency, op_neq +from torch.fx.experimental.migrate_gradual_types.operation import op_mul, op_add, op_sub, op_div, op_mod +from torch.fx.experimental.migrate_gradual_types.util import gen_tensor_dims, gen_nat_constraints, gen_dvar +from torch.fx.tensor_type import TensorType, Dyn +from typing import Callable, Dict, List + +_TRANSFORMATION_RULES: Dict[Constraint, Callable] = {} + + +def register_transformation_rule(call_target): + def register(fn): + if call_target in _TRANSFORMATION_RULES: + raise RuntimeError(f'Transformation rule already registered for {call_target}!') + _TRANSFORMATION_RULES[call_target] = fn + return fn + return register + + +def valid_index(index, dims): + """ + Given a list of dimensions, checks if an index is valid in the list + """ + try: + dims[index] + return T() + except IndexError: + return F() + + +@register_transformation_rule(Transpose) +def transform_transpose(constraint, counter): + """ + Similar to a sequence of two index-selects + """ + dims, counter = gen_tensor_dims(constraint.tensor_size, counter) + is_valid_index1 = valid_index(constraint.index1, dims) + is_valid_index2 = valid_index(constraint.index2, dims) + new_dims = 
copy.deepcopy(dims) + nat_constraints = gen_nat_constraints(dims) + + if is_valid_index1 == T() and is_valid_index2 == T(): + new_dims[constraint.index1] = dims[constraint.index2] + new_dims[constraint.index2] = dims[constraint.index1] + + transformed_constraint = Conj([BinConstraintT(constraint.input_var, TensorType(dims), op_eq), + *nat_constraints, + is_valid_index1, is_valid_index2, + BinConstraintT(constraint.output, TensorType(new_dims), op_eq)]) + return transformed_constraint, counter + + +@register_transformation_rule(IndexSelect) +def transform_index_select(constraint, counter): + """ + The constraints consider the given tensor size, checks if the index is valid + and if so, generates a constraint for replacing the input dimension + with the required dimension + """ + dims, counter = gen_tensor_dims(constraint.tensor_size, counter) + is_valid_index = valid_index(constraint.index, dims) + nat_constraints = gen_nat_constraints(dims) + + # if the index is valid then replace the input dimension with the new dimension + # otherwise the dimension will not be replaced and the clause will contain False + if is_valid_index == T(): + new_dims = copy.deepcopy(dims) + new_dims[constraint.index] = constraint.dim_replace + + transformed_constraint = Conj([BinConstraintT(constraint.input_var, TensorType(dims), op_eq), + *nat_constraints, + is_valid_index, + BinConstraintT(constraint.output, TensorType(new_dims), op_eq)]) + + # print(constraints) + return transformed_constraint, counter + + +@register_transformation_rule(GetItem) +def transform_get_item(constraint, counter): + """ + generate an equality of the form: + t = [a1, ..., an] + then generate constraints that check if the given index is valid + given this particular tensor size. + If the index is valid, generate a constraint to get the item + Note that we already handled the Dyn input case in the previous + step. 
+ Args: + constraint: GetItem which assumes we are getting an item from a tensor (not Dyn) + counter: variable tracking + Returns: simplified constraints for GetItem + + """ + dims, counter = gen_tensor_dims(constraint.tensor_size, counter) + nat_constraints = gen_nat_constraints(dims) + + + is_valid_index = valid_index(constraint.index, dims) + + all_constraints = [BinConstraintT(constraint.input_var, TensorType(dims), op_eq), + *nat_constraints, + is_valid_index] + + # if the index is valid, we generate a constraint for getting an item + # otherwise this clause will have been UNSAT due to the wrong index + if is_valid_index == T(): + all_constraints.append(BinConstraintD(constraint.res, dims[constraint.index], op_eq)) + + return Conj(all_constraints), counter + +def valid_index_tensor(index, dims): + """ + if the slice instances exceed the length of the dimensions + then this is a type error so we return False + """ + slice_count = 0 + for s in index: + if isinstance(s, slice): + slice_count += 1 + if slice_count > len(dims): + return F() + else: + return T() + +@register_transformation_rule(GetItemTensor) +def transform_get_item_tensor(constraint, counter): + """ + When the index is a tuple, then the output will be a tensor + TODO: we have to check if this is the case for all HF models + + The cases we are covering here are a tuple with one of: + - slice with default argument + - None + + None appends 1 to the input tensor dimensions + so each occurrence of 'None' increases the rank by 1 + + slice with default arguments does not change the rank + """ + assert isinstance(constraint.index_tuple, tuple) + + + # generate a result tensor of the expected size + dims, counter = gen_tensor_dims(constraint.tensor_size, counter) + nat_constraints = gen_nat_constraints(dims) + + # generate a place-holder list of the right rank + # where "slice" does not contribute to the rank and "None" does + none_c = constraint.index_tuple.count(None) + resulting_tensor_dims = (none_c + 
len(dims)) * [None] + + dim_index = 0 + for i in range(len(constraint.index_tuple)): + + # append 1 to the right location of the resulting tensor + if constraint.index_tuple[i] is None: + resulting_tensor_dims[i] = 1 + + elif constraint.index_tuple[i] == slice(None, None, None): + pass + + else: + raise NotImplementedError('Method not yet implemented') + + # append the remaining dimensions to the right location + dim_index = 0 + for i in range(len(resulting_tensor_dims)): + if resulting_tensor_dims[i] is None: + resulting_tensor_dims[i] = dims[dim_index] + dim_index += 1 + + # check if the index is valid + is_valid_index = valid_index_tensor(constraint.index_tuple, dims) + + # check if the resulting tensor is within bounds + if len(resulting_tensor_dims) > 4: + return F(), counter + + else: + constraints = [BinConstraintT(constraint.input_var, TensorType(dims), op_eq), + BinConstraintT(constraint.res, TensorType(resulting_tensor_dims), op_eq), + *nat_constraints, + is_valid_index] + return Conj(constraints), counter + + +@register_transformation_rule(BinConstraintT) +def generate_binconstraint_t(constraint, counter): + """ + Transform binary constraints for tensors + """ + + # precision constraints + if constraint.op == op_precision: + if constraint.lhs == Dyn: + return T(), counter + elif isinstance(constraint.lhs, TensorType): + is_fully_static = all(d != Dyn for d in constraint.lhs.__args__) + if is_fully_static: + return BinConstraintT(constraint.lhs, constraint.rhs, op_eq), counter + else: + new_dims = [] + + for _ in range(len(constraint.lhs.__args__)): + dim, counter = gen_dvar(counter) + new_dims.append(dim) + + new_dim_constraints = [BinConstraintD(old_dim, new_dim, op_precision) for + new_dim, old_dim in zip(new_dims, constraint.lhs.__args__)] + \ + [BinConstraintT(constraint.rhs, TensorType(new_dims), op_eq)] + \ + [BinConstraintD(1, new_dim, op_leq) for + new_dim in new_dims] + return Conj(new_dim_constraints), counter + + # matching + elif 
constraint.op == op_matching: + assert isinstance(constraint.rhs, TensorType) + d1 = constraint.rhs.__args__[0] + d2 = constraint.rhs.__args__[1] + d3 = constraint.rhs.__args__[2] + d4 = constraint.rhs.__args__[3] + + conj = [BinConstraintT(constraint.lhs, Dyn, op_eq), + BinConstraintD(d1, Dyn, op_eq), + BinConstraintD(d2, Dyn, op_eq), + BinConstraintD(d3, Dyn, op_eq), + BinConstraintD(d4, Dyn, op_eq)] + return Disj([Conj(conj), + BinConstraintT(constraint.lhs, TensorType([d1, d2, d3, d4]), op_eq)]), counter + + elif constraint.op == op_consistency: + c_dyn = Disj([BinConstraintT(constraint.lhs, Dyn, op_eq), BinConstraintT(constraint.rhs, Dyn, op_eq)]) + [c_tensor_1, c_tensor_2, c_tensor_3, c_tensor_4], counter = gen_consistency_constraints(constraint, counter) + + return Disj([c_dyn, c_tensor_1, c_tensor_2, c_tensor_3, c_tensor_4]), counter + + elif constraint.op == op_leq: + assert isinstance(constraint.rhs, int) + disj = [BinConstraintT(constraint.lhs, Dyn, op_eq)] + for i in range(1, constraint.rhs + 1): + dims = [] + for j in range(1, i + 1): + dim_var, counter = gen_dvar(counter) + dims.append(dim_var) + disj.append(BinConstraintT(constraint.lhs, TensorType(dims), op_eq)) + return Disj(disj), counter + else: + return constraint, counter + + +@register_transformation_rule(BinConstraintD) +def generate_binconstraint_d(constraint, counter): + """ + Transform binary constraints for dimensions + """ + if constraint.op == op_precision: + if isinstance(constraint.lhs, int): + return BinConstraintD(constraint.lhs, constraint.rhs, op_eq), counter + elif constraint.lhs == Dyn: + return T(), counter + + elif constraint.op == op_consistency: + return Disj([BinConstraintD(constraint.lhs, constraint.rhs, op_eq), + BinConstraintD(constraint.rhs, Dyn, op_eq), BinConstraintD(constraint.lhs, Dyn, op_eq)]), counter + + else: + return constraint, counter + + +@register_transformation_rule(Conj) +def generate_conj(constraint, counter): + """ + Transform conjunctions + """ + new = 
[] + for c in constraint.conjucts: + new_c, counter = transform_constraint(c, counter) + new.append(new_c) + return Conj(new), counter + + +@register_transformation_rule(Disj) +def generate_disj(constraint, counter): + """ + Transform disjunctions + """ + new = [] + for c in constraint.disjuncts: + new_c, counter = transform_constraint(c, counter) + new.append(new_c) + return Disj(new), counter + + +@register_transformation_rule(TGreatestUpperBound) +def generate_gub(constraint, counter): + """ + Transform greatest upper bound for tensors. Results in equality and Greatest Upper Bound + on dimensions + """ + c1 = Conj([Disj([BinConstraintT(constraint.rhs1, Dyn, op_eq), + BinConstraintT(constraint.rhs2, Dyn, op_eq)]), BinConstraintT(constraint.res, Dyn, op_eq)]) + + [c2, c3, c4, c5], counter = gen_greatest_upper_bound(constraint, counter) + + return Disj([c1, c2, c3, c4, c5]), counter + + +@register_transformation_rule(DGreatestUpperBound) +def generate_d_gub(constraint, counter): + """ + Transform greatest upper bound for dimensions into equality constraints + """ + c1 = Conj([BinConstraintD(constraint.rhs1, Dyn, op_eq), BinConstraintD(constraint.res, constraint.rhs2, op_eq)]) + c2 = Conj([BinConstraintD(constraint.rhs2, Dyn, op_eq), BinConstraintD(constraint.res, constraint.rhs1, op_eq)]) + c3 = Conj([BinConstraintD(constraint.rhs2, constraint.rhs1, op_eq), BinConstraintD(constraint.res, constraint.rhs1, op_eq)]) + return Disj([c1, c2, c3]), counter + + +@register_transformation_rule(CalcConv) +def generate_calc_conv(constraint, counter): + d, counter = gen_tensor_dims(4, counter) + conv_result = TensorType([d[0], d[1], d[2], d[3]]) + + # the convolution result is a tensor of size 4 + c1 = BinConstraintT(constraint.conv_result, conv_result, op_eq) + + # the second dimension of the output is equal to the output channels + c2 = Conj([BinConstraintD(d[1], constraint.c_out, op_eq), BinConstraintD(d[1], Dyn, op_neq)]) + + # the input corresponds to the output in the 
first dimension of the convolution + c3 = BinConstraintD(constraint.matching_constraint[0], d[0], op_eq) + + c4, c5 = calc_last_two_dims(constraint, d) + + leq_constraints = Conj([BinConstraintD(0, d[0], op_leq), + BinConstraintD(0, d[1], op_leq), + BinConstraintD(0, d[2], op_leq), + BinConstraintD(0, d[3], op_leq)]) + + return Conj([c1, c2, c3, c4, c5, leq_constraints]), counter + + +@register_transformation_rule(CalcMaxPool) +def generate_calc_maxpool(constraint, counter): + """ + Transform maxpool constraints + """ + d, counter = gen_tensor_dims(4, counter) + maxpool_result = TensorType([d[0], d[1], d[2], d[3]]) + + # the maxpool result is a tensor of size 4 + c1 = BinConstraintT(constraint.maxpool_result, maxpool_result, op_eq) + + # the input corresponds to the output in the first and second dimension of maxpool + c2 = BinConstraintD(constraint.matching_constraint[1], d[1], op_eq) + c3 = BinConstraintD(constraint.matching_constraint[0], d[0], op_eq) + c4, c5 = calc_last_two_dims(constraint, d) + + leq_constraints = Conj([BinConstraintD(0, d[0], op_leq), + BinConstraintD(0, d[1], op_leq), + BinConstraintD(0, d[2], op_leq), + BinConstraintD(0, d[3], op_leq)]) + + return Conj([c1, c2, c3, c4, c5, leq_constraints]), counter + + +@register_transformation_rule(CalcProduct) +def generate_calc_product(constraint, counter): + """ + Transform flatten constraints + """ + start = constraint.start + end = constraint.end + dims = constraint.dims_to_flatten + flattened = constraint.flattened + n = len(constraint.dims_to_flatten) + + # this will be evaluated right here + boundary_check = (0 <= start and start < end and end <= n) + + c_boundary = T() if boundary_check else F() + + lhs = dims[0:start] + rhs = dims[end:] + mid = dims[start:end] + + all_possibilities = generate_all_int_dyn_dim_possibilities(mid) + + all_constraints = [] + + for p in all_possibilities: + p = list(p) + # this tells us there is a dynamic variable + contains_dyn = not all(constraint.op == op_neq for 
constraint in p) + if contains_dyn: + mid_var = [Dyn] + total_constraints = lhs + mid_var + rhs + if len(total_constraints) > 4: + all_constraints.append(F()) + else: + all_constraints.append(Conj([BinConstraintT(flattened, TensorType(lhs + mid_var + rhs), op_eq)] + p)) + else: + new_var, counter = gen_dvar(counter) + mid_eq_prod = Conj([BinConstraintD(new_var, Prod(mid), op_eq), BinConstraintD(new_var, Dyn, op_neq)]) + mid_var = [new_var] + total_constraints = lhs + mid_var + rhs + if len(total_constraints) > 4: + all_constraints.append(F()) + else: + all_constraints.append(Conj([BinConstraintT(flattened, TensorType(lhs + mid_var + rhs), op_eq), mid_eq_prod] + p)) + + return Conj([Disj(all_constraints), c_boundary]), counter + + +@register_transformation_rule(CanReshape) +def generate_reshape(constraint, counter): + """ + Transform reshape constraints + """ + d, counter = gen_tensor_dims(4, counter) + + d1 = d[0] + d2 = d[1] + d3 = d[2] + d4 = d[3] + + target = constraint.target.__args__ + + is_fully_static = all(d != Dyn for d in target) + + # dynamic tensor + c1_dyn = BinConstraintT(constraint.src, Dyn, op_eq) + c2_tensor1 = BinConstraintT(constraint.src, TensorType([d1]), op_eq) + c2_tensor2 = BinConstraintT(constraint.src, TensorType([d1, d2]), op_eq) + c2_tensor3 = BinConstraintT(constraint.src, TensorType([d1, d2, d3]), op_eq) + c2_tensor4 = BinConstraintT(constraint.src, TensorType([d1, d2, d3, d4]), op_eq) + + d1_eq_dyn = BinConstraintD(d1, Dyn, op_eq) + d1_neq_dyn = BinConstraintD(d1, Dyn, op_neq) + + d2_eq_dyn = BinConstraintD(d2, Dyn, op_eq) + d2_neq_dyn = BinConstraintD(d2, Dyn, op_neq) + + d3_eq_dyn = BinConstraintD(d3, Dyn, op_eq) + d3_neq_dyn = BinConstraintD(d3, Dyn, op_neq) + + d4_eq_dyn = BinConstraintD(d3, Dyn, op_eq) + d4_neq_dyn = BinConstraintD(d3, Dyn, op_neq) + + nat_d1 = BinConstraintD(0, d1, op_leq) + nat_d2 = BinConstraintD(0, d2, op_leq) + nat_d3 = BinConstraintD(0, d3, op_leq) + nat_d4 = BinConstraintD(0, d4, op_leq) + + if 
is_fully_static: + # size 1 tensor + c3_tensor1 = Disj([d1_eq_dyn, + (Conj([d1_neq_dyn, + BinConstraintD(d1, Prod(target), op_eq)]))]) + all_tensor_1 = Conj([c2_tensor1, c3_tensor1]) + + # size 2 tensor + all_tensor_2 = Conj([c2_tensor2, gen_all_reshape_possibilities([d1, d2], target)]) + + # size 3 tensor + all_tensor_3 = Conj([c2_tensor3, gen_all_reshape_possibilities([d1, d2, d3], target)]) + + # size 4 tensor + all_tensor_4 = Conj([c2_tensor4, gen_all_reshape_possibilities([d1, d2, d3, d4], target)]) + + return Conj([Disj([c1_dyn, all_tensor_1, all_tensor_2, all_tensor_3, all_tensor_4]), + nat_d1, nat_d2, nat_d3, nat_d4]), counter + + # then there must be exactly one occurrence of dyn + else: + new_target = [] + + for n in target: + if n != Dyn: + new_target.append(n) + + # tensor 1 + c3_tensor1 = Disj([d1_eq_dyn, + (Conj([d1_neq_dyn, + is_dim_div_by_target(new_target, d1)]))]) + all_tensor_1 = Conj([c2_tensor1, c3_tensor1]) + + # tensor 2 + c21 = Disj([d1_eq_dyn, d2_eq_dyn]) + c22 = Conj([d1_neq_dyn, d2_neq_dyn, is_dim_div_by_target(new_target, Prod([d1, d2]))]) + all_tensor_2 = Conj([c2_tensor2, Disj([c21, c22])]) + + # tensor 3 + c31 = Disj([d1_eq_dyn, d2_eq_dyn, d3_eq_dyn]) + c32 = Conj([d1_neq_dyn, d2_neq_dyn, d3_neq_dyn, is_dim_div_by_target(new_target, Prod([d1, d2, d3]))]) + all_tensor_3 = Conj([c2_tensor3, Disj([c31, c32])]) + + # tensor 4 + c41 = Disj([d1_eq_dyn, d2_eq_dyn, d3_eq_dyn, d4_eq_dyn]) + c42 = Conj([d1_neq_dyn, d2_neq_dyn, d3_neq_dyn, d4_neq_dyn, is_dim_div_by_target(new_target, Prod([d1, d2, d3, d4]))]) + all_tensor_4 = Conj([c2_tensor4, Disj([c41, c42])]) + + return Conj([Disj([c1_dyn, all_tensor_1, all_tensor_2, all_tensor_3, all_tensor_4]), + nat_d1, nat_d2, nat_d3, nat_d4]), counter + + +@register_transformation_rule(ApplyBroadcasting) +def generate_broadcasting(constraint, counter): + """ + Transform broadcasting constraints + """ + e11, e12 = constraint.res1, constraint.res2 + e1, e2 = constraint.input1, constraint.input2 + + e1_dyn 
= BinConstraintT(e1, Dyn, op_eq) + e2_dyn = BinConstraintT(e2, Dyn, op_eq) + + # Introduce dimensions + e1_equal_e11 = BinConstraintT(e1, e11, op_eq) + e2_equal_e12 = BinConstraintT(e2, e12, op_eq) + + # dyn possibility + e1_dyn_constraint = Conj([e1_dyn, e1_equal_e11, e2_equal_e12]) + e2_dyn_constraint = Conj([e2_dyn, e1_equal_e11, e2_equal_e12]) + + # tensor possibility + # generate dimensions to create tensors of size 1 + final_tensor_1_constraint, _, _, nat_dims_1, counter = \ + gen_broadcasting_constraints(e1, e2, e11, e12, 1, counter) + + # generate dimensions to create tensors of size 2 + final_tensor_2_constraint_no_padding, final_tensor_2_constraint_padding_arg1, \ + final_tensor_2_constraint_padding_arg2, nat_dims_2, counter = \ + gen_broadcasting_constraints(e1, e2, e11, e12, 2, counter) + + # generate dimensions to create tensors of size 3 + final_tensor_3_constraint_no_padding, final_tensor_3_constraint_padding_arg1, \ + final_tensor_3_constraint_padding_arg2, nat_dims_3, counter = \ + gen_broadcasting_constraints(e1, e2, e11, e12, 3, counter) + + # generate dimensions to create tensors of size 4 + final_tensor_4_constraint_no_padding, final_tensor_4_constraint_padding_arg1, \ + final_tensor_4_constraint_padding_arg2, nat_dims_4, counter = \ + gen_broadcasting_constraints(e1, e2, e11, e12, 4, counter) + + final_result = Disj([ + e1_dyn_constraint, + e2_dyn_constraint, + final_tensor_1_constraint, + final_tensor_2_constraint_no_padding, + final_tensor_2_constraint_padding_arg1, + final_tensor_2_constraint_padding_arg2, + final_tensor_3_constraint_no_padding, + final_tensor_3_constraint_padding_arg1, + final_tensor_3_constraint_padding_arg2, + final_tensor_4_constraint_no_padding, + final_tensor_4_constraint_padding_arg1, + final_tensor_4_constraint_padding_arg2 + ]) + + return Conj([final_result, *nat_dims_1, *nat_dims_2, *nat_dims_3, *nat_dims_4]), counter + + +def transform_constraint(constraint: Constraint, counter: int): + """ + Transforms a 
constraint into a simpler constraint. + Ex: precision and consistency are transformed to equality + Args: + constraint: constraint to be transformed + counter: for variable tracking + + Returns: Constraint + + """ + if type(constraint) in _TRANSFORMATION_RULES: + return _TRANSFORMATION_RULES[type(constraint)](constraint, counter) + + else: + return constraint, counter + + + + +def calc_last_two_dims(constraint, d: List[DVar]): + """ + Generates constraints for the last two dimensions of a convolution or a maxpool output + Args: + constraint: CalcConv or CalcMaxPool + d: The list of output dimensions + + Returns: Constraints for calculating the last two dimensions of the output + + """ + + assert isinstance(constraint, (CalcConv, CalcMaxPool)) + + b3 = constraint.matching_constraint[2] + b4 = constraint.matching_constraint[3] + + b3_dyn = Conj([BinConstraintD(d[2], Dyn, op_eq), BinConstraintD(b3, Dyn, op_eq)]) + b4_dyn = Conj([BinConstraintD(d[3], Dyn, op_eq), BinConstraintD(b4, Dyn, op_eq)]) + + d3_not_dyn = Conj([BinConstraintD(d[2], Dyn, op_neq), BinConstraintD(b3, Dyn, op_neq)]) + d4_not_dyn = Conj([BinConstraintD(d[3], Dyn, op_neq), BinConstraintD(b4, Dyn, op_neq)]) + + # transform parameters into tuples incase they are not already + padding = (constraint.padding, constraint.padding) \ + if isinstance(constraint.padding, int) else constraint.padding + kernel = (constraint.kernel, constraint.kernel) \ + if isinstance(constraint.kernel, int) else constraint.kernel + stride = (constraint.stride, constraint.stride) \ + if isinstance(constraint.stride, int) else constraint.stride + dilation = (constraint.dilation, constraint.dilation) \ + if isinstance(constraint.dilation, int) else constraint.dilation + + f1 = BinConstraintD(b3, BinConstraintD(2, padding[0], op_mul), op_add) + f2 = BinConstraintD(dilation[0], BinConstraintD(kernel[0], 1, op_sub), op_mul) + f3 = BinConstraintD(BinConstraintD(BinConstraintD(f1, f2, op_sub), 1, op_sub), stride[0], op_div) + f4 = 
BinConstraintD(f3, 1, op_add) + + c4 = Disj([b3_dyn, Conj([d3_not_dyn, BinConstraintD(d[2], f4, op_eq)])]) + + f11 = BinConstraintD(b4, BinConstraintD(2, padding[1], op_mul), op_add) + f22 = BinConstraintD(dilation[1], BinConstraintD(kernel[1], 1, op_sub), op_mul) + f33 = BinConstraintD(BinConstraintD(BinConstraintD(f11, f22, op_sub), 1, op_sub), stride[1], op_div) + f44 = BinConstraintD(f33, 1, op_add) + + c5 = Disj([b4_dyn, Conj([d4_not_dyn, BinConstraintD(d[3], f44, op_eq)])]) + + return c4, c5 + + +def generate_all_int_dyn_dim_possibilities(my_list: List[DVar]): + """ + Generate all possibilities of being equal or not equal to dyn for my_list + Args: + my_list: List of tensor dimensions + + Returns: A list of a list of constraints. Each list of constraints corresponds to + one possibility about the values of the dimension variables + """ + # generate all possibilities of being equal or not equal to dyn for my_list + eq_possibilities = [BinConstraintD(my_list[i], Dyn, op_eq) for i in range(len(my_list))] + neq_possibilities = [BinConstraintD(my_list[i], Dyn, op_neq) for i in range(len(my_list))] + d_possibilities = [] + + for i in zip(eq_possibilities, neq_possibilities): + d_possibilities.append(list(i)) + all_possibilities = list(itertools.product(*d_possibilities)) + return all_possibilities + + +def is_target_div_by_dim(target: List[int], dim: List[DVar]): + """ + Generate constraints to check if the target dimensions are divisible by the input dimensions + Args: + target: Target dimensions + dim: Input dimensions + + Returns: Constraints to check divisibility + + """ + return BinConstraintD(BinConstraintD(Prod(target), dim, op_mod), 0, op_eq) + + +def is_dim_div_by_target(target: List[int], dim: List[DVar]): + """ + Generate constraints to check if the input dimensions is divisible by the target dimensions + Args: + target: Target dimensions + dim: Input dimensions + + Returns: Constraints to check divisibility + + """ + return 
BinConstraintD(BinConstraintD(dim, Prod(target), op_mod), 0, op_eq) + + +def gen_all_reshape_possibilities(list_of_dims, target): + """ + Consider all possibilities what the input dimensions could be (number or dynamic) + Then generate the appropriate constraints using multiplication or mod depending on the possibility + The possibilities we consider here are the cross product of being equal to dyn or not equal to dyn + for the input. Target is fixed because at most one dimension could be dyn. + We have different cases for this. + + Args: + list_of_dims: The input list of dimensions + target: The tensor we want to reshape to + + Returns: A disjunction of transformed reshape constraints + + """ + all_possibilities = generate_all_int_dyn_dim_possibilities(list_of_dims) + + all_constraints = [] + + for p in all_possibilities: + to_multiply = [] + + p = list(p) + + for constraint in p: + assert isinstance(constraint, BinConstraintD) + if constraint.op == op_neq: + to_multiply.append(constraint.lhs) + + if not to_multiply: + all_constraints.append(Conj(p)) + + elif len(to_multiply) < len(list_of_dims): + all_constraints.append(Conj(p + [is_target_div_by_dim(target, Prod(to_multiply))])) + else: + all_constraints.append(Conj(p + [BinConstraintD(Prod(list_of_dims), + Prod(target), op_eq)])) + + return Disj(all_constraints) + + +def broadcast_dim(tensor_input1, tensor_input2, res1, res2, index, padding=False): + """ + Apply broadcasting to the 'index' dimension of tensor_input1. + Args: + tensor_input1: should represent [d1, ..., d_index, ...] 
where d_index = 1 + tensor_input2: represents the second input + res1: broadcasted result 1 + res2: broadcasted result 2 + index: the index to broadcast + padding: If padding was used, then tensor_input1[index] does not exist + + Returns: + + """ + if tensor_input1[index] is None: + assert padding + + + if not padding: + # then the inputs are the same length so they all have dimensions at "index" + return Conj([BinConstraintD(tensor_input1[index], 1, op_eq), + BinConstraintD(res1[index], res2[index], op_eq), + BinConstraintD(res2[index], tensor_input2[index], op_eq)]) + + else: + # we don't set the input dimension to 1, since it doesn't exist. + return Conj([BinConstraintD(res1[index], res2[index], op_eq), + BinConstraintD(res2[index], tensor_input2[index], op_eq)]) + + +def apply_padding(e1_var: TVar, + e11: BinConstraintT, + e2: BinConstraintT, + e12: BinConstraintT, + d2: List[DVar], + d11: List[DVar], + d12: List[DVar], + counter: int): + """ + We are considering the possibility where one input has less dimensions than + another input, so we apply padding to the broadcasted results + + Args: + e1_var: Variable representing the first input where padding will be + e11: constraint of the form e11 = Tensortype[d1, ..., dn] + e2: constraint of the form e2 = Tensortype[d1, ..., dn] + e12: constraint of the form e11 = Tensortype[d1, ..., dn] + d2: Tensor variables for the second input + d11: Tensor variables for the broadcasted first input + d12: Tensor variables for the broadcasted second input + counter: variable tracking + + Returns: A new constraint whose goal is to apply padding to the broadcasted result + + """ + + res = [] + + # pad the shorter input with None so we can pass it to the broadcasting helper function + for i in range(1, len(d2)): + + d1, counter = gen_tensor_dims(i, counter) + + nat_constraints = gen_nat_constraints(d1 + d2 + d11 + d12) + + e1 = BinConstraintT(e1_var, TensorType(d1), op_eq) + + simulate_padding = [None] * (len(d2) - i) + + assert 
len(simulate_padding + d1) == len(d2) + + broadcast_padding = [] + + # for every padding size, we also consider broadcasting + for j in range(len(d2) - i): + broadcast_padding.append(broadcast_dim(simulate_padding, d2, d11, d12, j, True)) + + # we consider the possibilities for broadcasting for every dimension. Since we already + # padded d1, we do not consider it while broadcasting + all_broadcasting_possibilities = generate_all_broadcasting_possibilities_no_padding(d1, + d2[(len(d2) - i):], + d11[(len(d2) - i):], + d12[(len(d2) - i):]) + # combine all constraints into a conjunction + c = Conj([e1, e11, e2, e12, + *broadcast_padding, + all_broadcasting_possibilities, + *nat_constraints + ]) + res.append(c) + + return Disj(res), counter + + +def no_broadcast_dim_with_index(d1: List[DVar], + d2: List[DVar], + d3: List[DVar], + d4: List[DVar], + i: int): + """ + Args: + d1: input 1 + d2: input 2 + d3: simulated broadcasting for input 1 + d4: simulated broadcasting for input 2 + i: the rank of the resulting tensor addition + + Returns: Constraints for when no broadcasting occurs + """ + return Conj([ + Disj([ + Conj([BinConstraintD(d1[i], 1, op_eq), + BinConstraintD(d2[i], 1, op_eq)]), + + Conj([BinConstraintD(d1[i], 1, op_neq), + BinConstraintD(d2[i], 1, op_neq)])]), + + BinConstraintD(d1[i], d3[i], op_eq), + BinConstraintD(d2[i], d4[i], op_eq)]) + + + +def gen_lists_of_dims(num_tensors: int, dim_size: int, counter: int): + """ + Generate lists of DVar to represent tensor dimensions + Args: + num_tensors: the required number of tensors + dim_size: the number of dimensions for each tensor + counter: variable tracking + + Returns: A list of a list of tensor dimensions + + """ + res = [] + + for _ in range(num_tensors): + dims, counter = gen_tensor_dims(dim_size, counter) + res.append(dims) + + return res, counter + + +def create_equality_constraints_for_broadcasting(e1: TVar, + e2: TVar, + e11: TVar, + e12: TVar, + d1: List[DVar], + d2: List[DVar], + d11: List[DVar], + 
d12: List[DVar]): + """ + Create equality constraints for when no broadcasting occurs + Args: + e1: Input 1 + e2: Input 2 + e11: Broadcasted input 1 + e12: Broadcasted input 2 + d1: Variables that store dimensions for e1 + d2: Variables that store dimensions for e2 + d11: Variables that store dimensions for e11 + d12: Variables that store dimensions for e22 + + Returns: Four equality constraints + + """ + + e1_tensor = BinConstraintT(e1, TensorType(d1), op_eq) + e11_tensor = BinConstraintT(e11, TensorType(d11), op_eq) + e2_tensor = BinConstraintT(e2, TensorType(d2), op_eq) + e12_tensor = BinConstraintT(e12, TensorType(d12), op_eq) + return [e1_tensor, e11_tensor, e2_tensor, e12_tensor] + + +def gen_consistency_constraints(constraint: Constraint, counter: int): + """ + Args: + constraint: Consistency constraint on tensors + counter: for variable tracking + + Returns: Equality and consistency constraints on dimensions + + """ + + all_constraints = [] + + for i in range(1, MAX_TENSOR_RANK + 1): + new_dims_rhs_1, counter = gen_tensor_dims(i, counter) + new_dims_rhs_2, counter = gen_tensor_dims(i, counter) + + nat_constraints = gen_nat_constraints(new_dims_rhs_1 + new_dims_rhs_2) + + c_tensor_i = Conj([BinConstraintT(constraint.lhs, TensorType(new_dims_rhs_1), op_eq), + BinConstraintT(constraint.rhs, TensorType(new_dims_rhs_2), op_eq)] + + [BinConstraintD(d1, d2, op_consistency) for + d1, d2 in zip(new_dims_rhs_1, new_dims_rhs_2)] + nat_constraints) + + all_constraints.append(c_tensor_i) + + return all_constraints, counter + + +def gen_greatest_upper_bound(constraint: TGreatestUpperBound, counter: int): + """ + Args: + constraint: Greatest upper bound on tensors + counter: variable tracking + + Returns: A set of equality constraints and DGreatestUpperBound constraints + + """ + + all_constraints = [] + + for i in range(1, MAX_TENSOR_RANK + 1): + c = [] + dims1, counter = gen_tensor_dims(i, counter) + c1tensor = TensorType(dims1) + + dims2, counter = gen_tensor_dims(i, 
counter) + c2tensor = TensorType(dims2) + + dims3, counter = gen_tensor_dims(i, counter) + c3tensor = TensorType(dims3) + + c += [BinConstraintT(constraint.rhs1, c1tensor, op_eq), + BinConstraintT(constraint.rhs2, c2tensor, op_eq), + BinConstraintT(constraint.res, c3tensor, op_eq)] + \ + gen_nat_constraints(dims1 + dims2 + dims3) + + assert len(c3tensor.__args__) == len(c1tensor.__args__) == len(c2tensor.__args__) + for i in range(len(c3tensor.__args__)): + c.append(DGreatestUpperBound(c3tensor.__args__[i], + c1tensor.__args__[i], + c2tensor.__args__[i])) + + all_constraints.append(Conj(c)) + return all_constraints, counter + + +def generate_all_broadcasting_possibilities_no_padding(d1: List[DVar], d2: List[DVar], d11: List[DVar], d12: List[DVar]): + """ + Generate broadcasting constraints assuming no padding. Broadcasting can happen at any dimension. + We look at all combinations for all dimensions in d1 and d2 + Args: + d1: input1 dimensions + d2: input2 dimensions + d11: broadcasted input1 dimensions + d12: broadcasted input2 dimensions + + Returns: broadcasting constraints relating the input dimensions to the broadcasted dimensions + + """ + + size = len(d1) + + res2 = [] + + for i in range(size): + t1 = broadcast_dim(d1, d2, d11, d12, i) + t2 = broadcast_dim(d2, d1, d12, d11, i) + t3 = no_broadcast_dim_with_index(d1, d2, d11, d12, i) + + res2.append(Disj([t1, t2, t3])) + + return Conj(res2) + + +def gen_broadcasting_constraints(e1: TVar, e2: TVar, e11: TVar, e12: TVar, i: int, counter: int): + """ + Simulates broadcasting on e1 and e2 and returns the results + respectively in e11 and e12. Because of gradual types, + e1 and e2 may not be equal. Similarly, e11 and e12 may not + be equal. e11 and e12 should be guaranteed to be consistent + as they represent the shapes of the tensors to be added after + broadcasting. 
+ Args: + e1: TVar representing the type of input 1 + e2: TVar representing the type of input 2 + e11: TVar representing the representing broadcasted input 1 + e12: TVar representing the representing broadcasted input 2 + i: The rank of the resulting type of addition + counter: for variable tracking + + Returns: Simplified broadcasting constraints + + """ + dims, counter = gen_lists_of_dims(4, i, counter) + [d1, d2, d3, d4] = dims + nat_dims_i = gen_nat_constraints(list(itertools.chain.from_iterable(dims))) + + initialize_tensors_constraints = create_equality_constraints_for_broadcasting(e1, e2, e11, e12, + d1, d2, d3, d4) + + [e1_tensor, e11_tensor, e2_tensor, e12_tensor] = initialize_tensors_constraints + + # without padding, broadcast all possibilities for tensors of size i + final_tensor_constraint_no_padding = Conj([*initialize_tensors_constraints, + generate_all_broadcasting_possibilities_no_padding(d1, d2, d3, d4)]) + + # with padding, broadcast all possibilities for tensors of size i + final_tensor_constraint_padding_arg1, counter = \ + apply_padding(e1, e11_tensor, e2_tensor, e12_tensor, d2, d3, d4, counter) + + final_tensor_constraint_padding_arg2, counter = \ + apply_padding(e2, e12_tensor, e1_tensor, e11_tensor, d1, d4, d3, counter) + + return final_tensor_constraint_no_padding, \ + final_tensor_constraint_padding_arg1, \ + final_tensor_constraint_padding_arg2, nat_dims_i, counter diff --git a/parrot/lib/python3.10/site-packages/torch/fx/experimental/migrate_gradual_types/transform_to_z3.py b/parrot/lib/python3.10/site-packages/torch/fx/experimental/migrate_gradual_types/transform_to_z3.py new file mode 100644 index 0000000000000000000000000000000000000000..c8cf70006cd84c662f2f2ffd36e208b54bc1bbea --- /dev/null +++ b/parrot/lib/python3.10/site-packages/torch/fx/experimental/migrate_gradual_types/transform_to_z3.py @@ -0,0 +1,349 @@ +# mypy: allow-untyped-defs +from torch.fx.experimental.migrate_gradual_types.constraint import Conj, Disj, T, F, 
BinConstraintT, BVar, is_bool_expr +from torch.fx.experimental.migrate_gradual_types.constraint import BinConstraintD, TVar, DVar +from torch.fx.experimental.migrate_gradual_types.constraint import Prod, is_algebraic_expression, is_dim +from torch.fx.experimental.migrate_gradual_types.constraint_generator import ConstraintGenerator +from torch.fx.experimental.migrate_gradual_types.constraint_transformation import transform_constraint +from torch.fx.experimental.migrate_gradual_types.operation import op_add, op_eq, op_neq, op_gt, op_lt +from torch.fx.experimental.migrate_gradual_types.operation import op_leq, op_sub, op_div, op_mul, op_mod +from torch.fx.tensor_type import TensorType, Dyn + +try: + import z3 # type: ignore[import] + from torch.fx.experimental.migrate_gradual_types.z3_types import tensor_type, z3_dyn, D + HAS_Z3 = True + + def transform_to_z3(constraint, counter, dimension_dict): + if isinstance(constraint, Conj): + conjuncts = [] + for c in constraint.conjucts: + new_c, counter = transform_to_z3(c, counter, dimension_dict) + conjuncts.append(new_c) + return z3.And(conjuncts), counter + + elif isinstance(constraint, Disj): + disjuncts = [] + for c in constraint.disjuncts: + new_c, counter = transform_to_z3(c, counter, dimension_dict) + disjuncts.append(new_c) + return z3.Or(disjuncts), counter + + elif isinstance(constraint, T): + return True, counter + + elif isinstance(constraint, F): + return False, counter + + elif isinstance(constraint, BinConstraintT): + if constraint.op == op_eq: + lhs, counter = transform_var(constraint.lhs, counter, dimension_dict) + rhs, counter = transform_var(constraint.rhs, counter, dimension_dict) + return (lhs == rhs), counter + + else: + raise NotImplementedError('Method not yet implemented') + + elif isinstance(constraint, BinConstraintD): + if constraint.op == op_eq: + + if isinstance(constraint.lhs, BVar) and is_bool_expr(constraint.rhs): + transformed_rhs, counter = transform_to_z3(constraint.rhs, counter, 
dimension_dict) + transformed_lhs = z3.Bool(constraint.lhs.c) + return transformed_lhs == transformed_rhs, counter + + elif is_dim(constraint.lhs) and is_dim(constraint.rhs): + # with dimension transformations we consider the encoding + lhs, counter = transform_dimension(constraint.lhs, counter, dimension_dict) + rhs, counter = transform_dimension(constraint.rhs, counter, dimension_dict) + return lhs == rhs, counter + + else: + # then we have an algebraic expression which means that we disregard the + # first element of the encoding + lhs, counter = transform_algebraic_expression(constraint.lhs, counter, dimension_dict) + rhs, counter = transform_algebraic_expression(constraint.rhs, counter, dimension_dict) + return lhs == rhs, counter + + # The assumption here is that the LHS and RHS must be dimensions + elif constraint.op == op_neq: + assert is_dim(constraint.lhs) + assert is_dim(constraint.rhs) + lhs, counter = transform_dimension(constraint.lhs, counter, dimension_dict) + rhs, counter = transform_dimension(constraint.rhs, counter, dimension_dict) + if constraint.rhs == Dyn or constraint.lhs == Dyn: + if constraint.rhs == Dyn: + return lhs.arg(0) == 1, counter + elif constraint.lhs == Dyn: + return rhs.arg(0) == 1, counter + + # if one of the instances is a number + elif isinstance(constraint.lhs, int) or isinstance(constraint.rhs, int): + if isinstance(constraint.lhs, int): + return z3.Or([rhs.arg(0) == 0, z3.And([rhs.arg(0) == 1, lhs.arg(1) != rhs.arg(1)])]), counter + + elif isinstance(constraint.rhs, int): + return z3.Or([lhs.arg(0) == 0, z3.And([lhs.arg(0) == 1, lhs.arg(1) != rhs.arg(1)])]), counter + + else: + return z3.Or([z3.And([lhs.arg(0) == 0, rhs.arg(0) != 0]), + z3.And([lhs.arg(0) != 0, rhs.arg(0) == 0]), + z3.And([lhs.arg(0) != 0, rhs.arg(0) != 0, lhs.arg(1) != rhs.arg(1)])]), counter + + + elif constraint.op == op_leq: + # if the dimensions are not dyn, this will come into effect + # there would have been another constraint specifying if a given 
dimension + # is dyn or not + assert is_dim(constraint.lhs) and is_dim(constraint.rhs) + lhs, counter = transform_algebraic_expression(constraint.lhs, counter, dimension_dict) + rhs, counter = transform_algebraic_expression(constraint.rhs, counter, dimension_dict) + return lhs <= rhs, counter + + elif constraint.op == op_gt: + assert is_dim(constraint.lhs) and is_dim(constraint.rhs) + lhs, counter = transform_algebraic_expression(constraint.lhs, counter, dimension_dict) + rhs, counter = transform_algebraic_expression(constraint.rhs, counter, dimension_dict) + return lhs > rhs, counter + + elif constraint.op == op_lt: + assert is_dim(constraint.lhs) and is_dim(constraint.rhs) + lhs, counter = transform_algebraic_expression(constraint.lhs, counter, dimension_dict) + rhs, counter = transform_algebraic_expression(constraint.rhs, counter, dimension_dict) + return lhs < rhs, counter + + else: + raise NotImplementedError('operation not yet implemented') + + else: + raise NotImplementedError('Operation not yet implemented') + + + def transform_var(tensor, counter, dimension_dict): + """ + Transforms tensor variables to a format understood by z3 + Args: + tensor: Tensor variable or a tensor type potentially with variable dimensions + Returns: Transformed variable to a z3 format + + """ + if isinstance(tensor, TensorType): + res = [] + for t in tensor.__args__: + transformed, counter = transform_dimension(t, counter, dimension_dict) + res.append(transformed) + + assert len(res) <= 4 + if len(tensor.__args__) == 1: + return tensor_type.tensor1(res[0]), counter + elif len(tensor.__args__) == 2: + return tensor_type.tensor2(res[0], res[1]), counter + elif len(tensor.__args__) == 3: + return tensor_type.tensor3(res[0], res[1], res[2]), counter + elif len(tensor.__args__) == 4: + return tensor_type.tensor4(res[0], res[1], res[2], res[3]), counter + + elif tensor == Dyn: + return z3_dyn, counter + + elif isinstance(tensor, TVar): + return z3.Const(tensor.tvar, tensor_type), 
def transform_dimension(dimension, counter, dimension_dict):
    """
    Transform a dimension (Dyn, int, or DVar) into our z3 encoding.

    A dimension is encoded as a pair D(flag, value), where the flag indicates
    whether the dimension is concrete (1) or dynamic (0 / a z3 variable).

    Args:
        dimension: Dyn, a concrete int, or a dimension variable (DVar)
        counter: variable tracking counter
        dimension_dict: cache mapping dimension-variable names to flag counters

    Returns: the encoded dimension and the current counter

    Raises:
        NotImplementedError: if `dimension` is not Dyn, an int, or a DVar
    """
    if dimension == Dyn:
        # dynamic dimension: flag 0 and a fresh z3 variable for the unknown value
        counter += 1
        return D(0, z3.Int(counter)), counter
    elif isinstance(dimension, int):
        # concrete dimension: flag 1 and the literal value
        return D(1, dimension), counter
    elif isinstance(dimension, DVar):
        # reuse the flag variable for a DVar we have already seen so the same
        # dimension variable is always encoded consistently
        if dimension.c in dimension_dict:
            return D(z3.Int(dimension_dict[dimension.c]), z3.Int(dimension.c)), counter
        else:
            counter += 1
            dimension_dict[dimension.c] = counter
            return D(z3.Int(counter), z3.Int(dimension.c)), counter
    else:
        # Previously this fell through and implicitly returned None, which
        # surfaced as an opaque failure far from the cause. Fail loudly instead.
        raise NotImplementedError(f'cannot transform dimension {dimension}')


def transform_algebraic_expression(expr, counter, dimension_dict):
    """
    Transforms an algebraic expression to z3 format

    Args:
        expr: a dimension (Dyn/int/DVar), a Prod, or a binary algebraic expression
        counter: variable tracking counter
        dimension_dict: cache mapping dimension-variable names to flag counters

    Returns: the transformed z3 expression and the current counter
    """
    assert is_algebraic_expression(expr) or is_dim(expr)

    if is_dim(expr):
        # for a plain dimension only the value component (arg(1)) participates
        # in arithmetic; the flag component is handled by other constraints
        transformed, counter = transform_dimension(expr, counter, dimension_dict)
        return transformed.arg(1), counter

    elif isinstance(expr, Prod):
        # product of dimensions: multiply the value components
        dims = []
        for dim in expr.products:
            assert is_dim(dim)
            d, counter = transform_dimension(dim, counter, dimension_dict)
            dims.append(d.arg(1))
        return z3.Product(dims), counter

    elif is_algebraic_expression(expr):
        lhs, counter = transform_algebraic_expression(expr.lhs, counter, dimension_dict)
        rhs, counter = transform_algebraic_expression(expr.rhs, counter, dimension_dict)

        if expr.op == op_sub:
            c = lhs - rhs

        elif expr.op == op_add:
            c = lhs + rhs

        elif expr.op == op_div:
            c = lhs / rhs

        elif expr.op == op_mul:
            c = lhs * rhs

        elif expr.op == op_mod:
            c = lhs % rhs

        else:
            raise NotImplementedError('operation not yet implemented')

        return c, counter

    else:
        # unreachable given the assert above, kept as a defensive guard
        raise RuntimeError
def iterate_till_fixed_point(constraints, counter):
    """
    Repeatedly apply `transform_constraint` until one full pass leaves the
    constraints unchanged (a fixed point), threading the counter through.
    """
    previous = None
    # a pass that produces an equal constraint set means we are done
    while previous != constraints:
        previous = constraints
        constraints, counter = transform_constraint(constraints, counter)
    return constraints, counter
+ + """ + dimension_dict = {} # type: ignore[var-annotated] + + generator = ConstraintGenerator(tracer_root, graph) + new_constraints, counter = generator.generate_constraints(counter) + + condition_constraint = new_constraints.conjucts[-1] + + # we know the constraint is a conjunction where the last constraint is about the conditional + # so remove the last constraint + new_constraints.conjucts = new_constraints.conjucts[:-1] + + # transform precision, matching, consistency till obtaining a fixed point + new_constraints, counter = iterate_till_fixed_point(new_constraints, counter) + + + # since the function returns a list of one element, we get the first element + # we are only interested in the RHS in this case because the LHS just stores + # the result + + # we make sure the constraint is of the form: + # c = b where b is a boolean expression + # and we consider b (constraint.rhs) for transformation + assert isinstance(condition_constraint.lhs, BVar) + assert is_bool_expr(condition_constraint.rhs) + condition_constraint_rhs = condition_constraint.rhs + + # transform the condition constraint + condition_constraint_rhs, counter = iterate_till_fixed_point(condition_constraint_rhs, counter) + + transformed, counter = transform_to_z3(new_constraints, counter, dimension_dict) + + transformed_condition_constraint, counter = transform_to_z3(condition_constraint_rhs, counter, dimension_dict) + + negation_transformed_condition_constraint = z3.Not(transformed_condition_constraint) + + return z3.And([transformed, transformed_condition_constraint]), \ + z3.And([transformed, negation_transformed_condition_constraint]) + + + def evaluate_conditional_with_constraints(tracer_root, graph, node, counter=0, user_constraints=None): + """ + Given an IR and a node representing a conditional, evaluate the conditional + and its negation + Args: + tracer_root: Tracer root for module instances + node: The node to be evaluated + + Returns: the results of evaluating the condition and the 
def gen_tvar(curr):
    """
    Generate a fresh tensor variable.
    :param curr: The current counter
    :return: a tensor variable and the updated counter
    """
    nxt = curr + 1
    return TVar(nxt), nxt


def gen_dvar(curr):
    """
    Generate a fresh dimension variable.
    :param curr: the current counter
    :return: a dimension variable and the updated counter
    """
    nxt = curr + 1
    return DVar(nxt), nxt


def gen_bvar(curr):
    """
    Generate a fresh boolean variable.
    :param curr: the current counter
    :return: a boolean variable and the updated counter
    """
    nxt = curr + 1
    return BVar(nxt), nxt


def gen_tensor_dims(n, curr):
    """
    Generate a list of fresh tensor dimension variables.
    :param n: the number of dimensions
    :param curr: the current counter
    :return: a list of dimension variables and the updated counter
    """
    # equivalent to calling gen_dvar n times: variables curr+1 .. curr+n
    dims = [DVar(curr + i) for i in range(1, n + 1)]
    return dims, curr + n


def gen_nat_constraints(list_of_dims):
    """
    Generate constraints stating each dimension is a natural number (0 <= d).
    """
    return [BinConstraintD(0, dim, op_leq) for dim in list_of_dims]
@compatibility(is_backward_compatible=False)
class ArgsKwargsPair(NamedTuple):
    """
    Simple named tuple wrapping a pair of positional args and keyword kwargs.
    """
    args: Tuple[Any, ...]
    kwargs: Dict[str, Any]

# Hand-maintained signature overrides for ops whose schemas cannot be
# derived automatically.
_manual_overrides : Dict[Callable, List[inspect.Signature]] = {}

def _nonzero_schemas():
    """Hand-written signatures for the two overloads of ``torch.nonzero``."""
    def nonzero(self):
        pass

    def nonzero_as_tuple(self, *, as_tuple : bool):
        pass

    # Signature equality ignores the function name, so the distinct helper
    # names above produce the same signatures as two `nonzero` overloads.
    return [inspect.signature(nonzero), inspect.signature(nonzero_as_tuple)]

_manual_overrides[torch.nonzero] = _nonzero_schemas()

class _FakeGlobalNamespace:
    """Stands in for the ``__torch__`` global namespace during type evaluation."""
    def __getattr__(self, name):
        if name != 'torch':
            raise RuntimeError('Expected a torch namespace lookup')
        return torch

# Globals used to eval TorchScript annotation strings into Python types.
_type_eval_globals = {'Tensor' : torch.Tensor, 'Device' : torch.device, 'Layout' : torch.layout,
                      'number' : numbers.Number, 'Future' : torch.jit.Future,
                      'AnyEnumType' : enum.Enum, 'QScheme' : torch.qscheme,
                      '__torch__': _FakeGlobalNamespace(), 'NoneType': type(None),
                      'Storage': torch.UntypedStorage,
                      't': typing.TypeVar('t')}
# make every public name from `typing` resolvable as well (List, Optional, ...)
_type_eval_globals.update({name: getattr(typing, name) for name in dir(typing)})
_type_eval_globals sets up expressions + like "List" and "Future" to map to actual types (typing.List and jit.Future) + """ + return eval(ts_type.annotation_str, _type_eval_globals) + +def _torchscript_schema_to_signature_impl(ts_schema : torch._C.FunctionSchema) -> inspect.Signature: + from inspect import Parameter + parameters : List[Parameter] = [] + for arg in ts_schema.arguments: + arg_type = _torchscript_type_to_python_type(arg.type) + default = arg.default_value if arg.has_default_value() else Parameter.empty + # TODO: Figure out if this is safe. It seems like when generating the type signatures for + # PythonArgParser, we emit signatures with `input` instead of `self` as the first tensor + # argument name. Downstream, if someone converts that positional argument to a keyword + # argument, the name mismatch will break things, so here we're going to normalize the + # name to "input" + name = arg.name if arg.name != 'self' else 'input' + kind = Parameter.KEYWORD_ONLY if arg.kwarg_only else Parameter.POSITIONAL_OR_KEYWORD + # "from" is a keyword therefore it must be a POSITIONAL_ONLY argument + if name == "from": + assert kind == Parameter.POSITIONAL_OR_KEYWORD + # ParameterKind type is internal implementation detail to inspec package + # which makes it hard to do type annotation + kind = Parameter.POSITIONAL_ONLY # type: ignore[assignment] + # This renders all previous arguments to positional only + for idx, p in enumerate(parameters): + assert p.kind == Parameter.POSITIONAL_OR_KEYWORD + parameters[idx] = Parameter(name=p.name, kind=Parameter.POSITIONAL_ONLY, default=p.default, annotation=p.annotation) + parameters.append(Parameter(name=name, kind=kind, default=default, annotation=arg_type)) + return_types = [_torchscript_type_to_python_type(ret.type) for ret in ts_schema.returns] + if len(return_types) == 0: + return_type = None + elif len(return_types) == 1: + return_type = return_types[0] + else: + return_type = tuple(return_types) + + return 
inspect.Signature(parameters, return_annotation=return_type) + +_SCHEMA_TO_SIGNATURE_CACHE : Dict[Tuple[str, str], inspect.Signature] = {} + +def _torchscript_schema_to_signature(ts_schema : torch._C.FunctionSchema) -> inspect.Signature: + # Cached as it's called in the hot path of FakeTensor dispatch + cache_key = ts_schema.name, ts_schema.overload_name + cache_val = _SCHEMA_TO_SIGNATURE_CACHE.get(cache_key) + if cache_val is not None: + return cache_val + + res = _torchscript_schema_to_signature_impl(ts_schema) + _SCHEMA_TO_SIGNATURE_CACHE[cache_key] = res + return res + +@compatibility(is_backward_compatible=False) +def check_for_mutable_operation(target : Callable, args : Tuple['Argument', ...], kwargs : Dict[str, 'Argument']): + signatures, schemas = get_signature_for_torch_op(target, return_schemas=True) + + if signatures and schemas: + matched_schemas = [] + + # Iterate through all of the schema until we find one that matches + # If one matches, populate `new_args_and_kwargs` with the new args/kwargs + # values. If none matches, `new_args_and_kwargs` will be None + for candidate_signature, schema in zip(signatures, schemas): + try: + candidate_signature.bind(*args, **kwargs) + matched_schemas.append((candidate_signature, schema)) + except TypeError as e: + continue + + def throw_if_mutable(schema): + if schema.is_mutable: + raise RuntimeError(f'Tried to trace mutable operation {schema}. FX only supports functional ' + f'code, so operations that mutate operands in-place (e.g. via `out` arguments) ' + f'are not supported') + + if len(matched_schemas) == 0: + # Did not match any schema. Cannot check for mutation + pass + elif len(matched_schemas) == 1: + # Matched exactly one schema, unambiguous + _, schema_to_check = matched_schemas[0] + throw_if_mutable(schema_to_check) + pass + else: + # Ambiguous schema match. Since mutability checking is best effort, + # do nothing. 
+ pass + +@compatibility(is_backward_compatible=False) +def get_signature_for_torch_op(op : Callable, return_schemas : bool = False): + """ + Given an operator on the `torch` namespace, return a list of `inspect.Signature` + objects corresponding to the overloads of that op.. May return `None` if a signature + could not be retrieved. + + Args: + op (Callable): An operator on the `torch` namespace to look up a signature for + + Returns: + Optional[List[inspect.Signature]]: A list of signatures for the overloads of this + operator, or None if the operator signatures could not be retrieved. If + return_schemas=True, returns a tuple containing the optional Python signatures + and the optional TorchScript Function signature + """ + if isinstance(op, OpOverload): + schemas = [op._schema] + elif isinstance(op, OpOverloadPacket): + schemas = [getattr(op, overload)._schema for overload in op.overloads()] + else: + override = _manual_overrides.get(op) + if override: + return (override, None) if return_schemas else None + + aten_fn = torch.jit._builtins._find_builtin(op) + + if aten_fn is None: + return (None, None) if return_schemas else None + schemas = torch._C._jit_get_schemas_for_operator(aten_fn) + + signatures = [_torchscript_schema_to_signature(schema) for schema in schemas] + return (signatures, schemas) if return_schemas else signatures + +@compatibility(is_backward_compatible=False) +def create_type_hint(x): + try: + if isinstance(x, (list, tuple)): + # todo(chilli): Figure out the right way for mypy to handle this + if isinstance(x, list): + def ret_type(x): + return List[x] # type: ignore[valid-type] + else: + def ret_type(x): + return Tuple[x, ...] + if len(x) == 0: + return ret_type(Any) + base_type = x[0] + for t in x: + if issubclass(t, base_type): + continue + elif issubclass(base_type, t): + base_type = t + else: + return ret_type(Any) + return ret_type(base_type) + except Exception as e: + # We tried to create a type hint for list but failed. 
@compatibility(is_backward_compatible=False)
def type_matches(signature_type : Any, argument_type : Any):
    """
    Check whether ``argument_type`` is acceptable for a parameter annotated
    with ``signature_type``, applying the promotions used by torch schemas
    (int -> List[int], dtype -> int, int/float -> Number, Tuple[T] -> List[T]).
    """
    origin = getattr(signature_type, '__origin__', signature_type)

    if signature_type is argument_type:
        return True

    # Union in the signature: the argument must match at least one member
    if origin is typing.Union and signature_type != argument_type:
        return any(type_matches(member, argument_type) for member in signature_type.__args__)

    # a bare int can be promoted to List[int]
    if signature_type is List[int] and argument_type is int:
        return True

    if getattr(signature_type, '__origin__', None) in {list, List}:
        element_type = signature_type.__args__[0]
        if not inspect.isclass(element_type):
            warnings.warn(
                f"Does not support nested parametric types, got {signature_type}. Please file a bug.")
            return False
        if getattr(argument_type, '__origin__', None) in {list, List}:
            return issubclass(argument_type.__args__[0], element_type)

        def is_homogeneous_tuple(t):
            if getattr(t, "__origin__", None) not in {tuple, Tuple}:
                return False
            contained = t.__args__
            if t.__args__ == ((),):  # Tuple[()].__args__ == ((),) for some reason
                return True
            return all((c is Ellipsis) or issubclass(c, element_type) for c in contained)

        # Tuple[T] is accepted for List[T] parameters
        return is_homogeneous_tuple(argument_type)

    # Dtype is an int in schemas
    if signature_type is int and argument_type is torch.dtype:
        return True

    if signature_type is numbers.Number and argument_type in {int, float}:
        return True

    if inspect.isclass(argument_type) and inspect.isclass(signature_type):
        return issubclass(argument_type, signature_type)

    return False
Optional[Dict[str, Any]] = None, arg_types : Optional[Tuple[Any]] = None, + kwarg_types : Optional[Dict[str, Any]] = None, + normalize_to_only_use_kwargs : bool = False) -> Optional[ArgsKwargsPair]: + """ + Returns normalized arguments to PyTorch functions. This means that + `args/kwargs` will be matched up to the functional's + signature and return exclusively kwargs in positional order if + `normalize_to_only_use_kwargs` is True. + Also populates default values. Does not support positional-only + parameters or varargs parameters (*args, **kwargs). Does not support modules. + + May require `arg_types` and `kwarg_types` in order to disambiguate overloads. + + Args: + target (Callable): Function that we are normalizing + args (Tuple[Any]): Tuple of args to the function + kwargs (Optional[Dict[str, Any]]): Dict of kwargs to the function + arg_types (Optional[Tuple[Any]]): Tuple of arg types for the args + kwarg_types (Optional[Dict[str, Any]]): Dict of arg types for the kwargs + normalize_to_only_use_kwargs (bool): Whether to normalize to only use kwargs. + + Returns: + + Returns normalized_args_and_kwargs, or `None` if not successful. + """ + if kwargs is None: + kwargs = {} + new_args_and_kwargs = None + if not isinstance(target, types.BuiltinFunctionType) and not ( + isinstance(target, (OpOverloadPacket, OpOverload)) + ): + target_for_analysis = target + if target in boolean_dispatched: + # HACK: `boolean_dispatch` as used in `torch.nn.functional` makes it so that we have + # a 2-way dispatch based on a boolean value. Here we check that the `true` and `false` + # branches of the dispatch have exactly the same signature. If they do, use the `true` + # branch signature for analysis. 
Otherwise, leave this un-normalized + assert not isinstance(target, str) + dispatched = boolean_dispatched[target] + if_true, if_false = dispatched['if_true'], dispatched['if_false'] + if inspect.signature(if_true).parameters != inspect.signature(if_false).parameters: + return None + target_for_analysis = if_true + + assert callable(target_for_analysis) + sig = inspect.signature(inspect.unwrap(target_for_analysis)) + new_args_and_kwargs = _args_kwargs_to_normalized_args_kwargs(sig, args, kwargs, normalize_to_only_use_kwargs) + else: + assert callable(target) + torch_op_schemas = get_signature_for_torch_op(target) + matched_schemas = [] + if torch_op_schemas: + # Iterate through all of the schema until we find one that matches + # If one matches, populate `new_args_and_kwargs` with the new args/kwargs + # values. If none matches, `new_args_and_kwargs` will be None + for candidate_signature in torch_op_schemas: + try: + candidate_signature.bind(*args, **kwargs) + matched_schemas.append(candidate_signature) + except TypeError as e: + continue + + if len(matched_schemas) == 0: + # Did not match any schema. 
Cannot normalize + pass + elif len(matched_schemas) == 1: + # Matched exactly one schema, unambiguous + new_args_and_kwargs = _args_kwargs_to_normalized_args_kwargs(matched_schemas[0], args, kwargs, + normalize_to_only_use_kwargs) + else: + if arg_types is not None or kwarg_types is not None: + arg_types = arg_types if arg_types else cast(Tuple[Any], ()) + kwarg_types = kwarg_types if kwarg_types else {} + for candidate_signature in torch_op_schemas: + sig_matches = True + try: + bound_types = candidate_signature.bind(*arg_types, **kwarg_types) + for arg_name, arg_type in bound_types.arguments.items(): + param = candidate_signature.parameters[arg_name] + sig_matches = sig_matches and type_matches(param.annotation, arg_type) + except TypeError as e: + sig_matches = False + if sig_matches: + new_args_and_kwargs = _args_kwargs_to_normalized_args_kwargs(candidate_signature, args, kwargs, + normalize_to_only_use_kwargs) + break + else: + # Matched more than one schema. In this situation, the caller must provide the types of + # the arguments of the overload they expect. + schema_printouts = '\n'.join(str(schema) for schema in matched_schemas) + raise RuntimeError(f'Tried to normalize arguments to {torch.typename(target)} but ' + f'the schema match was ambiguous! Please provide argument types to ' + f'the normalize_arguments() call. Available schemas:\n{schema_printouts}') + + return new_args_and_kwargs + +@compatibility(is_backward_compatible=False) +def normalize_module( + root: torch.nn.Module, target: str, args: Tuple[Any], kwargs : Optional[Dict[str, Any]] = None, + normalize_to_only_use_kwargs : bool = False) -> Optional[ArgsKwargsPair]: + """ + Returns normalized arguments to PyTorch modules. This means that + `args/kwargs` will be matched up to the functional's + signature and return exclusively kwargs in positional order if + `normalize_to_only_use_kwargs` is True. + Also populates default values. 
def _args_kwargs_to_normalized_args_kwargs(sig : inspect.Signature, args : Tuple[Any, ...],
                                           kwargs : Dict[str, Any],
                                           normalize_to_only_use_kwargs : bool) -> Optional[ArgsKwargsPair]:
    """
    Given a call target, args, and kwargs, return the arguments normalized into
    an ArgsKwargsPair, or None if the type signature is not supported by
    this normalization.

    Args:

        sig (inspect.Signature): Signature object for the target
        args (Tuple): Arguments that appear at the callsite for `target`
        kwargs (Dict): Keyword arguments that appear at the callsite for `target`
        normalize_to_only_use_kwargs (bool): Whether to normalize to only use kwargs.

    Returns:

        Optional[ArgsKwargsPair]: Normalized args and kwargs for `target`, or `None` if
        this target is not supported.
    """
    # Positional-only and varargs (*args, **kwargs) signatures are not handled.
    _supported_kinds = (
        inspect.Parameter.POSITIONAL_OR_KEYWORD, inspect.Parameter.KEYWORD_ONLY)
    if not all(p.kind in _supported_kinds for p in sig.parameters.values()):
        # Add an exception for one signature, which is common for random/uniform, i.e.:
        # Tensor(a!) self, float from=0, float to=1, *, Generator? generator=None
        # `from` is Python keyword and as such functions with that signature should have
        # positional-only args, but at the same time they could be dispatched as kwargs
        if list(sig.parameters.keys()) != ['input', 'from', 'to', 'generator']:
            return None

    bound = sig.bind(*args, **kwargs)
    bound.apply_defaults()

    positional : List[Any] = []
    keyword : Dict[str, Any] = {}
    for idx, name in enumerate(sig.parameters):
        value = bound.arguments[name]
        # anything the caller did not pass positionally (or everything, when
        # normalizing to kwargs-only) is emitted as a keyword argument
        if normalize_to_only_use_kwargs or idx >= len(args):
            keyword[name] = value
        else:
            positional.append(value)

    return ArgsKwargsPair(tuple(positional), keyword)
+ Adding back known type annotation for getitem nodes to improve jit scriptability. + + Args: + graph (Graph): The graph to be annotated + """ + for node in graph.nodes: + if node.target == operator.getitem: + sequence_node, index_node = node.args + if not sequence_node.type: + continue + # container types + if hasattr(sequence_node.type, "_name"): + parameterized_types = sequence_node.type.__args__ + if sequence_node.type._name == "Tuple": + if len(parameterized_types) == 2 and isinstance( + parameterized_types[1], type(...) + ): + node.type = parameterized_types[0] + else: + assert len(parameterized_types) > index_node + node_type = parameterized_types[index_node] + node.type = node_type + elif sequence_node.type._name == "List": + assert len(parameterized_types) == 1 + node.type = parameterized_types[0] + # NamedTuple type + elif hasattr(sequence_node.type, "__annotations__"): + if sequence_node.type == torch.Tensor: + continue + sequence_node_field_types = sequence_node.type.__annotations__ + field_name = sequence_node.type._fields[index_node] + node.type = sequence_node_field_types[field_name] diff --git a/parrot/lib/python3.10/site-packages/torch/fx/passes/dialect/__init__.py b/parrot/lib/python3.10/site-packages/torch/fx/passes/dialect/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/parrot/lib/python3.10/site-packages/torch/fx/passes/graph_transform_observer.py b/parrot/lib/python3.10/site-packages/torch/fx/passes/graph_transform_observer.py new file mode 100644 index 0000000000000000000000000000000000000000..83975a930115005a6613db37235c8a8428f11424 --- /dev/null +++ b/parrot/lib/python3.10/site-packages/torch/fx/passes/graph_transform_observer.py @@ -0,0 +1,88 @@ +# mypy: allow-untyped-defs +import os +from typing import Optional + +from torch.fx.graph_module import GraphModule + +from .graph_drawer import FxGraphDrawer + +__all__ = ["GraphTransformObserver"] + + +class 
GraphTransformObserver: + __pass_count = 0 + + def __init__(self, gm: GraphModule, passname: str, log_url: Optional[str] = None): + # If log_url is None, we don't log anything + self.log_url = log_url + if self.log_url is None: + return + GraphTransformObserver.__pass_count += 1 + self.gm = gm + self.passname = passname + + self.input_dot_graph = FxGraphDrawer( + self.gm, + self.passname, + ignore_getattr=True, + ignore_parameters_and_buffers=True, + ).get_dot_graph() + + @classmethod + def get_current_pass_count(cls): + return cls.__pass_count + + def __enter__(self): + if self.log_url is None or self.gm is None: + return self + + self.erased_nodes = set() + self.created_nodes = set() + self.gm._register_create_node_hook(self.on_node_creation) + self.gm._register_erase_node_hook(self.on_node_erase) + + return self + + def __exit__(self, type, value, tb): + if self.log_url is None or self.gm is None: + return + + self.gm._unregister_create_node_hook(self.on_node_creation) + self.gm._unregister_erase_node_hook(self.on_node_erase) + + if len(self.created_nodes) > 0 or len(self.erased_nodes) > 0: + for e in self.input_dot_graph.get_node_list(): + if e.get_name() in self.erased_nodes: + e.obj_dict["attributes"]["fillcolor"] = "yellow" + else: + e.obj_dict["attributes"]["fillcolor"] = "grey" + self.input_dot_graph.write_svg( + os.path.join( + self.log_url, + f"pass_{GraphTransformObserver.__pass_count}_{self.passname}_input_graph.svg", + ) + ) + + output_dot_graph = FxGraphDrawer( + self.gm, + self.passname, + ignore_getattr=True, + ignore_parameters_and_buffers=True, + ).get_dot_graph() + for e in output_dot_graph.get_node_list(): + if e.get_name() in self.created_nodes: + e.obj_dict["attributes"]["fillcolor"] = "yellow" + else: + e.obj_dict["attributes"]["fillcolor"] = "grey" + output_dot_graph.write_svg( + os.path.join( + self.log_url, + f"pass_{GraphTransformObserver.__pass_count}_{self.passname}_output_graph.svg", + ) + ) + + def on_node_creation(self, node): + 
@compatibility(is_backward_compatible=False)
class FxNetMinimizerBadModuleError(Exception):
    """
    Raised if failed to split out a minimize module
    """

    pass


@compatibility(is_backward_compatible=False)
class FxNetMinimizerRunFuncError(Exception):
    """
    Raised if error occurs during run_a or run_b functions
    """

    pass


@compatibility(is_backward_compatible=False)
class FxNetMinimizerResultMismatchError(Exception):
    """
    Raised if comparing function thinks the results are mismatching.
    """

    pass


@dataclass
class _MinimizerSettingBase:
    """
    Args:
    `accumulate_error`: Instead of using a's input for both converted module to verify
    , use the previous outputs of each converted module as input to accumulate the
    errors.

    `traverse_method`: "sequential" or "binary" or "accumulate"
    Determine the way of traverse the nodes in FX module.

    `find_all`: Minimizer will go through the entire model and return all problematic nodes.

    `return_intermediate`: If true, when using `run_nodes()` function to run the
    model, intermediate results of all the ops will be returned as output.
    """

    accumulate_error: bool = False
    traverse_method: str = "sequential"
    find_all: bool = False
    return_intermediate: bool = False

    def __str__(self):
        # one "\t<field>: <value>\n" line per setting, in declaration order
        header = "FX Minimizer Settings:\n"
        lines = "".join(f"\t{name}: {value}\n" for name, value in vars(self).items())
        return header + lines
+ """ + + def __init__( + self, + module: torch.fx.GraphModule, + sample_input: Tensors, + compare_fn: Callable[ + [TensorOrTensors, TensorOrTensors, Names], Tuple[float, bool] + ], + settings: _MinimizerSettingBase, + module_exporter: Optional[ + Callable[ + [Tensors, torch.fx.GraphModule, str], + None + ] + ] = None, + exclusion_fn: Optional[ + Callable[[NodeList, int, int], None] + ] = None, + ): + assert isinstance(module, torch.fx.GraphModule) + + self.module = module + self.sample_input = sample_input + self.compare_fn = compare_fn + self.module_exporter = module_exporter + self.settings = settings + self.exclusion_fn = exclusion_fn + + # Stores outputs of run_a function + self.a_outputs: Dict[str, Any] = {} + + # Stores outputs of run_b function + self.b_outputs: Dict[str, Any] = {} + + # Stores the results of compare_fn + self.results: Dict[Any, Any] = {} + + # Stores the report for the runs + self.reports: List[List[str]] = [] + + # Current iteration + self.iteration: int = 0 + + callable_nodes = { + node for node in self.module.graph.nodes if node.op in CALLABLE_NODE_OPS + } + ShapeProp(self.module).propagate(*self.sample_input) + self.fusions = FxNetAccFusionsFinder(self.module, callable_nodes)() + + # Check if number of input in sample_input matches the number of placeholders + placeholders = [ + node.name for node in self.module.graph.nodes if node.op == "placeholder" + ] + assert len(placeholders) == len(self.sample_input) + + # Store sample_input + for i, name in enumerate(placeholders): + self.a_outputs[name] = sample_input[i] + self.b_outputs[name] = sample_input[i] + + def run_a(self, mod: torch.fx.GraphModule, inputs: Tensors, report_idx: int = -1) -> TensorOrTensors: + """ + Run `mod` with `inputs` and generate output. The output will be compared with + output of run_b(). 
+ """ + raise RuntimeError("run_a() is not implemented.") + + def run_b(self, mod: torch.fx.GraphModule, inputs: Tensors, report_idx: int = -1) -> TensorOrTensors: + """ + Run `mod` with `inputs` and generate output. The output will be compared with + output of run_a(). + """ + raise RuntimeError("run_b() is not implemented.") + + def _store_outputs( + self, + a_result: TensorOrTensors, + b_result: TensorOrTensors, + submodule: torch.fx.GraphModule, + ): + """ + Store the outputs of self.run_a() and self.run_b() into self.a_outputs and + self.b_outputs, so that we can use them when execute preceding nodes that + use those outputs as inputs. + + Args: + a_result: Output of self.run_a(). Could be a tensor or tensors. + b_result: Output of self.run_b(). Could be a tensor or tensors. + submodule: The module that generates a_result and b_result. + """ + output_node = next( + node for node in submodule.graph.nodes if node.op == "output" + ) + + # Only one output + if isinstance(output_node.args[0], torch.fx.Node): + self.a_outputs[output_node.args[0].name] = a_result + self.b_outputs[output_node.args[0].name] = b_result + # Multiple outputs + else: + for i, arg in enumerate(output_node.args[0]): + self.a_outputs[arg.name] = a_result[i] + self.b_outputs[arg.name] = b_result[i] + + def _get_submod_inputs( + self, main_module: torch.fx.GraphModule, submod_path: str + ) -> Tuple[Tensors, Tensors]: + """ + Try get submodule inputs from stored outputs. If not found then use + torch_glow.get_submod_inputs to get the inputs. + + If accumulate_error is False, use a_input for run_a() and run_b() + otherwise use a_input for run_a and b_input for run_b. + + Args: + main_module: Top-levlel fx module. + submod_path: Path to the submodule we want to run and compare results. + + Returns: + a_input: List of tensor(s) that will be used by run_a() as submodule inputs. + b_input: List of tensor(s) that will be used by run_b() as submodule inputs. 
+ """ + a_input = [] + b_input = [] + submodule = getattr(main_module, submod_path) + placeholders = [ + node.name for node in submodule.graph.nodes if node.op == "placeholder" + ] + + # If all placeholder can be found in stored outputs, use stored + # outputs as inputs. Otherwise, use `torch_glow.get_submod_inputs` + # to get the inputs. + if set(placeholders) <= self.a_outputs.keys(): + for name in placeholders: + a_input.append(self.a_outputs[name]) + b_input.append(self.b_outputs[name]) + else: + if self.settings.accumulate_error: + print(f"Can't find previous stored outputs named {placeholders}!") + + def get_inputs(self: torch.nn.Module, inputs: Any): + nonlocal a_input + a_input = inputs + + # Use forward hook to get the inputs to the submodule + handle = submodule.register_forward_pre_hook(get_inputs) + main_module(*self.sample_input) + handle.remove() + + b_input = a_input + + if not self.settings.accumulate_error: + return a_input, a_input + + return a_input, b_input + + def _tag_nodes(self, selected_nodes: NodeSet): + """ + Tag selected nodes with tag "minimize". Nodes with the same tags will + be split to the same submodule afterwards. + + Args: + selected_nodes: Nodes that we want to minimize. We will tag those nodes + with "minimize", all preceding nodes with "main_0" and all following + nodes with "main_1". + """ + for node in self.module.graph.nodes: + if node.op not in CALLABLE_NODE_OPS: + continue + + if node in selected_nodes: + node.tag = "minimize" + elif any( + n.tag in {"minimize", "main_1"} + for n in node.all_input_nodes + if n.op in CALLABLE_NODE_OPS + ): + node.tag = "main_1" + else: + node.tag = "main_0" + + def _build_submodule(self, nodes: NodeSet) -> Tuple[torch.fx.GraphModule, str]: + """ + Split self.module so that one submodule consists of `nodes` and only `nodes`. + + Args: + nodes: Nodes that we want to include in the minimize submodule. + + Returns: + split_module (torch.fx.GraphModule): the module after split. 
+ submodule_name (str): the name of the submodule that consists of `nodes`. + """ + # Color provided nodes + self._tag_nodes(nodes) + + # Split module based on coloring + split_module = split_by_tags(self.module, ["main_0", "minimize", "main_1"]) + + # Find submodule containing colored nodes + submodule_name: str = "" + for child_name, _ in split_module.named_children(): + # Skip submodules we're not interested in at the moment + if "minimize" not in child_name: + continue + + if submodule_name == "": + submodule_name = child_name + else: + raise FxNetMinimizerBadModuleError( + f"Expected only one minimize submodule with nodes {nodes}" + ) + + if submodule_name == "": + raise FxNetMinimizerBadModuleError( + f"Minimize submodule was not found with nodes {nodes}" + ) + + return split_module, submodule_name + + def _run_and_compare( + self, + split_module: torch.fx.GraphModule, + submod_name: str, + output_names: Names, + report_idx: int = -1 + ): + """ + Run the submodule in `split_module` that has name `submod_name` + using `self.run_a` and `self.run_b` and compare their results. + + Args: + split_module: Main module that contains the minimize submodule. + submod_name: Name of the minimize submodule. + output_names: Names of the node we want to output. If None, we + will use the original output. 
+ """ + submodule = getattr(split_module, submod_name) + a_input, b_input = self._get_submod_inputs(split_module, submod_name) + + if len(self.reports) == 0: + self.reports.append([]) + self.iteration = 1 + + report = self.reports[report_idx if report_idx >= 0 else self.iteration - 1] + report.append("Run and compare ...") + + if output_names: + output_nodes: NodeList = [] + for node in submodule.graph.nodes: + if node.op == "output": + submodule.graph.erase_node(node) + + if node.name in output_names: + output_nodes.append(node) + + submodule.graph.output( + output_nodes[0] if len(output_nodes) == 1 else tuple(output_nodes) + ) + submodule.graph.lint() + submodule.recompile() + + # Use name of args in output node as key to store comparison result + for node in submodule.graph.nodes: + if node.op == "output": + result_key = map_arg(node.args, lambda x: x.name) + + try: + a_result = self.run_a(submodule, a_input, report_idx) + b_result = self.run_b(submodule, b_input, report_idx) + self._store_outputs(a_result, b_result, submodule) + except Exception as e: + report.append(f"Exception raised when running {submod_name}: {e}") + raise FxNetMinimizerRunFuncError( # noqa: B904 + f"Exception raised when running {submod_name}: {e}" + ) + + # Compare results + names: Names = output_names + if output_names is None: + names = [str(v) for v in result_key] # type: ignore[possibly-undefined] + + numeric_result, bool_result = self.compare_fn(a_result, b_result, names) + + self.results[result_key] = numeric_result # type: ignore[possibly-undefined] + report.append(f"Numerical accuracy = {numeric_result}") + if not bool_result: + report.append(f"Result mismatch for {result_key}") + if self.module_exporter: + self.module_exporter( + a_input, submodule, str(result_key[0]) + "_cpu", + ) + self.module_exporter( + b_input, submodule, str(result_key[0]) + "_acc", + ) + raise FxNetMinimizerResultMismatchError(f"Result mismatch for {result_key}") + + def _binary_search_impl( + self, 
all_nodes: NodeList, start_idx: int, end_idx: int + ) -> NodeSet: + """ + Recursive binary search implementation. + """ + culprits: NodeSet = set() + nodes: NodeList = all_nodes[start_idx:end_idx] + + report: List[str] = [] + if self.exclusion_fn is not None: + self.exclusion_fn(nodes, start_idx, end_idx) + if len(nodes) == 0: + report = ["All nodes are excluded by user"] + self.reports.append(report) + return culprits + + first_node_name = nodes[0].name + output_node_name = nodes[-1].name + self.iteration += 1 + self.reports.append(report) + report.append(f"Binary search iteration {self.iteration}") + report.append( + f"From node index {start_idx}:{first_node_name} to {end_idx-1}:{output_node_name}. " + f"Size of the interested node list is {len(nodes)}" + ) + cur_nodes: NodeSet = set(nodes) + + try: + split_module, submod_name = self._build_submodule(cur_nodes) + self._run_and_compare(split_module, submod_name, [output_node_name]) + + except (FxNetMinimizerRunFuncError, FxNetMinimizerResultMismatchError): + + if len(nodes) == 1: + report.append( + f"This is the last node in the sub-module. " + f"Search in the current branch is successful with culprit = {cur_nodes}." + ) + self.print_report(report) + return cur_nodes + + report.append( + "Proceed to split and lower the halves of the current " + "sub-module individually." + ) + self.print_report(report) + + mid = len(nodes) // 2 + culprits = self._binary_search_impl(all_nodes, start_idx, start_idx + mid) + + if len(culprits) != 0 and not self.settings.find_all: + return culprits + + culprits = self._binary_search_impl(all_nodes, start_idx + mid, end_idx) + + if len(culprits) == 0: + report.append( + f"Further split and lowering found no errors. 
" + f"Unable to minimize the submodule with list of nodes: {nodes}" + ) + self.print_report(report) + + return culprits + else: + report.append("No discrepancy found.") + self.print_report(report) + return set() + + def _binary_traverse(self, nodes: NodeList) -> NodeSet: + """ + Binary search on `nodes` for culprit. + """ + return self._binary_search_impl(nodes, 0, len(nodes)) + + def _sequential_traverse(self, nodes: NodeList) -> NodeSet: + """ + Traverse `nodes` one by one and determine if any of them is a culprit. + """ + culprits: NodeSet = set() + + for node in nodes: + report: List[str] = [] + self.reports.append(report) + self.iteration += 1 + report.append(f"Sequential traverse iteration {self.iteration}.") + report.append(f"Visit node: {node.name}") + + _LOGGER.info("Visit node: %s", node.name) + node_list: NodeList = [node] + if self.exclusion_fn is not None: + self.exclusion_fn(node_list, -1, -1) + if len(node_list) == 0: + report.append(f"User exclusion : {node.name}") + self.print_report(report) + return culprits + + cur_nodes: NodeSet = {node} + + if node in self.fusions: + cur_nodes = self.fusions[node] + + try: + split_module, submod_name = self._build_submodule(cur_nodes) + self._run_and_compare(split_module, submod_name, [node.name]) + self.print_report(report) + except (FxNetMinimizerResultMismatchError): + culprits.add(node) + report.append(f"Found culprit from numeric error: {node}") + self.print_report(report) + if not self.settings.find_all: + return culprits + except (FxNetMinimizerRunFuncError): + culprits.update(cur_nodes) + report.append(f"Found culprit from run error: {node}") + self.print_report(report) + if not self.settings.find_all: + return culprits + + return culprits + + + def _block_traverse_impl(self, nodes: NodeList, start_idx: int, end_idx: int, find_last_node: bool) -> int: + """ + Recursive block search implementation. 
+ find_last_node: If True, search for the last node which result in numerics difference + if False: find first node in sorted node list + """ + report: List[str] = [] + + mid = (start_idx + end_idx) // 2 + cur_nodes_list: NodeList = nodes[:mid + 1] if find_last_node else nodes[mid:] + + if self.exclusion_fn: + self.exclusion_fn(cur_nodes_list, -1, -1) + + cur_nodes = set(cur_nodes_list) + + first_node_name = cur_nodes_list[0].name + last_node_name = cur_nodes_list[-1].name + target_node_name = last_node_name if find_last_node else first_node_name + + self.iteration += 1 + self.reports.append(report) + report.extend( + [ + "=" * 30, + f"Block search iteration {self.iteration}", + ] + ) + report.extend( + [ + f"Search for {'last' if find_last_node else 'first'} node in culprits", + f"From node index {start_idx}:{nodes[start_idx].name} to {end_idx}:{nodes[end_idx].name}. ", + f"Subgraph constructed by {first_node_name} to {last_node_name}", + f"Targeting node: {target_node_name}", + f"Size of the interested node list is {end_idx - start_idx + 1}", + ] + ) + report_idx = len(self.reports) - 1 + + try: + split_module, submod_name = self._build_submodule(cur_nodes) + self._run_and_compare(split_module, submod_name, [last_node_name], report_idx) + except (FxNetMinimizerResultMismatchError, FxNetMinimizerRunFuncError): + report.append(f"Culprits found from node {first_node_name} to {last_node_name}.") + + if start_idx == mid: + report.extend( + [ + "This is the last node in the sub-module. ", + "Search in the current branch is successful with node :", + f"{start_idx}, node name: {nodes[start_idx].name}." + ] + ) + self.print_report(report) + return start_idx + + report.append( + "Proceed to split and lower the halves of the current " + "sub-module individually." 
+ ) + self.print_report(report) + + if find_last_node: + return self._block_traverse_impl(nodes, start_idx, mid, find_last_node) + else: + return self._block_traverse_impl(nodes, mid + 1, end_idx, find_last_node) + else: + report.append(f"Culprits not found from node start to {mid}:{nodes[mid].name}.") + + if start_idx == mid: + report.extend( + [ + "This is the last node in the sub-module. ", + "Search in the current branch is successful with node", + f"{start_idx}, node name: {nodes[start_idx].name}.", + ] + ) + self.print_report(report) + return start_idx + 1 if find_last_node else start_idx - 1 + + report.append( + "Proceed to split and lower the halves of the current " + "sub-module individually." + ) + self.print_report(report) + + if find_last_node: + return self._block_traverse_impl(nodes, mid + 1, end_idx, find_last_node) + else: + return self._block_traverse_impl(nodes, start_idx, mid, find_last_node) + + + def _block_traverse(self, nodes: NodeList, find_last_node: Optional[bool]) -> NodeSet: + """ + Traverse topologically sorted node list + Find minimium block (start_idx, end_idx) which contains the culprit + 1st pass: search for end_idx by finding the last node in culprit block + where Numerical accuracy (0, end_idx) > threshold + 2nd pass: search for start_idx by finding the first node in culprit block + where Numerical accuracy (start_idx, end_idx) < threshold + Form minimum block by (start_idx - 1, end_idx) + """ + culprits: NodeSet = set() + first_node_name = nodes[0].name + last_node_name = nodes[-1].name + last_node_report = [f"Block search from {first_node_name} to {last_node_name}"] + last_node_report.append("*" * 50) + self.reports.append(last_node_report) + + start_idx = 0 + end_idx = len(nodes) - 1 + run_both = True if find_last_node is None else False + + # step 1: find (0, end_idx) of culprit block + if run_both or find_last_node: + last_node_report.append("Start searching for last node in culprit") + self.print_report(last_node_report) + 
end_idx = self._block_traverse_impl(nodes, start_idx, end_idx, True) + last_node_report.extend( + [ + "Finish Pass 1", + f"Find end_idx = {end_idx}:{nodes[end_idx].name}" + ] + ) + self.print_report(last_node_report) + + # step 2: reduce culprit block to (start_idx, end_idx) + if run_both or not find_last_node: + first_node_report = ["Start searching for first node in culprit"] + self.print_report(first_node_report) + start_idx = self._block_traverse_impl(nodes[0:end_idx + 1], start_idx, end_idx, False) + first_node_report.append("*" * 50) + self.reports.append(first_node_report) + first_node_report.extend( + [ + "Finish Pass 2", + f"Find start_idx = {start_idx}:{nodes[start_idx].name}" + ] + ) + self.print_report(first_node_report) + + # step 3: form module with minimum culprits + culprits.update(nodes[start_idx:end_idx + 1]) + result_report = [f"Finish searching, found minimum block ({nodes[start_idx]},{nodes[end_idx]})"] + self.reports.append(result_report) + self.print_report(result_report) + return culprits + + + def _defined_traverse(self, nodes: NodeList) -> NodeSet: + """ + run user defined `nodes` and determine if it is a culprit. 
+ """ + culprits: NodeSet = set() + if self.exclusion_fn is not None: + self.exclusion_fn(nodes, -1, -1) + if len(nodes) == 0: + report = ["All nodes are excluded by user"] + self.reports.append(report) + return culprits + + first_node_name = nodes[0].name + output_node_name = nodes[-1].name + report = [f"Defined graph from {first_node_name} to {output_node_name}"] + cur_nodes: NodeSet = set(nodes) + try: + split_module, submod_name = self._build_submodule(cur_nodes) + self._run_and_compare(split_module, submod_name, [output_node_name]) + self.print_report(report) + except (FxNetMinimizerResultMismatchError, FxNetMinimizerRunFuncError): + report.append(f"Found culprit {cur_nodes}") + self.print_report(report) + return culprits + + return culprits + + def _accumulate_traverse(self, nodes: NodeList) -> NodeSet: + culprits: NodeSet = set() + nodes_to_run: NodeSet = set() + + # find_all is not supported for accumulate traversal because all the + # ops run on NNPI. So we return after the first op that raises error. 
+ if self.settings.find_all: + print("'Find All' mode is not supported in accumulate traversal.") + return culprits + + for node in nodes: + report: List[str] = [] + self.reports.append(report) + self.iteration += 1 + report.append(f"Accumulate traverse iteration {self.iteration}.") + + nodes_to_run.add(node) + + node_name = node.name + if node_name is not None and isinstance(node_name, tuple): + node_name = node_name[0] + assert node_name is not None and isinstance( + node_name, str + ), f"minimize: node_name: {node_name}" + + report.append(f"Add node: {node_name}") + + try: + split_module, submod_name = self._build_submodule(nodes_to_run) + self._run_and_compare(split_module, submod_name, [node_name]) + self.print_report(report) + except (FxNetMinimizerResultMismatchError, FxNetMinimizerRunFuncError): + culprits.add(node) + report.append(f"Found culprit {node}") + self.print_report(report) + return culprits + + return culprits + + def _skip_traverse_impl(self, all_nodes: NodeList, start_idx: int, end_idx: int) -> NodeSet: + """ + Skip certain nodes in graph based on settings + """ + culprits: NodeSet = set() + nodes: NodeList = all_nodes[start_idx:end_idx] + cur_nodes: NodeSet = set(nodes) + if self.exclusion_fn is not None: + self.exclusion_fn(nodes, start_idx, end_idx) + cur_nodes = set(nodes) + else: + for node in nodes: + if node in self.fusions: + cur_nodes.update(self.fusions[node]) + report: List[str] = [] + self.reports.append(report) + self.iteration += 1 + report.append(f" Nodes block {self.iteration}.") + report.append( + f"From node index {start_idx} to {end_idx-1}. 
" + f"Size of the interested node list is {len(nodes)}" + ) + + try: + split_module, submod_name = self._build_submodule(cur_nodes) + self._run_and_compare(split_module, submod_name, []) + except (FxNetMinimizerResultMismatchError): + culprits.update(cur_nodes) + report.append(f"Found culprit from numeric error: {cur_nodes}") + self.print_report(report) + return culprits + except (FxNetMinimizerRunFuncError): + culprits.update(cur_nodes) + report.append(f"Found culprit from run error: {cur_nodes}") + self.print_report(report) + return culprits + else: + report.append("No discrepancy found.") + self.print_report(report) + return set() + + + def _skip_traverse(self, all_nodes: NodeList, skip_nodes: List) -> NodeSet: + """ + Skip certain nodes in graph based on settings + """ + start_idx = 0 + num_nodes = len(all_nodes) + idx = 0 + culprits = set() + while idx < num_nodes: + node = all_nodes[idx] + if (node.name in skip_nodes): # skip the node + if idx > start_idx: + culprits = self._skip_traverse_impl(all_nodes, start_idx, idx) + start_idx = idx + 1 + elif idx == num_nodes - 1 and start_idx <= idx: # last node + culprits = self._skip_traverse_impl(all_nodes, start_idx, idx + 1) + idx += 1 + + return culprits + + + + def _collect_nodes(self, start: Optional[str], end: Optional[str]) -> NodeList: + """ + Collect nodes in the model that between nodes with name of `start` and `end`. + These two nodes are also included. + """ + nodes: NodeList = [] + add_node = start is None + + for node in self.module.graph.nodes: + if node.op not in CALLABLE_NODE_OPS: + continue + + if node.name == start: + add_node = True + + if add_node: + nodes.append(node) + + if node.name == end: + break + + return nodes + + def run_nodes(self, start: Optional[str] = None, end: Optional[str] = None): + """ + Run part of the model from `start` node to `end` node. If `start` is None + then we start from the beginning of the model. If `end` is None then we + stop at the end of the model. 
+ + Args: + start: The name of the node which is the first node of the submodule + we want to run. If set to None, then we'll start with the first + node of the model. + end: The name of the node which is the last node of the submodule we + want to run. If set to None, we'll end with the last node of the + model. + """ + nodes = self._collect_nodes(start, end) + cur_nodes = set(nodes) + + for node in nodes: + if node in self.fusions: + cur_nodes.update(self.fusions[node]) + + output_names = [] + if self.settings.return_intermediate: + output_names = [node.name for node in nodes] + + try: + split_module, submod_name = self._build_submodule(cur_nodes) + self._run_and_compare(split_module, submod_name, output_names) + except ( + FxNetMinimizerRunFuncError, + FxNetMinimizerResultMismatchError, + ) as e: + print(e) + + def print_report(self, report: List[str]): + for i in range(len(report)): + if i > 0: + print(" . " + report[i]) + else: + print(report[i]) + + def print_reports(self): + for report in self.reports: + self.print_report(report) + + def minimize( + self, + start: Optional[str] = None, + end: Optional[str] = None, + skip_nodes: Optional[List] = None, + find_last_node: Optional[bool] = None, + ) -> NodeSet: + """ + Minimizing the model from node with name `start` to node with name `end` base + on self.settings. Find culprits that causes FxNetMinimizerRunFuncError or + FxNetMinimizerResultMismatchError errors. + + Args: + start: The name of the node where we want to start minimizing. If set + to None, then we'll start with the first node of the model. + end: The name of the node where we want to terminate minimizing. If + set to None, we'll end with the last node of the model. + skip_nodes: The names of nodes where we want to skip during minimizing. + It'll create subgraphs without these skip nodes under the hood. + Only applicable in mode "skip". + find_last_node: True if only last_node of a culprits is needed in mode "block". 
+ False if only the first_node of a culprits is needed. + Only applicable in mode "block". + + Returns: + nodes: A list of nodes that causes FxNetMinimizerRunFuncError or + FxNetMinimizerResultMismatchError errors during minimizing. + """ + + print(self.settings) + print(self.module.graph) + + nodes = self._collect_nodes(start, end) + + if self.settings.traverse_method == "sequential": + return self._sequential_traverse(nodes) + + if self.settings.traverse_method == "binary": + return self._binary_traverse(nodes) + + if self.settings.traverse_method == "accumulate": + return self._accumulate_traverse(nodes) + + if self.settings.traverse_method == "skip": + if (skip_nodes is None): + raise RuntimeError("'skip_nodes' can't be None when 'traverse_method' is 'skip'.") + return self._skip_traverse(nodes, skip_nodes) + + if self.settings.traverse_method == "defined": + return self._defined_traverse(nodes) + + if self.settings.traverse_method == "block": + return self._block_traverse(nodes, find_last_node) + + raise RuntimeError(f"Unknown traverse method {self.settings.traverse_method}!") diff --git a/parrot/lib/python3.10/site-packages/torch/fx/passes/param_fetch.py b/parrot/lib/python3.10/site-packages/torch/fx/passes/param_fetch.py new file mode 100644 index 0000000000000000000000000000000000000000..5979e29fcc6b2650a1f73be4845e2ad3dcda0920 --- /dev/null +++ b/parrot/lib/python3.10/site-packages/torch/fx/passes/param_fetch.py @@ -0,0 +1,66 @@ +from torch.fx.graph_module import GraphModule +from typing import Any, Callable, Dict, List, Tuple, Type +import torch +import torch.nn as nn + +from torch.fx._compatibility import compatibility + +__all__ = ['default_matching', 'extract_attrs_for_lowering', 'lift_lowering_attrs_to_nodes'] + +# Matching method matches the attribute name of current version to the attribute name of `target_version` +@compatibility(is_backward_compatible=False) +def default_matching(name: str, target_version: int) -> str: + """Default matching method 
+ """ + return name + +# This dict maps the nn.Module class name to the attribute name list that we want to fetch for lowering. +# The first integer in the tuple is the version number of the nn.Module class when we create the parameter list. +# If there's a version mismatch then it means the parameter names in the book might be mismatched with nn.Module. +module_fetch_book: Dict[Type, Tuple[int, List[str], Callable[[str, int], str]]] = { + torch.nn.modules.linear.Linear: (1, ["weight", "bias"], default_matching), + torch.nn.modules.conv.Conv2d: ( + 1, ["weight", "bias", "kernel_size", "stride", "padding", "dilation", "groups", "padding_mode"], default_matching + ), + torch.nn.modules.batchnorm.BatchNorm2d: (2, ["weight", "bias", "running_mean", "running_var", "eps"], default_matching), + torch.nn.modules.pooling.AdaptiveAvgPool2d: (1, [], default_matching), + torch.nn.modules.pooling.MaxPool2d: ( + 1, ["kernel_size", "stride", "padding", "dilation", "return_indices", "ceil_mode"], default_matching + ), + torch.nn.modules.activation.ReLU: (1, ["inplace"], default_matching), +} + +@compatibility(is_backward_compatible=False) +def extract_attrs_for_lowering(mod: nn.Module) -> Dict[str, Any]: + """If `mod` is in `module_fetch_book`, fetch the mod's attributes that in the `module_fetch_book` + after checking module's version is compatible with the `module_fetch_book`. 
+ """ + attrs_for_lowering: Dict[str, Any] = {} + attrs_for_lowering["name"] = torch.typename(mod) + + if type(mod) in module_fetch_book: + version, param_to_fetch, matching_method = module_fetch_book[type(mod)] + if version < mod._version: + raise RuntimeError(f"Fetcher version {version} try to fetch {torch.typename(mod)} version {mod._version}, " + "please upgrade the module_fetch_book, open an issue and @842974287 " + "or report a bug to AIACC team directly.") + for attr in param_to_fetch: + attrs_for_lowering[attr] = getattr(mod, matching_method(attr, mod._version)) + else: + raise RuntimeError(f"{torch.typename(mod)} is not in the module_fetch_book yet, " + "please add it to the module_fetch_book, open an issue and @842974287 " + "or report a bug to AIACC team directly.") + return attrs_for_lowering + +@compatibility(is_backward_compatible=False) +def lift_lowering_attrs_to_nodes(fx_module: GraphModule) -> None: + """Recursively traverse all `fx_module` nodes and fetch the module's attributes if the node is a leaf module. 
+ """ + submodules = dict(fx_module.named_modules()) + + for node in fx_module.graph.nodes: + if node.op == "call_module": + if isinstance(submodules[node.target], GraphModule): + lift_lowering_attrs_to_nodes(submodules[node.target]) + else: + node.attrs_for_lowering = extract_attrs_for_lowering(submodules[node.target]) diff --git a/parrot/lib/python3.10/site-packages/torch/fx/passes/runtime_assert.py b/parrot/lib/python3.10/site-packages/torch/fx/passes/runtime_assert.py new file mode 100644 index 0000000000000000000000000000000000000000..66b8fbe29d9fdd926a9cce298d6e67906ff55d28 --- /dev/null +++ b/parrot/lib/python3.10/site-packages/torch/fx/passes/runtime_assert.py @@ -0,0 +1,392 @@ +# mypy: allow-untyped-defs +import logging +import operator +from typing import Any, Dict, Optional, Set, TYPE_CHECKING + +# Import sympy and ShapeEnv during TYPE_CHECKING since importing sympy is slow +if TYPE_CHECKING: + from torch.fx.experimental.symbolic_shapes import ShapeEnv +else: + ShapeEnv = Any + +import torch +import torch.utils._pytree as pytree +from torch import fx +from torch.fx._compatibility import compatibility +from torch.fx._utils import lazy_format_graph_code +from torch.fx.experimental.sym_node import SymNode +from torch.fx.graph_module import GraphModule + +log = logging.getLogger(__name__) +graph_code_log = torch._logging.getArtifactLogger(__name__, "graph_code") + + +def _get_example_value(node: fx.Node) -> Optional[str]: + """ + Get the example value key for a node, since dynamo uses "example_value" + while non-strict export uses "val. 
+ """ + if "example_value" in node.meta: + return node.meta["example_value"] + elif "val" in node.meta: + return node.meta["val"] + else: + return None + + +@compatibility(is_backward_compatible=True) +def insert_deferred_runtime_asserts( + gm: GraphModule, + shape_env: ShapeEnv, + name: str, + export: bool = False, +) -> None: + """ + During tracing, we may have discovered that some data-dependent values + had runtime assert on them; e.g., torch.empty(x.item()) induces a runtime + that x.item() >= 0. This asserts can happen unpredictably during fake + tensor propagation, so we cannot conveniently insert them into the FX graph + when they occur. Instead, we accumulate them in the ShapeEnv, and in this + pass insert them into the graph as proper tests. + """ + + # We hash (node_name, min_val, max_val) + nodes_that_already_have_sym_constraint_range = set() + + # We hash only node name here because size don't take min/max + nodes_that_already_have_sym_constraint_size = set() + # TODO this only works for top-level nodes today, also + # we should potentially use it not create duplicate + # assert_async nodes + for node in gm.graph.nodes: + if ( + node.op == "call_function" + and node.target == torch.ops.aten.sym_constrain_range.default + ): + assert len(node.args) == 1 + nodes_that_already_have_sym_constraint_range.add( + (node.args[0], node.kwargs["min"], node.kwargs["max"]) + ) + if ( + node.op == "call_function" + and node.target == torch.ops.aten.sym_constrain_range_for_size.default + ): + assert len(node.args) == 1 + nodes_that_already_have_sym_constraint_size.add(node.args[0]) + + # Import sympy locally + import sympy + + from torch.fx.experimental.symbolic_shapes import ( + CallMethodKey, + cast_symbool_to_symint_guardless, + ConvertIntKey, + DivideByKey, + free_symbols, + InnerTensorKey, + ) + from torch.utils._sympy.interp import sympy_interp + from torch.utils._sympy.reference import PythonReferenceAnalysis + + # TODO: Request simplification on runtime asserts 
before emitting them + ras_by_symbol = shape_env.deferred_runtime_asserts.copy() + graph = gm.graph + + if not any(ras for ras in ras_by_symbol.values()): + return + + graph_code_log.debug( + "%s", + lazy_format_graph_code(f"pre insert_deferred_runtime_asserts {name}", gm), + ) + + # deduplicate unassociated runtime assertions + # we could do better, some guards might be redundant, + # e.g. Eq(s0, 4) & Eq(2*s0, 8) + # but unclear how to handle all of that right now. + # TODO(pianpwk): better way of doing this + new_ras = [] + ras_exprs: Set[sympy.Expr] = set() + for ras in ras_by_symbol.pop(None, []): # type: ignore[call-overload] + if ras.expr not in ras_exprs: + new_ras.append(ras) + ras_exprs.add(ras.expr) + ras_by_symbol[None] = new_ras # type: ignore[index] + + # We are going to mutate the dict + symbol_to_proxy: Dict[sympy.Symbol, fx.Proxy] = {} + placeholders = set() + last_placeholder = None + for node in graph.nodes: + if node.op != "placeholder": + break + last_placeholder = node + placeholders.add(node) + if last_placeholder is None: # no placeholders, just insert before first node + last_placeholder = next(iter(graph.nodes)) + + # Identify what symbols we need to reify. 
This isn't strictly needed + # but helps reduce churn on the graph + needed_symbols: Set[sympy.Symbol] = set() + for ras in ras_by_symbol.values(): + for ra in ras: + needed_symbols.update(free_symbols(ra.expr)) + + log.debug("needed_symbols = %s", needed_symbols) + + def add_runtime_asserts(ras): + for ra in ras: + log.debug("inserting runtime assert %s", ra.expr) + # Need to process ALL free symbols, not just unbacked ones + fvs = free_symbols(ra.expr) + missing = fvs - symbol_to_proxy.keys() + if missing: + i1 = min(missing, key=str) + # TODO: Remove relaxing assert on unbacked_symint https://github.com/pytorch/pytorch/issues/119689 + # assert shape_env.is_unbacked_symint(i1), i1 + ras_by_symbol.setdefault(i1, []).append(ra) + else: + # Convert the sympy expression into a sequence of FX + # nodes + res = sympy_interp( + PythonReferenceAnalysis, symbol_to_proxy, ra.expr + ).node + graph.call_function( + torch.ops.aten._assert_scalar.default, + # TODO: use ra.msg here, but it's pretty + # useless right now + ( + res, + f"Runtime assertion failed for expression {ra.expr} on node '{res}'", + ), + ) + + inserted_sym_nodes = 0 # for inserting unassociated runtime asserts + nodes = list(graph.nodes) + for i, node in enumerate(nodes[:-1]): + # Placeholders can match symbols, but when we destructure them + # with size we have to make sure we insert the nodes after all + # the placeholders + with graph.inserting_before( + nodes[i + 1] if node not in placeholders else last_placeholder.next + ): + # Unfortunately, this logic still must remain because manual + # make_fx calls may not explicitly bind all symbolic ints as + # arguments to the function, so we must infer it from the other + # arguments + if ( + node in placeholders + and (example_value := _get_example_value(node)) is not None + ): + + def match_symbol(symint, cb): + if ( + isinstance(symint, torch.SymInt) + and isinstance(symint.node, SymNode) + and isinstance(s := symint.node.expr, sympy.Symbol) + and s not in 
symbol_to_proxy + and s in needed_symbols + ): + symbol_to_proxy[s] = fx.Proxy(cb()) + log.debug("symbol_to_proxy[%s] = %s", s, symbol_to_proxy[s]) + nonlocal inserted_sym_nodes + inserted_sym_nodes += 1 + + match_symbol(example_value, lambda: node) + if isinstance(t := example_value, torch.Tensor): + for i, s in enumerate(t.size()): + match_symbol( + s, + lambda: graph.call_function( + torch.ops.aten.sym_size.int, (node, i) + ), + ) + for i, s in enumerate(t.stride()): + match_symbol( + s, + lambda: graph.call_function( + torch.ops.aten.sym_stride.int, (node, i) + ), + ) + match_symbol( + t.storage_offset(), + lambda: graph.call_function( + torch.ops.aten.sym_storage_offset.default, (node,) + ), + ) + + # Handle asserts that aren't associated with any symbol. This + # doesn't really have to be in the loop as it will only run once, + # it just needs to happen right after the placeholders. + # insert this after placeholders & added sym nodes, and before non-placeholders. + if node not in placeholders: + last_sym_node = last_placeholder + for _ in range(inserted_sym_nodes): + last_sym_node = last_sym_node.next + with graph.inserting_before(last_sym_node.next): + add_runtime_asserts(ras_by_symbol.pop(None, [])) # type: ignore[call-overload] + + defs = [] + + if unbacked_bindings := node.meta.get("unbacked_bindings"): + for s, keypath in unbacked_bindings.items(): + defs.append(s) + + # TODO: some CSE when generating these nodes can probably + # help reduce graph size and improve compile itme + def go(node, keypath): + if keypath == (): + return node + if ( + len(keypath) >= 2 + and isinstance(keypath[0], CallMethodKey) + and isinstance(keypath[1], pytree.SequenceKey) + ): + if keypath[0].name == "size": + return go( + graph.call_function( + torch.ops.aten.sym_size.int, + (node, keypath[1].idx), + ), + keypath[2:], + ) + if keypath[0].name == "stride": + return go( + graph.call_function( + torch.ops.aten.stride.int, + (node, keypath[1].idx), + ), + keypath[2:], + ) + 
return go( + graph.call_method( + keypath[0].name, (node, keypath[1].idx) + ), + keypath[2:], + ) + elif isinstance(keypath[0], CallMethodKey): + return go( + graph.call_method(keypath[0].name, (node,)), keypath[1:] + ) + elif isinstance(keypath[0], pytree.SequenceKey): + return go( + graph.call_function( + operator.getitem, (node, keypath[0].idx) + ), + keypath[1:], + ) + elif isinstance(keypath[0], ConvertIntKey): + return go( + graph.call_function( + cast_symbool_to_symint_guardless, (node,) + ), + keypath[1:], + ) + elif isinstance(keypath[0], DivideByKey): + # TODO: need to assert divisibility + return go( + graph.call_function( + operator.floordiv, (node, keypath[0].divisor) + ), + keypath[1:], + ) + elif isinstance(keypath[0], InnerTensorKey): + return go( + graph.call_function( + getattr, (node, keypath[0].inner_name) + ), + keypath[1:], + ) + else: + raise AssertionError(f"unrecognized keypath {keypath}") + + symbol_to_proxy[s] = fx.Proxy(go(node, keypath)) + log.debug("symbol_to_proxy[%s] = %s", s, symbol_to_proxy[s]) + + for i0 in defs: + ras = ras_by_symbol.pop(i0, []) + # Before we perform any asserts, first apply range + # refinement. This is important, because if we are going + # to retrace the graph (and we typically are if we send + # the graph to AOTAutograd), we need to make sure we apply + # range refinement (ala _check_is_size) first, BEFORE we + # run any of the asserts. Otherwise, we may decide to + # perform substitutions based on the asserts which we then + # can't back out, because value ranges can only be applied + # to asserts.) + # + # A perhaps better long term plan is to avoid this order + # dependence by making it possible to refine ranges on + # arbitrary expressions, not just symbols. But it is not + # so easy to make use of this information, see + # https://twitter.com/ezyang/status/1745801370299482492 + # We actually made an attempt at this in + # https://github.com/pytorch/pytorch/pull/119043 + # which didn't work. 
+ # + # Another ideas for how to do this: + # - Have bound_sympy be the source of truth of the ranges of any expression + # - Cache intermediate results for every subexpression of bound_sympy + # - This cache should be possible to edit to refine ranges + # + # One issue with this proposal is that if + # we have a bound on 2x, we are not going to be able to + # apply it for 4x. Similarly, we may have bounds for an + # equivalent expression that we are not applying because + # it's not a perfect match (e.g. x < y vs y > x)". + # + # The first issue we already have it and it's impossible + # to solve in general, so any implementation on a best + # effort basis should do. + # + # The second issue is a preexisting one. It can be mitigated + # with a normalisation algorithm. In general, it may also + # be on a best effort basis, but since our grammar is not + # terribly difficult, chances are we could even fully + # normalise SymPy expressions... who knows. + + if i0 in shape_env.size_like: + if export: + if ( + symbol_to_proxy[i0].node + not in nodes_that_already_have_sym_constraint_size + ): + graph.call_function( + torch.ops.aten.sym_constrain_range_for_size.default, + (symbol_to_proxy[i0].node,), + ) + else: + graph.call_function( + torch._check_is_size, (symbol_to_proxy[i0].node,) + ) + + vr = shape_env.var_to_range[i0] + if not shape_env._default_unspecified_value_range().issubset(vr): + # The runtime range is constrained, so add a runtime + # assert and also explicitly refine the range + # (refinement should not be necessary once runtime + # asserts cause refinement, but that's NYI) + def convert(s): + try: + return int(s) + except TypeError: + return None + + min_val = convert(vr.lower) + max_val = convert(vr.upper) + + if ( + symbol_to_proxy[i0].node, + min_val, + max_val, + ) not in nodes_that_already_have_sym_constraint_range: + graph.call_function( + torch.ops.aten.sym_constrain_range.default, + (symbol_to_proxy[i0].node,), + { + "min": convert(vr.lower), + 
"max": convert(vr.upper), + }, + ) + + add_runtime_asserts(ras) diff --git a/parrot/lib/python3.10/site-packages/torch/fx/passes/tools_common.py b/parrot/lib/python3.10/site-packages/torch/fx/passes/tools_common.py new file mode 100644 index 0000000000000000000000000000000000000000..aac071ace8c2daff5c727e462f34bb47ff0f820a --- /dev/null +++ b/parrot/lib/python3.10/site-packages/torch/fx/passes/tools_common.py @@ -0,0 +1,303 @@ +# mypy: allow-untyped-defs +from typing import List, Tuple, Union, Dict, Any, Set, Mapping, Optional +import collections +from dataclasses import dataclass +import operator + +import torch +import torch.fx +from torch.fx.node import _get_qualified_name +from torch.fx._compatibility import compatibility + +__all__ = ['get_acc_ops_name', 'get_node_target', 'is_node_output_tensor', 'FxNetAccFusionsFinder', 'legalize_graph'] + +Tensors = Union[Tuple[torch.Tensor], List[torch.Tensor]] +TensorOrTensors = Union[torch.Tensor, Tensors] +NodeList = List[torch.fx.Node] +NodeSet = Set[torch.fx.Node] +Names = List[str] +CALLABLE_NODE_OPS = {"call_module", "call_function", "call_method"} + + +@compatibility(is_backward_compatible=False) +def get_acc_ops_name(k): + if isinstance(k, str): + return k + elif k.__module__ and "acc_ops" in k.__module__: + return f"acc_ops.{k.__name__}" + else: + module = k.__module__.replace('torch._ops', 'torch.ops') # WAR for bug in how torch.ops assigns module + return f"{module if module else ''}.{k.__name__}" + + +@compatibility(is_backward_compatible=False) +def get_node_target(submodules: Mapping[str, torch.nn.Module], node: torch.fx.Node) -> str: + """ + Given a `node` returns its target typename. + + For "call_method" node, return node.target which is the name of that method being called. + This could potential lead to conflict but should be okay because normally it's on a tensor. + + For "call_function" node, return typename of node.target. 
+ + For "call_module" node, return typename of the module that node.target point to. + + If seeing "_VariableFunctionsClass" in the target name string, it will be replaced by + "torch". e.g. _VariableFunctionsClass.relu would become torch.relu. + """ + + assert node.op in CALLABLE_NODE_OPS, ( + "Expect op types of " + ", ".join(CALLABLE_NODE_OPS) + f", but found {node.op}" + ) + + if node.op == "call_module": + assert isinstance(node.target, str) + submod = submodules[node.target] + submod_type = getattr(submod, "_base_class_origin", type(submod)) + return get_acc_ops_name(submod_type) + elif node.op == "call_function": + target: Any = node.target + return ( + f"acc_ops.{target.__name__}" + if target.__module__ is not None and "acc_ops" in target.__module__ + else _get_qualified_name(target) + ) + else: + assert isinstance(node.target, str) + return node.target + +@compatibility(is_backward_compatible=False) +def is_node_output_tensor(node: torch.fx.Node) -> bool: + """Checks if the node output produces a Tensor or not. + + NOTE: This requires to run `ShapeProp` on the containing fx graph before + calling this function. This is because it works by checking the `type` + metadata on the node. This metadata is produced by the `ShapeProp`. + """ + type_ = node.meta.get("type", None) + return type_ is not None and issubclass(type_, torch.Tensor) + +@compatibility(is_backward_compatible=False) +class FxNetAccFusionsFinder: + """ + Finds groups of connected ACC nodes that pass non-tensor data between each other. + Such groups are called fusion groups. + """ + + def __init__(self, module: torch.fx.GraphModule, acc_nodes: NodeSet): + self.module = module + self.nodes = list(module.graph.nodes) + self.acc_nodes = acc_nodes + + @dataclass + class FusionGroup: + # The smallest idx of nodes in the fusion group after topological sorting all the nodes in the model. + top_node_idx: int + + # Nodes in this fusion group. + nodes: NodeSet + + # Inputs to this fusion group. 
+ inputs: NodeSet + + # Nodes that in the fusion group that haven't been processed yet. + nodes_need_process: NodeSet + + def add_node(self, node): + """ + Add a node to fusion group. + """ + if node in self.nodes: + return + + self.nodes_need_process.add(node) + self.nodes.add(node) + self.inputs.discard(node) + self.inputs.update( + { + n + for n in node.all_input_nodes + if n.op in CALLABLE_NODE_OPS and n not in self.nodes + } + ) + + def recursive_add_node( + self, + fusion_group: "FxNetAccFusionsFinder.FusionGroup", + inputs: Union[NodeSet, NodeList], + visited: Optional[NodeSet] = None, + ): + """ + Start from inputs and going reverse topological order. If any upstream node + is in the fusion group, add all the nodes in this path to fusion group. + """ + for arg in inputs: + # skip the node if already seen + if visited is not None: + if arg in visited: + continue + visited.add(arg) + + # Skip placeholder and get_attr because they won't be in the fusion group. + if arg.op not in CALLABLE_NODE_OPS: + continue + + # If the node has smaller idx, it's already an upstream node of the fusion + # group. We don't need to check it anymore. + if self.nodes.index(arg) < fusion_group.top_node_idx: + continue + + # If the node is in the fusion group, return True. + if arg in fusion_group.nodes: + return True + + # Check the upstream nodes of the node, if any of them is in the fusion group + # we'll add this node to fusion group and return True. 
+ if self.recursive_add_node(fusion_group, arg.all_input_nodes, visited): + fusion_group.add_node(arg) + return True + + return False + + def __call__(self) -> Dict[torch.fx.Node, NodeSet]: + result: Dict[torch.fx.Node, NodeSet] = {} + acc_nodes = list(self.acc_nodes) + + for node in acc_nodes: + if node in result: + continue + if node.op not in CALLABLE_NODE_OPS: + continue + if "tensor_meta" in node.meta: + continue + if node not in self.acc_nodes: + continue + + fusion_group: FxNetAccFusionsFinder.FusionGroup = self.FusionGroup( + top_node_idx=self.nodes.index(node), + nodes={node}, + inputs=set(node.all_input_nodes), + nodes_need_process={node}, + ) + while fusion_group.nodes_need_process: + node = fusion_group.nodes_need_process.pop() + self.recursive_add_node( + fusion_group, + fusion_group.inputs, + visited=set(), + ) + + # Optionally add downstream nodes + if "tensor_meta" not in node.meta: + for user in node.users: + if user.op not in CALLABLE_NODE_OPS: + continue + if user in fusion_group.nodes: + continue + + fusion_group.add_node(user) + self.recursive_add_node( + fusion_group, + fusion_group.inputs, + visited=set(), + ) + + # Add some upstream nodes + for arg in node.all_input_nodes: + if arg.op not in CALLABLE_NODE_OPS: + continue + if "tensor_meta" in arg.meta: + continue + if arg in fusion_group.nodes: + continue + + fusion_group.add_node(arg) + fusion_group.top_node_idx = min( + fusion_group.top_node_idx, self.nodes.index(arg) + ) + self.recursive_add_node( + fusion_group, + fusion_group.inputs, + visited=set(), + ) + + if not (set(fusion_group.nodes) <= self.acc_nodes): + self.acc_nodes -= fusion_group.nodes + else: + for n in fusion_group.nodes: + result[n] = fusion_group.nodes + + return result + + +@compatibility(is_backward_compatible=False) +def legalize_graph(gm: torch.fx.GraphModule) -> torch.fx.GraphModule: + """ + Replace the graph of the given GraphModule with one that contains the same nodes as the + original, but in topologically 
sorted order. + + This is used by the merge_matmul transformation below, which disturbs the topologically sorted + order of its input GraphModule, so that this order is restored before further transformation. + + Arguments: + gm: The graph module to topologically sort. It is modified in-place. + + Returns: + The graph module in-place sorted + """ + + # These operators are used for making runtime assertions before any + # data-dependent operators occur. We want to prioritize sorting these to + # ensure that these assertions appear before any data-dependent operations + # in the graph. + PRIORITIZED_OPS = [ + operator.add, + operator.mul, + operator.sub, + operator.floordiv, + operator.truediv, + operator.mod, + operator.le, + operator.lt, + operator.ge, + operator.gt, + operator.eq, + operator.ne, + torch.ops.aten.sym_constrain_range.default, + torch.ops.aten.sym_constrain_range_for_size.default, + torch.ops.aten._assert_async.msg, + torch.ops.aten.scalar_tensor.default, + torch.ops.aten._assert_scalar.default, + ] + + indeg = dict.fromkeys(gm.graph.nodes, 0) + new_graph = torch.fx.Graph() + # Track how many unfulfilled dependencies each node has + for node in gm.graph.nodes: + for user in node.users: + indeg[user] += 1 + queue: collections.deque = collections.deque() + # Add all nodes with no dependencies to the queue + for node in gm.graph.nodes: + if indeg[node] == 0: + queue.append(node) + env: Dict[torch.fx.Node, torch.fx.Node] = {} + # Pop nodes from the queue, and add nodes that have had all their + # dependencies fulfilled + while len(queue) > 0: + cur = queue.popleft() + env[cur] = new_graph.node_copy(cur, lambda x: env[x]) + for user in cur.users: + indeg[user] -= 1 + if indeg[user] == 0: + if user.op == "call_function" and user.target in PRIORITIZED_OPS: + queue.appendleft(user) + else: + queue.append(user) + # If the new graph's size is not as large as the old one, then there must be + # a cycle (i.e. some node's dependencies were not satisfied.) 
+ if len(new_graph.nodes) < len(gm.graph.nodes): + raise RuntimeError(f"Input graph has cycles, unable to add {[node for node in indeg if indeg[node] != 0]}") + new_graph._codegen = gm.graph._codegen + gm.graph = new_graph + return gm diff --git a/parrot/lib/python3.10/site-packages/torch/share/cmake/Caffe2/FindCUDAToolkit.cmake b/parrot/lib/python3.10/site-packages/torch/share/cmake/Caffe2/FindCUDAToolkit.cmake new file mode 100644 index 0000000000000000000000000000000000000000..7c8a79c5493afa763ad1bc5499d5074892c4aafc --- /dev/null +++ b/parrot/lib/python3.10/site-packages/torch/share/cmake/Caffe2/FindCUDAToolkit.cmake @@ -0,0 +1,1073 @@ + +# This module is back-ported from CMake 3.17 and above to work with CMake 3.10 + +# Distributed under the OSI-approved BSD 3-Clause License. See accompanying +# file Copyright.txt or https://cmake.org/licensing for details. + +#[=======================================================================[.rst: +FindCUDAToolkit +--------------- + +.. versionadded:: 3.17 + +This script locates the NVIDIA CUDA toolkit and the associated libraries, but +does not require the ``CUDA`` language be enabled for a given project. This +module does not search for the NVIDIA CUDA Samples. + +.. versionadded:: 3.19 + QNX support. + +Search Behavior +^^^^^^^^^^^^^^^ + +The CUDA Toolkit search behavior uses the following order: + +1. If the ``CUDA`` language has been enabled we will use the directory + containing the compiler as the first search location for ``nvcc``. + +2. If the ``CUDAToolkit_ROOT`` cmake configuration variable (e.g., + ``-DCUDAToolkit_ROOT=/some/path``) *or* environment variable is defined, it + will be searched. If both an environment variable **and** a + configuration variable are specified, the *configuration* variable takes + precedence. + + The directory specified here must be such that the executable ``nvcc`` or + the appropriate ``version.txt`` file can be found underneath the specified + directory. + +3. 
If the CUDA_PATH environment variable is defined, it will be searched + for ``nvcc``. + +4. The user's path is searched for ``nvcc`` using :command:`find_program`. If + this is found, no subsequent search attempts are performed. Users are + responsible for ensuring that the first ``nvcc`` to show up in the path is + the desired path in the event that multiple CUDA Toolkits are installed. + +5. On Unix systems, if the symbolic link ``/usr/local/cuda`` exists, this is + used. No subsequent search attempts are performed. No default symbolic link + location exists for the Windows platform. + +6. The platform specific default install locations are searched. If exactly one + candidate is found, this is used. The default CUDA Toolkit install locations + searched are: + + +-------------+-------------------------------------------------------------+ + | Platform | Search Pattern | + +=============+=============================================================+ + | macOS | ``/Developer/NVIDIA/CUDA-X.Y`` | + +-------------+-------------------------------------------------------------+ + | Other Unix | ``/usr/local/cuda-X.Y`` | + +-------------+-------------------------------------------------------------+ + | Windows | ``C:\Program Files\NVIDIA GPU Computing Toolkit\CUDA\vX.Y`` | + +-------------+-------------------------------------------------------------+ + + Where ``X.Y`` would be a specific version of the CUDA Toolkit, such as + ``/usr/local/cuda-9.0`` or + ``C:\Program Files\NVIDIA GPU Computing Toolkit\CUDA\v9.0`` + + .. note:: + + When multiple CUDA Toolkits are installed in the default location of a + system(e.g., both ``/usr/local/cuda-9.0`` and ``/usr/local/cuda-10.0`` + exist but the ``/usr/local/cuda`` symbolic link does **not** exist), this + package is marked as **not** found. + + There are too many factors involved in making an automatic decision in + the presence of multiple CUDA Toolkits being installed. 
In this + situation, users are encouraged to either (1) set ``CUDAToolkit_ROOT`` or + (2) ensure that the correct ``nvcc`` executable shows up in ``$PATH`` for + :command:`find_program` to find. + +Arguments +^^^^^^^^^ + +``[]`` + The ``[]`` argument requests a version with which the package found + should be compatible. See :ref:`find_package version format ` + for more details. + +Options +^^^^^^^ + +``REQUIRED`` + If specified, configuration will error if a suitable CUDA Toolkit is not + found. + +``QUIET`` + If specified, the search for a suitable CUDA Toolkit will not produce any + messages. + +``EXACT`` + If specified, the CUDA Toolkit is considered found only if the exact + ``VERSION`` specified is recovered. + +Imported targets +^^^^^^^^^^^^^^^^ + +An :ref:`imported target ` named ``CUDA::toolkit`` is provided. + +This module defines :prop_tgt:`IMPORTED` targets for each +of the following libraries that are part of the CUDAToolkit: + +- :ref:`CUDA Runtime Library` +- :ref:`CUDA Driver Library` +- :ref:`cuBLAS` +- :ref:`cuFFT` +- :ref:`cuRAND` +- :ref:`cuSOLVER` +- :ref:`cuSPARSE` +- :ref:`cuPTI` +- :ref:`NPP` +- :ref:`nvBLAS` +- :ref:`nvGRAPH` +- :ref:`nvJPEG` +- :ref:`nvidia-ML` +- :ref:`nvRTC` +- :ref:`nvToolsExt` +- :ref:`OpenCL` +- :ref:`cuLIBOS` + +.. _`cuda_toolkit_rt_lib`: + +CUDA Runtime Library +"""""""""""""""""""" + +The CUDA Runtime library (cudart) are what most applications will typically +need to link against to make any calls such as `cudaMalloc`, and `cudaFree`. + +Targets Created: + +- ``CUDA::cudart`` +- ``CUDA::cudart_static`` + +.. _`cuda_toolkit_driver_lib`: + +CUDA Driver Library +"""""""""""""""""""" + +The CUDA Driver library (cuda) are used by applications that use calls +such as `cuMemAlloc`, and `cuMemFree`. + +Targets Created: + +- ``CUDA::cuda_driver`` + +.. _`cuda_toolkit_cuBLAS`: + +cuBLAS +"""""" + +The `cuBLAS `_ library. 
+ +Targets Created: + +- ``CUDA::cublas`` +- ``CUDA::cublas_static`` +- ``CUDA::cublasLt`` starting in CUDA 10.1 +- ``CUDA::cublasLt_static`` starting in CUDA 10.1 + +.. _`cuda_toolkit_cuFFT`: + +cuFFT +""""" + +The `cuFFT `_ library. + +Targets Created: + +- ``CUDA::cufft`` +- ``CUDA::cufftw`` +- ``CUDA::cufft_static`` +- ``CUDA::cufft_static_nocallback`` starting in CUDA 9.2, requires CMake 3.23+ +- ``CUDA::cufftw_static`` + +cuRAND +"""""" + +The `cuRAND `_ library. + +Targets Created: + +- ``CUDA::curand`` +- ``CUDA::curand_static`` + +.. _`cuda_toolkit_cuSOLVER`: + +cuSOLVER +"""""""" + +The `cuSOLVER `_ library. + +Targets Created: + +- ``CUDA::cusolver`` +- ``CUDA::cusolver_static`` + +.. _`cuda_toolkit_cuSPARSE`: + +cuSPARSE +"""""""" + +The `cuSPARSE `_ library. + +Targets Created: + +- ``CUDA::cusparse`` +- ``CUDA::cusparse_static`` + +.. _`cuda_toolkit_cupti`: + +cupti +""""" + +The `NVIDIA CUDA Profiling Tools Interface `_. + +Targets Created: + +- ``CUDA::cupti`` +- ``CUDA::cupti_static`` + +.. _`cuda_toolkit_NPP`: + +NPP +""" + +The `NPP `_ libraries. + +Targets Created: + +- `nppc`: + + - ``CUDA::nppc`` + - ``CUDA::nppc_static`` + +- `nppial`: Arithmetic and logical operation functions in `nppi_arithmetic_and_logical_operations.h` + + - ``CUDA::nppial`` + - ``CUDA::nppial_static`` + +- `nppicc`: Color conversion and sampling functions in `nppi_color_conversion.h` + + - ``CUDA::nppicc`` + - ``CUDA::nppicc_static`` + +- `nppicom`: JPEG compression and decompression functions in `nppi_compression_functions.h` + Removed starting in CUDA 11.0, use :ref:`nvJPEG` instead. 
+ + - ``CUDA::nppicom`` + - ``CUDA::nppicom_static`` + +- `nppidei`: Data exchange and initialization functions in `nppi_data_exchange_and_initialization.h` + + - ``CUDA::nppidei`` + - ``CUDA::nppidei_static`` + +- `nppif`: Filtering and computer vision functions in `nppi_filter_functions.h` + + - ``CUDA::nppif`` + - ``CUDA::nppif_static`` + +- `nppig`: Geometry transformation functions found in `nppi_geometry_transforms.h` + + - ``CUDA::nppig`` + - ``CUDA::nppig_static`` + +- `nppim`: Morphological operation functions found in `nppi_morphological_operations.h` + + - ``CUDA::nppim`` + - ``CUDA::nppim_static`` + +- `nppist`: Statistics and linear transform in `nppi_statistics_functions.h` and `nppi_linear_transforms.h` + + - ``CUDA::nppist`` + - ``CUDA::nppist_static`` + +- `nppisu`: Memory support functions in `nppi_support_functions.h` + + - ``CUDA::nppisu`` + - ``CUDA::nppisu_static`` + +- `nppitc`: Threshold and compare operation functions in `nppi_threshold_and_compare_operations.h` + + - ``CUDA::nppitc`` + - ``CUDA::nppitc_static`` + +- `npps`: + + - ``CUDA::npps`` + - ``CUDA::npps_static`` + +.. _`cuda_toolkit_nvBLAS`: + +nvBLAS +"""""" + +The `nvBLAS `_ libraries. +This is a shared library only. + +Targets Created: + +- ``CUDA::nvblas`` + +.. _`cuda_toolkit_nvGRAPH`: + +nvGRAPH +""""""" + +The `nvGRAPH `_ library. +Removed starting in CUDA 11.0 + +Targets Created: + +- ``CUDA::nvgraph`` +- ``CUDA::nvgraph_static`` + + +.. _`cuda_toolkit_nvJPEG`: + +nvJPEG +"""""" + +The `nvJPEG `_ library. +Introduced in CUDA 10. + +Targets Created: + +- ``CUDA::nvjpeg`` +- ``CUDA::nvjpeg_static`` + +.. _`cuda_toolkit_nvRTC`: + +nvRTC +""""" + +The `nvRTC `_ (Runtime Compilation) library. +This is a shared library only. + +Targets Created: + +- ``CUDA::nvrtc`` + +.. _`cuda_toolkit_nvml`: + +nvidia-ML +""""""""" + +The `NVIDIA Management Library `_. +This is a shared library only. + +Targets Created: + +- ``CUDA::nvml`` + +.. 
_`cuda_toolkit_nvToolsExt`: + +nvToolsExt +"""""""""" + +The `NVIDIA Tools Extension `_. +This is a shared library only. + +Targets Created: + +- ``CUDA::nvToolsExt`` + +.. _`cuda_toolkit_opencl`: + +OpenCL +"""""" + +The `NVIDIA OpenCL Library `_. +This is a shared library only. + +Targets Created: + +- ``CUDA::OpenCL`` + +.. _`cuda_toolkit_cuLIBOS`: + +cuLIBOS +""""""" + +The cuLIBOS library is a backend thread abstraction layer library which is +static only. The ``CUDA::cublas_static``, ``CUDA::cusparse_static``, +``CUDA::cufft_static``, ``CUDA::curand_static``, and (when implemented) NPP +libraries all automatically have this dependency linked. + +Target Created: + +- ``CUDA::culibos`` + +**Note**: direct usage of this target by consumers should not be necessary. + +.. _`cuda_toolkit_cuRAND`: + + + +Result variables +^^^^^^^^^^^^^^^^ + +``CUDAToolkit_FOUND`` + A boolean specifying whether or not the CUDA Toolkit was found. + +``CUDAToolkit_VERSION`` + The exact version of the CUDA Toolkit found (as reported by + ``nvcc --version`` or ``version.txt``). + +``CUDAToolkit_VERSION_MAJOR`` + The major version of the CUDA Toolkit. + +``CUDAToolkit_VERSION_MINOR`` + The minor version of the CUDA Toolkit. + +``CUDAToolkit_VERSION_PATCH`` + The patch version of the CUDA Toolkit. + +``CUDAToolkit_BIN_DIR`` + The path to the CUDA Toolkit library directory that contains the CUDA + executable ``nvcc``. + +``CUDAToolkit_INCLUDE_DIRS`` + The path to the CUDA Toolkit ``include`` folder containing the header files + required to compile a project linking against CUDA. + +``CUDAToolkit_LIBRARY_DIR`` + The path to the CUDA Toolkit library directory that contains the CUDA + Runtime library ``cudart``. + +``CUDAToolkit_LIBRARY_ROOT`` + .. versionadded:: 3.18 + + The path to the CUDA Toolkit directory containing the nvvm directory and + version.txt. + +``CUDAToolkit_TARGET_DIR`` + The path to the CUDA Toolkit directory including the target architecture + when cross-compiling. 
When not cross-compiling this will be equivalent to + the parent directory of ``CUDAToolkit_BIN_DIR``. + +``CUDAToolkit_NVCC_EXECUTABLE`` + The path to the NVIDIA CUDA compiler ``nvcc``. Note that this path may + **not** be the same as + :variable:`CMAKE_CUDA_COMPILER _COMPILER>`. ``nvcc`` must be + found to determine the CUDA Toolkit version as well as determining other + features of the Toolkit. This variable is set for the convenience of + modules that depend on this one. + + +#]=======================================================================] + +# NOTE: much of this was simply extracted from FindCUDA.cmake. + +# James Bigler, NVIDIA Corp (nvidia.com - jbigler) +# Abe Stephens, SCI Institute -- http://www.sci.utah.edu/~abe/FindCuda.html +# +# Copyright (c) 2008 - 2009 NVIDIA Corporation. All rights reserved. +# +# Copyright (c) 2007-2009 +# Scientific Computing and Imaging Institute, University of Utah +# +# This code is licensed under the MIT License. See the FindCUDA.cmake script +# for the text of the license. + +# The MIT License +# +# License for the specific language governing rights and limitations under +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the "Software"), +# to deal in the Software without restriction, including without limitation +# the rights to use, copy, modify, merge, publish, distribute, sublicense, +# and/or sell copies of the Software, and to permit persons to whom the +# Software is furnished to do so, subject to the following conditions: +# +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL +# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING +# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER +# DEALINGS IN THE SOFTWARE. +# +############################################################################### + +# The toolkit is located during compiler detection for CUDA and stored in CMakeCUDACompiler.cmake as +# CMAKE_CUDA_COMPILER_TOOLKIT_ROOT and CMAKE_CUDA_COMPILER_LIBRARY_ROOT. +# We compute the rest based on those here to avoid re-searching and to avoid finding a possibly +# different installation. +if(CMAKE_CUDA_COMPILER_TOOLKIT_ROOT) + set(CUDAToolkit_ROOT_DIR "${CMAKE_CUDA_COMPILER_TOOLKIT_ROOT}") + set(CUDAToolkit_LIBRARY_ROOT "${CMAKE_CUDA_COMPILER_LIBRARY_ROOT}") + set(CUDAToolkit_VERSION "${CMAKE_CUDA_COMPILER_TOOLKIT_VERSION}") + + if(CUDAToolkit_VERSION MATCHES [=[([0-9]+)\.([0-9]+)\.([0-9]+)]=]) + set(CUDAToolkit_VERSION_MAJOR "${CMAKE_MATCH_1}") + set(CUDAToolkit_VERSION_MINOR "${CMAKE_MATCH_2}") + set(CUDAToolkit_VERSION_PATCH "${CMAKE_MATCH_3}") + endif() +else() + function(_CUDAToolkit_find_root_dir ) + cmake_parse_arguments(arg "" "" "SEARCH_PATHS;FIND_FLAGS" ${ARGN}) + + if(NOT CUDAToolkit_BIN_DIR) + if(NOT CUDAToolkit_SENTINEL_FILE) + find_program(CUDAToolkit_NVCC_EXECUTABLE + NAMES nvcc nvcc.exe + PATHS ${arg_SEARCH_PATHS} + ${arg_FIND_FLAGS} + ) + endif() + + if(NOT CUDAToolkit_NVCC_EXECUTABLE) + find_file(CUDAToolkit_SENTINEL_FILE + NAMES version.txt + PATHS ${arg_SEARCH_PATHS} + NO_DEFAULT_PATH + ) + endif() + + if(EXISTS "${CUDAToolkit_NVCC_EXECUTABLE}") + # If NVCC exists then invoke it to find the toolkit location. + # This allows us to support wrapper scripts (e.g. 
ccache or colornvcc), CUDA Toolkit, + # NVIDIA HPC SDK, and distro's splayed layouts + execute_process(COMMAND ${CUDAToolkit_NVCC_EXECUTABLE} "-v" "__cmake_determine_cuda" + OUTPUT_VARIABLE _CUDA_NVCC_OUT ERROR_VARIABLE _CUDA_NVCC_OUT) + if(_CUDA_NVCC_OUT MATCHES "\\#\\$ TOP=([^\r\n]*)") + get_filename_component(CUDAToolkit_BIN_DIR "${CMAKE_MATCH_1}/bin" ABSOLUTE) + else() + get_filename_component(CUDAToolkit_BIN_DIR "${CUDAToolkit_NVCC_EXECUTABLE}" DIRECTORY) + endif() + unset(_CUDA_NVCC_OUT) + + mark_as_advanced(CUDAToolkit_BIN_DIR) + set(CUDAToolkit_BIN_DIR "${CUDAToolkit_BIN_DIR}" CACHE PATH "" FORCE) + endif() + + if(CUDAToolkit_SENTINEL_FILE) + get_filename_component(CUDAToolkit_BIN_DIR ${CUDAToolkit_SENTINEL_FILE} DIRECTORY ABSOLUTE) + set(CUDAToolkit_BIN_DIR "${CUDAToolkit_BIN_DIR}/bin") + + set(CUDAToolkit_BIN_DIR "${CUDAToolkit_BIN_DIR}" CACHE PATH "" FORCE) + mark_as_advanced(CUDAToolkit_BIN_DIR) + endif() + endif() + + if(CUDAToolkit_BIN_DIR) + get_filename_component(CUDAToolkit_ROOT_DIR ${CUDAToolkit_BIN_DIR} DIRECTORY ABSOLUTE) + set(CUDAToolkit_ROOT_DIR "${CUDAToolkit_ROOT_DIR}" PARENT_SCOPE) + endif() + + endfunction() + + # For NVCC we can easily deduce the SDK binary directory from the compiler path. + if(CMAKE_CUDA_COMPILER_LOADED AND NOT CUDAToolkit_BIN_DIR AND CMAKE_CUDA_COMPILER_ID STREQUAL "NVIDIA") + get_filename_component(CUDAToolkit_BIN_DIR "${CMAKE_CUDA_COMPILER}" DIRECTORY) + set(CUDAToolkit_BIN_DIR "${CUDAToolkit_BIN_DIR}" CACHE PATH "") + # Try language provided path first. 
+ _CUDAToolkit_find_root_dir(SEARCH_PATHS "${CUDAToolkit_BIN_DIR}" FIND_FLAGS NO_DEFAULT_PATH) + mark_as_advanced(CUDAToolkit_BIN_DIR) + endif() + + # Try user provided path + if(NOT CUDAToolkit_ROOT_DIR AND CUDAToolkit_ROOT) + _CUDAToolkit_find_root_dir(SEARCH_PATHS "${CUDAToolkit_ROOT}" FIND_FLAGS PATH_SUFFIXES bin NO_DEFAULT_PATH) + endif() + if(NOT CUDAToolkit_ROOT_DIR) + _CUDAToolkit_find_root_dir(FIND_FLAGS PATHS ENV CUDA_PATH PATH_SUFFIXES bin) + endif() + + # If the user specified CUDAToolkit_ROOT but the toolkit could not be found, this is an error. + if(NOT CUDAToolkit_ROOT_DIR AND (DEFINED CUDAToolkit_ROOT OR DEFINED ENV{CUDAToolkit_ROOT})) + # Declare error messages now, print later depending on find_package args. + set(fail_base "Could not find nvcc executable in path specified by") + set(cuda_root_fail "${fail_base} CUDAToolkit_ROOT=${CUDAToolkit_ROOT}") + set(env_cuda_root_fail "${fail_base} environment variable CUDAToolkit_ROOT=$ENV{CUDAToolkit_ROOT}") + + if(CUDAToolkit_FIND_REQUIRED) + if(DEFINED CUDAToolkit_ROOT) + message(FATAL_ERROR ${cuda_root_fail}) + elseif(DEFINED ENV{CUDAToolkit_ROOT}) + message(FATAL_ERROR ${env_cuda_root_fail}) + endif() + else() + if(NOT CUDAToolkit_FIND_QUIETLY) + if(DEFINED CUDAToolkit_ROOT) + message(STATUS ${cuda_root_fail}) + elseif(DEFINED ENV{CUDAToolkit_ROOT}) + message(STATUS ${env_cuda_root_fail}) + endif() + endif() + set(CUDAToolkit_FOUND FALSE) + unset(fail_base) + unset(cuda_root_fail) + unset(env_cuda_root_fail) + return() + endif() + endif() + + # CUDAToolkit_ROOT cmake / env variable not specified, try platform defaults. + # + # - Linux: /usr/local/cuda-X.Y + # - macOS: /Developer/NVIDIA/CUDA-X.Y + # - Windows: C:\Program Files\NVIDIA GPU Computing Toolkit\CUDA\vX.Y + # + # We will also search the default symlink location /usr/local/cuda first since + # if CUDAToolkit_ROOT is not specified, it is assumed that the symlinked + # directory is the desired location. 
+ if(NOT CUDAToolkit_ROOT_DIR) + if(UNIX) + if(NOT APPLE) + set(platform_base "/usr/local/cuda-") + else() + set(platform_base "/Developer/NVIDIA/CUDA-") + endif() + else() + set(platform_base "C:\\Program Files\\NVIDIA GPU Computing Toolkit\\CUDA\\v") + endif() + + # Build out a descending list of possible cuda installations, e.g. + file(GLOB possible_paths "${platform_base}*") + # Iterate the glob results and create a descending list. + set(versions) + foreach(p ${possible_paths}) + # Extract version number from end of string + string(REGEX MATCH "[0-9][0-9]?\\.[0-9]$" p_version ${p}) + if(IS_DIRECTORY ${p} AND p_version) + list(APPEND versions ${p_version}) + endif() + endforeach() + + # Sort numerically in descending order, so we try the newest versions first. + if(CMAKE_VERSION VERSION_GREATER_EQUAL 3.18) + list(SORT versions COMPARE NATURAL ORDER DESCENDING) + elseif(versions) + # Alphabetical sort here is not ideal but better than nothing + list(SORT versions) + list(REVERSE versions) + endif() + + # With a descending list of versions, populate possible paths to search. + set(search_paths) + foreach(v ${versions}) + list(APPEND search_paths "${platform_base}${v}") + endforeach() + + # Force the global default /usr/local/cuda to the front on Unix. + if(UNIX) + list(INSERT search_paths 0 "/usr/local/cuda") + endif() + + # Now search for the toolkit again using the platform default search paths. + _CUDAToolkit_find_root_dir(SEARCH_PATHS "${search_paths}" FIND_FLAGS PATH_SUFFIXES bin) + + # We are done with these variables now, cleanup for caller. 
+ unset(platform_base) + unset(possible_paths) + unset(versions) + unset(search_paths) + + if(NOT CUDAToolkit_ROOT_DIR) + if(CUDAToolkit_FIND_REQUIRED) + message(FATAL_ERROR "Could not find nvcc, please set CUDAToolkit_ROOT.") + elseif(NOT CUDAToolkit_FIND_QUIETLY) + message(STATUS "Could not find nvcc, please set CUDAToolkit_ROOT.") + endif() + + set(CUDAToolkit_FOUND FALSE) + return() + endif() + endif() +endif() + +if(NOT CUDAToolkit_BIN_DIR) + set(CUDAToolkit_BIN_DIR "${CUDAToolkit_ROOT_DIR}/bin") +endif() + +if(NOT CUDAToolkit_NVCC_EXECUTABLE) + set(CUDAToolkit_NVCC_EXECUTABLE "${CUDAToolkit_BIN_DIR}/nvcc${CMAKE_EXECUTABLE_SUFFIX}") +endif() + +if(CMAKE_CUDA_COMPILER_TOOLKIT_VERSION) + set(CUDAToolkit_VERSION "${CMAKE_CUDA_COMPILER_TOOLKIT_VERSION}") +else() + function(_CUDAToolkit_find_version_file result_variable) + # We first check for a non-scattered installation to prefer it over a scattered installation. + if(CUDAToolkit_ROOT AND EXISTS "${CUDAToolkit_ROOT}/version.txt") + set(${result_variable} "${CUDAToolkit_ROOT}/version.txt" PARENT_SCOPE) + elseif(CUDAToolkit_ROOT_DIR AND EXISTS "${CUDAToolkit_ROOT_DIR}/version.txt") + set(${result_variable} "${CUDAToolkit_ROOT_DIR}/version.txt" PARENT_SCOPE) + elseif(CMAKE_SYSROOT_LINK AND EXISTS "${CMAKE_SYSROOT_LINK}/usr/lib/cuda/version.txt") + set(${result_variable} "${CMAKE_SYSROOT_LINK}/usr/lib/cuda/version.txt" PARENT_SCOPE) + elseif(EXISTS "${CMAKE_SYSROOT}/usr/lib/cuda/version.txt") + set(${result_variable} "${CMAKE_SYSROOT}/usr/lib/cuda/version.txt" PARENT_SCOPE) + endif() + endfunction() + + _CUDAToolkit_find_version_file( _CUDAToolkit_version_file ) + if(_CUDAToolkit_version_file) + # CUDAToolkit_LIBRARY_ROOT contains the device library and version file. 
+ get_filename_component(CUDAToolkit_LIBRARY_ROOT "${_CUDAToolkit_version_file}" DIRECTORY ABSOLUTE) + endif() + unset(_CUDAToolkit_version_file) + + if(CUDAToolkit_NVCC_EXECUTABLE AND + CMAKE_CUDA_COMPILER_VERSION AND + CUDAToolkit_NVCC_EXECUTABLE STREQUAL CMAKE_CUDA_COMPILER) + # Need to set these based off the already computed CMAKE_CUDA_COMPILER_VERSION value + # This if statement will always match, but is used to provide variables for MATCH 1,2,3... + if(CMAKE_CUDA_COMPILER_VERSION MATCHES [=[([0-9]+)\.([0-9]+)\.([0-9]+)]=]) + set(CUDAToolkit_VERSION_MAJOR "${CMAKE_MATCH_1}") + set(CUDAToolkit_VERSION_MINOR "${CMAKE_MATCH_2}") + set(CUDAToolkit_VERSION_PATCH "${CMAKE_MATCH_3}") + set(CUDAToolkit_VERSION "${CMAKE_CUDA_COMPILER_VERSION}") + endif() + elseif(CUDAToolkit_NVCC_EXECUTABLE) + # Compute the version by invoking nvcc + execute_process(COMMAND ${CUDAToolkit_NVCC_EXECUTABLE} "--version" OUTPUT_VARIABLE NVCC_OUT) + if(NVCC_OUT MATCHES [=[ V([0-9]+)\.([0-9]+)\.([0-9]+)]=]) + set(CUDAToolkit_VERSION_MAJOR "${CMAKE_MATCH_1}") + set(CUDAToolkit_VERSION_MINOR "${CMAKE_MATCH_2}") + set(CUDAToolkit_VERSION_PATCH "${CMAKE_MATCH_3}") + set(CUDAToolkit_VERSION "${CMAKE_MATCH_1}.${CMAKE_MATCH_2}.${CMAKE_MATCH_3}") + endif() + unset(NVCC_OUT) + else() + _CUDAToolkit_find_version_file(version_file) + if(version_file) + file(READ "${version_file}" VERSION_INFO) + if(VERSION_INFO MATCHES [=[CUDA Version ([0-9]+)\.([0-9]+)\.([0-9]+)]=]) + set(CUDAToolkit_VERSION_MAJOR "${CMAKE_MATCH_1}") + set(CUDAToolkit_VERSION_MINOR "${CMAKE_MATCH_2}") + set(CUDAToolkit_VERSION_PATCH "${CMAKE_MATCH_3}") + set(CUDAToolkit_VERSION "${CMAKE_MATCH_1}.${CMAKE_MATCH_2}.${CMAKE_MATCH_3}") + endif() + endif() + endif() +endif() + +# Find target directory when crosscompiling. 
+if(CMAKE_CROSSCOMPILING) + if(CMAKE_SYSTEM_PROCESSOR STREQUAL "armv7-a") + # Support for NVPACK + set(CUDAToolkit_TARGET_NAME "armv7-linux-androideabi") + elseif(CMAKE_SYSTEM_PROCESSOR MATCHES "arm") + set(CUDAToolkit_TARGET_NAME "armv7-linux-gnueabihf") + elseif(CMAKE_SYSTEM_PROCESSOR MATCHES "aarch64") + if(ANDROID_ARCH_NAME STREQUAL "arm64") + set(CUDAToolkit_TARGET_NAME "aarch64-linux-androideabi") + elseif(CMAKE_SYSTEM_NAME STREQUAL "QNX") + set(CUDAToolkit_TARGET_NAME "aarch64-qnx") + else() + set(CUDAToolkit_TARGET_NAME "aarch64-linux") + endif(ANDROID_ARCH_NAME STREQUAL "arm64") + elseif(CMAKE_SYSTEM_PROCESSOR STREQUAL "x86_64") + set(CUDAToolkit_TARGET_NAME "x86_64-linux") + endif() + + if(EXISTS "${CUDAToolkit_ROOT_DIR}/targets/${CUDAToolkit_TARGET_NAME}") + set(CUDAToolkit_TARGET_DIR "${CUDAToolkit_ROOT_DIR}/targets/${CUDAToolkit_TARGET_NAME}") + # add known CUDA target root path to the set of directories we search for programs, libraries and headers + list(PREPEND CMAKE_FIND_ROOT_PATH "${CUDAToolkit_TARGET_DIR}") + + # Mark that we need to pop the root search path changes after we have + # found all cuda libraries so that searches for our cross-compilation + # libraries work when another cuda sdk is in CMAKE_PREFIX_PATH or + # PATh + set(_CUDAToolkit_Pop_ROOT_PATH True) + endif() +endif() + +# If not already set we can simply use the toolkit root or it's a scattered installation. +if(NOT CUDAToolkit_TARGET_DIR) + # Not cross compiling + set(CUDAToolkit_TARGET_DIR "${CUDAToolkit_ROOT_DIR}") + # Now that we have the real ROOT_DIR, find components inside it. + list(APPEND CMAKE_PREFIX_PATH ${CUDAToolkit_ROOT_DIR}) + + # Mark that we need to pop the prefix path changes after we have + # found the cudart library. + set(_CUDAToolkit_Pop_Prefix True) +endif() + +# CUDAToolkit_TARGET_DIR always points to the directory containing the include directory. 
+# On a scattered installation /usr, on a non-scattered something like /usr/local/cuda or /usr/local/cuda-10.2/targets/aarch64-linux. +if(EXISTS "${CUDAToolkit_TARGET_DIR}/include/cuda_runtime.h") + set(CUDAToolkit_INCLUDE_DIR "${CUDAToolkit_TARGET_DIR}/include") +elseif(NOT CUDAToolkit_FIND_QUIETLY) + message(STATUS "Unable to find cuda_runtime.h in \"${CUDAToolkit_TARGET_DIR}/include\" for CUDAToolkit_INCLUDE_DIR.") +endif() + +# The NVHPC layout moves math library headers and libraries to a sibling directory. +# Create a separate variable so this directory can be selectively added to math targets. +if(NOT EXISTS "${CUDAToolkit_INCLUDE_DIR}/cublas_v2.h") + set(CUDAToolkit_MATH_INCLUDE_DIR "${CUDAToolkit_TARGET_DIR}/../../math_libs/include") + get_filename_component(CUDAToolkit_MATH_INCLUDE_DIR "${CUDAToolkit_MATH_INCLUDE_DIR}" ABSOLUTE) + if(NOT EXISTS "${CUDAToolkit_MATH_INCLUDE_DIR}/cublas_v2.h") + if(NOT CUDAToolkit_FIND_QUIETLY) + message(STATUS "Unable to find cublas_v2.h in either \"${CUDAToolkit_INCLUDE_DIR}\" or \"${CUDAToolkit_MATH_INCLUDE_DIR}\"") + endif() + unset(CUDAToolkit_MATH_INCLUDE_DIR) + endif() +endif() + +# Find the CUDA Runtime Library libcudart +find_library(CUDA_CUDART + NAMES cudart + PATH_SUFFIXES lib64 lib/x64 +) +find_library(CUDA_CUDART + NAMES cudart + PATH_SUFFIXES lib64/stubs lib/x64/stubs +) + +if(NOT CUDA_CUDART AND NOT CUDAToolkit_FIND_QUIETLY) + message(STATUS "Unable to find cudart library.") +endif() + +if(_CUDAToolkit_Pop_Prefix) + list(REMOVE_AT CMAKE_PREFIX_PATH -1) + unset(_CUDAToolkit_Pop_Prefix) +endif() + +#----------------------------------------------------------------------------- +# Perform version comparison and validate all required variables are set. 
+include(FindPackageHandleStandardArgs) +find_package_handle_standard_args(CUDAToolkit + REQUIRED_VARS + CUDAToolkit_INCLUDE_DIR + CUDAToolkit_VERSION + CUDA_CUDART + CUDAToolkit_BIN_DIR + VERSION_VAR + CUDAToolkit_VERSION +) + +mark_as_advanced(CUDA_CUDART + CUDAToolkit_INCLUDE_DIR + CUDAToolkit_NVCC_EXECUTABLE + CUDAToolkit_SENTINEL_FILE + ) + +#----------------------------------------------------------------------------- +# Construct result variables +if(CUDAToolkit_FOUND) + set(CUDAToolkit_INCLUDE_DIRS ${CUDAToolkit_INCLUDE_DIR}) + get_filename_component(CUDAToolkit_LIBRARY_DIR ${CUDA_CUDART} DIRECTORY ABSOLUTE) +endif() + +#----------------------------------------------------------------------------- +# Construct import targets +if(CUDAToolkit_FOUND) + + function(_CUDAToolkit_find_and_add_import_lib lib_name) + cmake_parse_arguments(arg "" "" "ALT;DEPS;EXTRA_HINTS;EXTRA_PATH_SUFFIXES;EXTRA_INCLUDE_DIRS" ${ARGN}) + + set(search_names ${lib_name} ${arg_ALT}) + + find_library(CUDA_${lib_name}_LIBRARY + NAMES ${search_names} + HINTS ${CUDAToolkit_LIBRARY_DIR} + ENV CUDA_PATH + ${arg_EXTRA_HINTS} + PATH_SUFFIXES nvidia/current lib64 lib/x64 lib + ${arg_EXTRA_PATH_SUFFIXES} + ) + # Don't try any stub directories until we have exhausted all other + # search locations. 
+ find_library(CUDA_${lib_name}_LIBRARY + NAMES ${search_names} + HINTS ${CUDAToolkit_LIBRARY_DIR} + ENV CUDA_PATH + ${arg_EXTRA_HINTS} + PATH_SUFFIXES lib64/stubs lib/x64/stubs lib/stubs stubs + # Support NVHPC splayed math library layout + ../../math_libs/${CUDAToolkit_VERSION_MAJOR}.${CUDAToolkit_VERSION_MINOR}/lib64 + ../../math_libs/lib64 + ) + + mark_as_advanced(CUDA_${lib_name}_LIBRARY) + + if(NOT TARGET CUDA::${lib_name} AND CUDA_${lib_name}_LIBRARY) + add_library(CUDA::${lib_name} UNKNOWN IMPORTED) + set_property(TARGET CUDA::${lib_name} APPEND PROPERTY + INTERFACE_INCLUDE_DIRECTORIES "${CUDAToolkit_INCLUDE_DIRS}") + set_property(TARGET CUDA::${lib_name} APPEND PROPERTY + INTERFACE_SYSTEM_INCLUDE_DIRECTORIES "${CUDAToolkit_INCLUDE_DIRS}") + if(DEFINED CUDAToolkit_MATH_INCLUDE_DIR) + string(FIND ${CUDA_${lib_name}_LIBRARY} "math_libs" math_libs) + if(NOT ${math_libs} EQUAL -1) + set_property(TARGET CUDA::${lib_name} APPEND PROPERTY + INTERFACE_INCLUDE_DIRECTORIES "${CUDAToolkit_MATH_INCLUDE_DIRS}") + set_property(TARGET CUDA::${lib_name} APPEND PROPERTY + INTERFACE_SYSTEM_INCLUDE_DIRECTORIES "${CUDAToolkit_MATH_INCLUDE_DIRS}") + endif() + endif() + set_property(TARGET CUDA::${lib_name} PROPERTY IMPORTED_LOCATION "${CUDA_${lib_name}_LIBRARY}") + foreach(dep ${arg_DEPS}) + if(TARGET CUDA::${dep}) + set_property(TARGET CUDA::${lib_name} APPEND PROPERTY + INTERFACE_LINK_LIBRARIES CUDA::${dep}) + endif() + endforeach() + if(arg_EXTRA_INCLUDE_DIRS) + set_property(TARGET CUDA::${lib_name} APPEND PROPERTY + INTERFACE_INCLUDE_DIRECTORIES "${arg_EXTRA_INCLUDE_DIRS}") + set_property(TARGET CUDA::${lib_name} APPEND PROPERTY + INTERFACE_SYSTEM_INCLUDE_DIRECTORIES "${arg_EXTRA_INCLUDE_DIRS}") + endif() + endif() + endfunction() + + if(NOT TARGET CUDA::toolkit) + add_library(CUDA::toolkit IMPORTED INTERFACE) + set_property(TARGET CUDA::toolkit APPEND PROPERTY + INTERFACE_INCLUDE_DIRECTORIES "${CUDAToolkit_INCLUDE_DIRS}") + set_property(TARGET CUDA::toolkit APPEND PROPERTY 
+ INTERFACE_SYSTEM_INCLUDE_DIRECTORIES "${CUDAToolkit_INCLUDE_DIRS}") + endif() + + _CUDAToolkit_find_and_add_import_lib(cuda_driver ALT cuda) + + _CUDAToolkit_find_and_add_import_lib(cudart) + _CUDAToolkit_find_and_add_import_lib(cudart_static) + + # setup dependencies that are required for cudart_static when building + # on linux. These are generally only required when using the CUDA toolkit + # when CUDA language is disabled + if(NOT TARGET CUDA::cudart_static_deps + AND TARGET CUDA::cudart_static) + + add_library(CUDA::cudart_static_deps IMPORTED INTERFACE) + set_property(TARGET CUDA::cudart_static APPEND PROPERTY + INTERFACE_LINK_LIBRARIES CUDA::cudart_static_deps) + + if(UNIX AND (CMAKE_C_COMPILER OR CMAKE_CXX_COMPILER)) + find_package(Threads REQUIRED) + set_property(TARGET CUDA::cudart_static_deps APPEND PROPERTY + INTERFACE_LINK_LIBRARIES Threads::Threads ${CMAKE_DL_LIBS}) + endif() + + if(UNIX AND NOT APPLE AND NOT (CMAKE_SYSTEM_NAME STREQUAL "QNX")) + # On Linux, you must link against librt when using the static cuda runtime. 
+ find_library(CUDAToolkit_rt_LIBRARY rt) + mark_as_advanced(CUDAToolkit_rt_LIBRARY) + if(NOT CUDAToolkit_rt_LIBRARY) + message(WARNING "Could not find librt library, needed by CUDA::cudart_static") + else() + set_property(TARGET CUDA::cudart_static_deps APPEND PROPERTY + INTERFACE_LINK_LIBRARIES ${CUDAToolkit_rt_LIBRARY}) + endif() + endif() + endif() + + _CUDAToolkit_find_and_add_import_lib(culibos) # it's a static library + foreach(cuda_lib cublasLt cufft curand cusparse nppc nvjpeg) + _CUDAToolkit_find_and_add_import_lib(${cuda_lib}) + _CUDAToolkit_find_and_add_import_lib(${cuda_lib}_static DEPS culibos) + endforeach() + + if(CUDAToolkit_VERSION VERSION_GREATER_EQUAL 11.0.0) + # cublas depends on cublasLt + # https://docs.nvidia.com/cuda/archive/11.0/cublas/index.html#static-library + _CUDAToolkit_find_and_add_import_lib(cublas DEPS cublasLt) + _CUDAToolkit_find_and_add_import_lib(cublas_static DEPS cublasLt_static) + else() + _CUDAToolkit_find_and_add_import_lib(cublas) + _CUDAToolkit_find_and_add_import_lib(cublas_static DEPS culibos) + endif() + + # cuFFTW depends on cuFFT + _CUDAToolkit_find_and_add_import_lib(cufftw DEPS cufft) + _CUDAToolkit_find_and_add_import_lib(cufftw_static DEPS cufft_static) + if(CUDAToolkit_VERSION VERSION_GREATER_EQUAL 9.2) + _CUDAToolkit_find_and_add_import_lib(cufft_static_nocallback DEPS culibos) + endif() + + # cuSOLVER depends on cuBLAS, and cuSPARSE + _CUDAToolkit_find_and_add_import_lib(cusolver DEPS cublas cusparse) + _CUDAToolkit_find_and_add_import_lib(cusolver_static DEPS cublas_static cusparse_static culibos) + + + if(CUDAToolkit_VERSION VERSION_GREATER_EQUAL 10.1.2) + # cusolver depends on liblapack_static.a starting with CUDA 10.1 update 2, + # https://docs.nvidia.com/cuda/archive/11.5.0/cusolver/index.html#static-link-lapack + _CUDAToolkit_find_and_add_import_lib(cusolver_lapack_static ALT lapack_static) # implementation detail static lib + _CUDAToolkit_find_and_add_import_lib(cusolver_static DEPS 
cusolver_lapack_static) + endif() + + if(CUDAToolkit_VERSION VERSION_GREATER 11.2.1) + # cusolver depends on libcusolver_metis and cublasLt + # https://docs.nvidia.com/cuda/archive/11.2.2/cusolver/index.html#link-dependency + _CUDAToolkit_find_and_add_import_lib(cusolver DEPS cublasLt) + + _CUDAToolkit_find_and_add_import_lib(cusolver_metis_static ALT metis_static) # implementation detail static lib + _CUDAToolkit_find_and_add_import_lib(cusolver_static DEPS cusolver_metis_static cublasLt_static) + endif() + + # nvGRAPH depends on cuRAND, and cuSOLVER. + _CUDAToolkit_find_and_add_import_lib(nvgraph DEPS curand cusolver) + _CUDAToolkit_find_and_add_import_lib(nvgraph_static DEPS curand_static cusolver_static) + + # Process the majority of the NPP libraries. + foreach(cuda_lib nppial nppicc nppidei nppif nppig nppim nppist nppitc npps nppicom nppisu) + _CUDAToolkit_find_and_add_import_lib(${cuda_lib} DEPS nppc) + _CUDAToolkit_find_and_add_import_lib(${cuda_lib}_static DEPS nppc_static) + endforeach() + + find_path(CUDAToolkit_CUPTI_INCLUDE_DIR cupti.h PATHS + "${CUDAToolkit_ROOT_DIR}/extras/CUPTI/include" + "${CUDAToolkit_INCLUDE_DIR}/../extras/CUPTI/include" + "${CUDAToolkit_INCLUDE_DIR}" + NO_DEFAULT_PATH) + mark_as_advanced(CUDAToolkit_CUPTI_INCLUDE_DIR) + + if(CUDAToolkit_CUPTI_INCLUDE_DIR) + _CUDAToolkit_find_and_add_import_lib(cupti + EXTRA_PATH_SUFFIXES ../extras/CUPTI/lib64/ + ../extras/CUPTI/lib/ + EXTRA_INCLUDE_DIRS "${CUDAToolkit_CUPTI_INCLUDE_DIR}") + _CUDAToolkit_find_and_add_import_lib(cupti_static + EXTRA_PATH_SUFFIXES ../extras/CUPTI/lib64/ + ../extras/CUPTI/lib/ + EXTRA_INCLUDE_DIRS "${CUDAToolkit_CUPTI_INCLUDE_DIR}") + endif() + + _CUDAToolkit_find_and_add_import_lib(nvrtc DEPS cuda_driver) + + _CUDAToolkit_find_and_add_import_lib(nvml ALT nvidia-ml nvml) + + # nvtools can be installed outside the CUDA toolkit directory, + # so search the NVTOOLSEXT_PATH windows only environment variable + set(nvToolsExt_EXTRA_PATH) + if(WIN32) + 
set(nvToolsExt_EXTRA_PATH "C:\\Program Files\\NVIDIA Corporation\\NvToolsExt") + endif() + + find_path(CUDAToolkit_nvToolsExt_INCLUDE_DIR nvToolsExt.h + PATHS "${CUDAToolkit_INCLUDE_DIR}" + "${CUDAToolkit_ROOT_DIR}" + ENV NVTOOLSEXT_PATH + "${nvToolsExt_EXTRA_PATH}" + PATH_SUFFIXES include + NO_DEFAULT_PATH) + mark_as_advanced(CUDAToolkit_nvToolsExt_INCLUDE_DIR) + + if(CUDAToolkit_nvToolsExt_INCLUDE_DIR) + _CUDAToolkit_find_and_add_import_lib(nvToolsExt + ALT nvToolsExt64 nvToolsExt64_1 + EXTRA_HINTS ENV NVTOOLSEXT_PATH + "${nvToolsExt_EXTRA_PATH}" + EXTRA_INCLUDE_DIRS "${CUDAToolkit_nvToolsExt_INCLUDE_DIR}") + endif() + + _CUDAToolkit_find_and_add_import_lib(OpenCL) +endif() + +unset(CUDAToolkit_ROOT_DIR) + +if(_CUDAToolkit_Pop_ROOT_PATH) + list(REMOVE_AT CMAKE_FIND_ROOT_PATH 0) + unset(_CUDAToolkit_Pop_ROOT_PATH) +endif() diff --git a/parrot/lib/python3.10/site-packages/torch/share/cmake/Tensorpipe/TensorpipeTargets-release.cmake b/parrot/lib/python3.10/site-packages/torch/share/cmake/Tensorpipe/TensorpipeTargets-release.cmake new file mode 100644 index 0000000000000000000000000000000000000000..2149086394b4b3d207d4d031db6448012ec11fdd --- /dev/null +++ b/parrot/lib/python3.10/site-packages/torch/share/cmake/Tensorpipe/TensorpipeTargets-release.cmake @@ -0,0 +1,39 @@ +#---------------------------------------------------------------- +# Generated CMake target import file for configuration "Release". +#---------------------------------------------------------------- + +# Commands may need to know the format version. 
+set(CMAKE_IMPORT_FILE_VERSION 1) + +# Import target "tensorpipe_uv" for configuration "Release" +set_property(TARGET tensorpipe_uv APPEND PROPERTY IMPORTED_CONFIGURATIONS RELEASE) +set_target_properties(tensorpipe_uv PROPERTIES + IMPORTED_LINK_INTERFACE_LANGUAGES_RELEASE "C" + IMPORTED_LOCATION_RELEASE "${_IMPORT_PREFIX}/lib64/libtensorpipe_uv.a" + ) + +list(APPEND _IMPORT_CHECK_TARGETS tensorpipe_uv ) +list(APPEND _IMPORT_CHECK_FILES_FOR_tensorpipe_uv "${_IMPORT_PREFIX}/lib64/libtensorpipe_uv.a" ) + +# Import target "tensorpipe" for configuration "Release" +set_property(TARGET tensorpipe APPEND PROPERTY IMPORTED_CONFIGURATIONS RELEASE) +set_target_properties(tensorpipe PROPERTIES + IMPORTED_LINK_INTERFACE_LANGUAGES_RELEASE "CXX" + IMPORTED_LOCATION_RELEASE "${_IMPORT_PREFIX}/lib64/libtensorpipe.a" + ) + +list(APPEND _IMPORT_CHECK_TARGETS tensorpipe ) +list(APPEND _IMPORT_CHECK_FILES_FOR_tensorpipe "${_IMPORT_PREFIX}/lib64/libtensorpipe.a" ) + +# Import target "tensorpipe_cuda" for configuration "Release" +set_property(TARGET tensorpipe_cuda APPEND PROPERTY IMPORTED_CONFIGURATIONS RELEASE) +set_target_properties(tensorpipe_cuda PROPERTIES + IMPORTED_LINK_INTERFACE_LANGUAGES_RELEASE "CXX" + IMPORTED_LOCATION_RELEASE "${_IMPORT_PREFIX}/lib64/libtensorpipe_cuda.a" + ) + +list(APPEND _IMPORT_CHECK_TARGETS tensorpipe_cuda ) +list(APPEND _IMPORT_CHECK_FILES_FOR_tensorpipe_cuda "${_IMPORT_PREFIX}/lib64/libtensorpipe_cuda.a" ) + +# Commands beyond this point should not need to know the version. 
+set(CMAKE_IMPORT_FILE_VERSION) diff --git a/parrot/lib/python3.10/site-packages/torch/share/cmake/Tensorpipe/TensorpipeTargets.cmake b/parrot/lib/python3.10/site-packages/torch/share/cmake/Tensorpipe/TensorpipeTargets.cmake new file mode 100644 index 0000000000000000000000000000000000000000..31cc4794b7b83695f9bea33ffb48340cd5e89713 --- /dev/null +++ b/parrot/lib/python3.10/site-packages/torch/share/cmake/Tensorpipe/TensorpipeTargets.cmake @@ -0,0 +1,114 @@ +# Generated by CMake + +if("${CMAKE_MAJOR_VERSION}.${CMAKE_MINOR_VERSION}" LESS 2.5) + message(FATAL_ERROR "CMake >= 2.6.0 required") +endif() +cmake_policy(PUSH) +cmake_policy(VERSION 2.6...3.17) +#---------------------------------------------------------------- +# Generated CMake target import file. +#---------------------------------------------------------------- + +# Commands may need to know the format version. +set(CMAKE_IMPORT_FILE_VERSION 1) + +# Protect against multiple inclusion, which would fail when already imported targets are added once more. +set(_targetsDefined) +set(_targetsNotDefined) +set(_expectedTargets) +foreach(_expectedTarget tensorpipe_uv tensorpipe tensorpipe_cuda) + list(APPEND _expectedTargets ${_expectedTarget}) + if(NOT TARGET ${_expectedTarget}) + list(APPEND _targetsNotDefined ${_expectedTarget}) + endif() + if(TARGET ${_expectedTarget}) + list(APPEND _targetsDefined ${_expectedTarget}) + endif() +endforeach() +if("${_targetsDefined}" STREQUAL "${_expectedTargets}") + unset(_targetsDefined) + unset(_targetsNotDefined) + unset(_expectedTargets) + set(CMAKE_IMPORT_FILE_VERSION) + cmake_policy(POP) + return() +endif() +if(NOT "${_targetsDefined}" STREQUAL "") + message(FATAL_ERROR "Some (but not all) targets in this export set were already defined.\nTargets Defined: ${_targetsDefined}\nTargets not yet defined: ${_targetsNotDefined}\n") +endif() +unset(_targetsDefined) +unset(_targetsNotDefined) +unset(_expectedTargets) + + +# Compute the installation prefix relative to this file. 
+get_filename_component(_IMPORT_PREFIX "${CMAKE_CURRENT_LIST_FILE}" PATH) +get_filename_component(_IMPORT_PREFIX "${_IMPORT_PREFIX}" PATH) +get_filename_component(_IMPORT_PREFIX "${_IMPORT_PREFIX}" PATH) +get_filename_component(_IMPORT_PREFIX "${_IMPORT_PREFIX}" PATH) +if(_IMPORT_PREFIX STREQUAL "/") + set(_IMPORT_PREFIX "") +endif() + +# Create imported target tensorpipe_uv +add_library(tensorpipe_uv STATIC IMPORTED) + +set_target_properties(tensorpipe_uv PROPERTIES + INTERFACE_LINK_LIBRARIES "\$;\$;\$" +) + +# Create imported target tensorpipe +add_library(tensorpipe STATIC IMPORTED) + +set_target_properties(tensorpipe PROPERTIES + INTERFACE_INCLUDE_DIRECTORIES "${_IMPORT_PREFIX}/include" + INTERFACE_LINK_LIBRARIES "\$" +) + +# Create imported target tensorpipe_cuda +add_library(tensorpipe_cuda STATIC IMPORTED) + +set_target_properties(tensorpipe_cuda PROPERTIES + INTERFACE_INCLUDE_DIRECTORIES "/usr/local/cuda/include" + INTERFACE_LINK_LIBRARIES "tensorpipe;/usr/local/cuda/lib64/libcudart.so" +) + +if(CMAKE_VERSION VERSION_LESS 2.8.12) + message(FATAL_ERROR "This file relies on consumers using CMake 2.8.12 or greater.") +endif() + +# Load information for each installed configuration. +get_filename_component(_DIR "${CMAKE_CURRENT_LIST_FILE}" PATH) +file(GLOB CONFIG_FILES "${_DIR}/TensorpipeTargets-*.cmake") +foreach(f ${CONFIG_FILES}) + include(${f}) +endforeach() + +# Cleanup temporary variables. +set(_IMPORT_PREFIX) + +# Loop over all imported files and verify that they actually exist +foreach(target ${_IMPORT_CHECK_TARGETS} ) + foreach(file ${_IMPORT_CHECK_FILES_FOR_${target}} ) + if(NOT EXISTS "${file}" ) + message(FATAL_ERROR "The imported target \"${target}\" references the file + \"${file}\" +but this file does not exist. Possible reasons include: +* The file was deleted, renamed, or moved to another location. +* An install or uninstall procedure did not complete successfully. 
+* The installation package was faulty and contained + \"${CMAKE_CURRENT_LIST_FILE}\" +but not all the files it references. +") + endif() + endforeach() + unset(_IMPORT_CHECK_FILES_FOR_${target}) +endforeach() +unset(_IMPORT_CHECK_TARGETS) + +# This file does not depend on other imported targets which have +# been exported from the same project but in a separate export set. + +# Commands beyond this point should not need to know the version. +set(CMAKE_IMPORT_FILE_VERSION) +cmake_policy(POP) diff --git a/videochat2/lib/python3.10/site-packages/tensorflow/python/_pywrap_tfcompile.so b/videochat2/lib/python3.10/site-packages/tensorflow/python/_pywrap_tfcompile.so new file mode 100644 index 0000000000000000000000000000000000000000..5cccdc9b0f1bd505cdbde7d1e0dd908fa32eb0f3 --- /dev/null +++ b/videochat2/lib/python3.10/site-packages/tensorflow/python/_pywrap_tfcompile.so @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:ae4d5f8a470c79ac912e7f51ef2a303a8cd6fa1b19cce11c9ac26333f82b2e52 +size 155160 diff --git a/videochat2/lib/python3.10/site-packages/tensorflow/python/distribute/__pycache__/central_storage_strategy.cpython-310.pyc b/videochat2/lib/python3.10/site-packages/tensorflow/python/distribute/__pycache__/central_storage_strategy.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..411d343a1e54cd5447145a1c0f0af5061baeac8d Binary files /dev/null and b/videochat2/lib/python3.10/site-packages/tensorflow/python/distribute/__pycache__/central_storage_strategy.cpython-310.pyc differ diff --git a/videochat2/lib/python3.10/site-packages/tensorflow/python/distribute/__pycache__/cross_device_ops.cpython-310.pyc b/videochat2/lib/python3.10/site-packages/tensorflow/python/distribute/__pycache__/cross_device_ops.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..8fe83a47bd20fa0560bfdea1191f32bda72b1296 Binary files /dev/null and 
b/videochat2/lib/python3.10/site-packages/tensorflow/python/distribute/__pycache__/cross_device_ops.cpython-310.pyc differ diff --git a/videochat2/lib/python3.10/site-packages/tensorflow/python/distribute/__pycache__/cross_device_utils.cpython-310.pyc b/videochat2/lib/python3.10/site-packages/tensorflow/python/distribute/__pycache__/cross_device_utils.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..09e4308b047b84983d35416dcb094b54c83a76d8 Binary files /dev/null and b/videochat2/lib/python3.10/site-packages/tensorflow/python/distribute/__pycache__/cross_device_utils.cpython-310.pyc differ diff --git a/videochat2/lib/python3.10/site-packages/tensorflow/python/distribute/__pycache__/distribute_config.cpython-310.pyc b/videochat2/lib/python3.10/site-packages/tensorflow/python/distribute/__pycache__/distribute_config.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..829c655a7495234b26469bcdd3a13c57034c8808 Binary files /dev/null and b/videochat2/lib/python3.10/site-packages/tensorflow/python/distribute/__pycache__/distribute_config.cpython-310.pyc differ diff --git a/videochat2/lib/python3.10/site-packages/tensorflow/python/distribute/__pycache__/distribute_utils.cpython-310.pyc b/videochat2/lib/python3.10/site-packages/tensorflow/python/distribute/__pycache__/distribute_utils.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..e63b4811ad6fd0851267064dff257a998151dc7d Binary files /dev/null and b/videochat2/lib/python3.10/site-packages/tensorflow/python/distribute/__pycache__/distribute_utils.cpython-310.pyc differ diff --git a/videochat2/lib/python3.10/site-packages/tensorflow/python/distribute/__pycache__/input_lib.cpython-310.pyc b/videochat2/lib/python3.10/site-packages/tensorflow/python/distribute/__pycache__/input_lib.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..b291ffbeb202032cd56a06f08c97bc17011f4c3a Binary files 
/dev/null and b/videochat2/lib/python3.10/site-packages/tensorflow/python/distribute/__pycache__/input_lib.cpython-310.pyc differ diff --git a/videochat2/lib/python3.10/site-packages/tensorflow/python/distribute/__pycache__/input_ops.cpython-310.pyc b/videochat2/lib/python3.10/site-packages/tensorflow/python/distribute/__pycache__/input_ops.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..2b09ef94bfec8d51b3c41948e6ec3270061e942f Binary files /dev/null and b/videochat2/lib/python3.10/site-packages/tensorflow/python/distribute/__pycache__/input_ops.cpython-310.pyc differ diff --git a/videochat2/lib/python3.10/site-packages/tensorflow/python/distribute/__pycache__/input_util.cpython-310.pyc b/videochat2/lib/python3.10/site-packages/tensorflow/python/distribute/__pycache__/input_util.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..eaf71112d49f3c6b82b055ee37ab1939ae7f5907 Binary files /dev/null and b/videochat2/lib/python3.10/site-packages/tensorflow/python/distribute/__pycache__/input_util.cpython-310.pyc differ diff --git a/videochat2/lib/python3.10/site-packages/tensorflow/python/distribute/__pycache__/merge_call_interim.cpython-310.pyc b/videochat2/lib/python3.10/site-packages/tensorflow/python/distribute/__pycache__/merge_call_interim.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..dce58c6871cb1b1733f1c2118479a10d1bb75082 Binary files /dev/null and b/videochat2/lib/python3.10/site-packages/tensorflow/python/distribute/__pycache__/merge_call_interim.cpython-310.pyc differ diff --git a/videochat2/lib/python3.10/site-packages/tensorflow/python/distribute/__pycache__/mirrored_strategy.cpython-310.pyc b/videochat2/lib/python3.10/site-packages/tensorflow/python/distribute/__pycache__/mirrored_strategy.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..afac28e479304fdab78a3b056930fe90f1f36d56 Binary files /dev/null and 
b/videochat2/lib/python3.10/site-packages/tensorflow/python/distribute/__pycache__/mirrored_strategy.cpython-310.pyc differ diff --git a/videochat2/lib/python3.10/site-packages/tensorflow/python/distribute/__pycache__/multi_worker_test_base.cpython-310.pyc b/videochat2/lib/python3.10/site-packages/tensorflow/python/distribute/__pycache__/multi_worker_test_base.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..da0ca3e05694c1f0457390633f51c25356a8841d Binary files /dev/null and b/videochat2/lib/python3.10/site-packages/tensorflow/python/distribute/__pycache__/multi_worker_test_base.cpython-310.pyc differ diff --git a/videochat2/lib/python3.10/site-packages/tensorflow/python/distribute/__pycache__/multi_worker_util.cpython-310.pyc b/videochat2/lib/python3.10/site-packages/tensorflow/python/distribute/__pycache__/multi_worker_util.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..5bef4c568a301140b028d3d735d7f8b27139e20e Binary files /dev/null and b/videochat2/lib/python3.10/site-packages/tensorflow/python/distribute/__pycache__/multi_worker_util.cpython-310.pyc differ diff --git a/videochat2/lib/python3.10/site-packages/tensorflow/python/distribute/__pycache__/one_device_strategy.cpython-310.pyc b/videochat2/lib/python3.10/site-packages/tensorflow/python/distribute/__pycache__/one_device_strategy.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..95119f78f2f1969ac5f4b19990dc1bf09714a6ac Binary files /dev/null and b/videochat2/lib/python3.10/site-packages/tensorflow/python/distribute/__pycache__/one_device_strategy.cpython-310.pyc differ diff --git a/videochat2/lib/python3.10/site-packages/tensorflow/python/distribute/__pycache__/packed_distributed_variable.cpython-310.pyc b/videochat2/lib/python3.10/site-packages/tensorflow/python/distribute/__pycache__/packed_distributed_variable.cpython-310.pyc new file mode 100644 index 
0000000000000000000000000000000000000000..dbff4971bd8d3e2f5c58a6361091b5fa524b8672 Binary files /dev/null and b/videochat2/lib/python3.10/site-packages/tensorflow/python/distribute/__pycache__/packed_distributed_variable.cpython-310.pyc differ diff --git a/videochat2/lib/python3.10/site-packages/tensorflow/python/distribute/__pycache__/ps_values.cpython-310.pyc b/videochat2/lib/python3.10/site-packages/tensorflow/python/distribute/__pycache__/ps_values.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..48859c6783d9d139509a9c1a6d6699406f36703a Binary files /dev/null and b/videochat2/lib/python3.10/site-packages/tensorflow/python/distribute/__pycache__/ps_values.cpython-310.pyc differ diff --git a/videochat2/lib/python3.10/site-packages/tensorflow/python/distribute/__pycache__/reduce_util.cpython-310.pyc b/videochat2/lib/python3.10/site-packages/tensorflow/python/distribute/__pycache__/reduce_util.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..1d4b69d9d2919c262fbe36c77b14b16603f9e250 Binary files /dev/null and b/videochat2/lib/python3.10/site-packages/tensorflow/python/distribute/__pycache__/reduce_util.cpython-310.pyc differ diff --git a/videochat2/lib/python3.10/site-packages/tensorflow/python/distribute/__pycache__/sharded_variable.cpython-310.pyc b/videochat2/lib/python3.10/site-packages/tensorflow/python/distribute/__pycache__/sharded_variable.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..f238670564af23e4a9eaf86655d8b9d35663debf Binary files /dev/null and b/videochat2/lib/python3.10/site-packages/tensorflow/python/distribute/__pycache__/sharded_variable.cpython-310.pyc differ diff --git a/videochat2/lib/python3.10/site-packages/tensorflow/python/distribute/__pycache__/shared_variable_creator.cpython-310.pyc b/videochat2/lib/python3.10/site-packages/tensorflow/python/distribute/__pycache__/shared_variable_creator.cpython-310.pyc new file mode 100644 
index 0000000000000000000000000000000000000000..6d39f475fc551c934b59004b76871858d3cda80a Binary files /dev/null and b/videochat2/lib/python3.10/site-packages/tensorflow/python/distribute/__pycache__/shared_variable_creator.cpython-310.pyc differ diff --git a/videochat2/lib/python3.10/site-packages/tensorflow/python/distribute/__pycache__/strategy_combinations.cpython-310.pyc b/videochat2/lib/python3.10/site-packages/tensorflow/python/distribute/__pycache__/strategy_combinations.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..826a4c8655bbf540ea24803af4bcaf479d218434 Binary files /dev/null and b/videochat2/lib/python3.10/site-packages/tensorflow/python/distribute/__pycache__/strategy_combinations.cpython-310.pyc differ diff --git a/videochat2/lib/python3.10/site-packages/tensorflow/python/distribute/__pycache__/strategy_test_lib.cpython-310.pyc b/videochat2/lib/python3.10/site-packages/tensorflow/python/distribute/__pycache__/strategy_test_lib.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..176399c0246156cd41d090cd3e933df91d1a2fed Binary files /dev/null and b/videochat2/lib/python3.10/site-packages/tensorflow/python/distribute/__pycache__/strategy_test_lib.cpython-310.pyc differ diff --git a/videochat2/lib/python3.10/site-packages/tensorflow/python/distribute/__pycache__/summary_op_util.cpython-310.pyc b/videochat2/lib/python3.10/site-packages/tensorflow/python/distribute/__pycache__/summary_op_util.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..4a05de0c57444da2d297879be81cbe6b6df2944b Binary files /dev/null and b/videochat2/lib/python3.10/site-packages/tensorflow/python/distribute/__pycache__/summary_op_util.cpython-310.pyc differ diff --git a/videochat2/lib/python3.10/site-packages/tensorflow/python/distribute/__pycache__/tpu_strategy.cpython-310.pyc 
b/videochat2/lib/python3.10/site-packages/tensorflow/python/distribute/__pycache__/tpu_strategy.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..8c4dd2db67ba83630c334bbe5f18e0da7c3cbb2e Binary files /dev/null and b/videochat2/lib/python3.10/site-packages/tensorflow/python/distribute/__pycache__/tpu_strategy.cpython-310.pyc differ diff --git a/videochat2/lib/python3.10/site-packages/tensorflow/python/distribute/__pycache__/tpu_util.cpython-310.pyc b/videochat2/lib/python3.10/site-packages/tensorflow/python/distribute/__pycache__/tpu_util.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..aeadcba211047e3d440fc0f2c454b8056e4c45da Binary files /dev/null and b/videochat2/lib/python3.10/site-packages/tensorflow/python/distribute/__pycache__/tpu_util.cpython-310.pyc differ diff --git a/videochat2/lib/python3.10/site-packages/tensorflow/python/distribute/__pycache__/values.cpython-310.pyc b/videochat2/lib/python3.10/site-packages/tensorflow/python/distribute/__pycache__/values.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..abe2ead22e8995c4999e1a852f3b848a70796570 Binary files /dev/null and b/videochat2/lib/python3.10/site-packages/tensorflow/python/distribute/__pycache__/values.cpython-310.pyc differ diff --git a/videochat2/lib/python3.10/site-packages/tensorflow/python/distribute/__pycache__/values_v2.cpython-310.pyc b/videochat2/lib/python3.10/site-packages/tensorflow/python/distribute/__pycache__/values_v2.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..719096bd56c73d3d9c9d5d9759d52758755f62b1 Binary files /dev/null and b/videochat2/lib/python3.10/site-packages/tensorflow/python/distribute/__pycache__/values_v2.cpython-310.pyc differ diff --git a/videochat2/lib/python3.10/site-packages/tensorflow/python/training/adadelta.py b/videochat2/lib/python3.10/site-packages/tensorflow/python/training/adadelta.py new file mode 100644 
index 0000000000000000000000000000000000000000..f1d27ae66ad071243d84eabe494849caf6a7984a --- /dev/null +++ b/videochat2/lib/python3.10/site-packages/tensorflow/python/training/adadelta.py @@ -0,0 +1,198 @@ +# Copyright 2015 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== + +"""Adadelta for TensorFlow.""" +from tensorflow.python.framework import ops +from tensorflow.python.ops import gen_training_ops +from tensorflow.python.ops import math_ops +from tensorflow.python.training import optimizer +from tensorflow.python.util.tf_export import tf_export + + +@tf_export(v1=["train.AdadeltaOptimizer"]) +class AdadeltaOptimizer(optimizer.Optimizer): + """Optimizer that implements the Adadelta algorithm. + + References: + ADADELTA - An Adaptive Learning Rate Method: + [Zeiler, 2012](http://arxiv.org/abs/1212.5701) + ([pdf](http://arxiv.org/pdf/1212.5701v1.pdf)) + + @compatibility(TF2) + tf.compat.v1.train.AdadeltaOptimizer is compatible with eager mode and + `tf.function`. + When eager execution is enabled, `learning_rate`, `rho`, + and `epsilon` can each be a callable that + takes no arguments and returns the actual value to use. This can be useful + for changing these values across different invocations of optimizer + functions. 
+ + To switch to native TF2 style, use [`tf.keras.optimizers.Adadelta`] + (https://www.tensorflow.org/api_docs/python/tf/keras/optimizers/Adadelta) + instead. Please notice that due to the implementation differences, + `tf.keras.optimizers.Adadelta` and + `tf.compat.v1.train.AdadeltaOptimizer` may have slight differences in + floating point numerics even though the formula used for the variable + updates still matches. + + #### Structural mapping to native TF2 + + Before: + + ```python + optimizer = tf.compat.v1.train.AdadeltaOptimizer( + learning_rate=learning_rate, + rho=rho, + epsilon=epsilon) + ``` + + After: + + ```python + optimizer = tf.keras.optimizers.Adadelta( + learning_rate=learning_rate, + rho=rho, + epsilon=epsilon) + ``` + + #### How to map arguments + | TF1 Arg Name | TF2 Arg Name | Note | + | ------------------ | ------------- | ------------------------------- | + | `learning_rate` | `learning_rate`| Be careful of setting | + : : : learning_rate tensor value computed from the global step. : + : : : In TF1 this was usually meant to imply a dynamic learning rate and : + : : : would recompute in each step. In TF2 (eager + function) it will : + : : : treat it as a scalar value that only gets computed once instead of : + : : : a symbolic placeholder to be computed each time. : + | `rho` | `rho` | - | + | `epsilon` | `epsilon` | Default value is 1e-08 in TF1, | + : : : but 1e-07 in TF2. : + | `use_locking` | - | Not applicable in TF2. 
| + + #### Before & after usage example + Before: + + ```python + x = tf.Variable([1,2,3], dtype=tf.float32) + grad = tf.constant([0.1, 0.2, 0.3]) + optimizer = tf.compat.v1.train.AdadeltaOptimizer(learning_rate=0.001) + optimizer.apply_gradients(zip([grad], [x])) + ``` + + After: + + ```python + x = tf.Variable([1,2,3], dtype=tf.float32) + grad = tf.constant([0.1, 0.2, 0.3]) + optimizer = tf.keras.optimizers.Adadelta(learning_rate=0.001) + optimizer.apply_gradients(zip([grad], [x])) + ``` + + @end_compatibility + """ + + def __init__(self, learning_rate=0.001, rho=0.95, epsilon=1e-8, + use_locking=False, name="Adadelta"): + """Construct a new Adadelta optimizer. + + Args: + learning_rate: A `Tensor` or a floating point value. The learning rate. + To match the exact form in the original paper use 1.0. + rho: A `Tensor` or a floating point value. The decay rate. + epsilon: A `Tensor` or a floating point value. A constant epsilon used + to better conditioning the grad update. + use_locking: If `True` use locks for update operations. + name: Optional name prefix for the operations created when applying + gradients. Defaults to "Adadelta". + + + """ + super(AdadeltaOptimizer, self).__init__(use_locking, name) + self._lr = learning_rate + self._rho = rho + self._epsilon = epsilon + + # Tensor versions of the constructor arguments, created in _prepare(). 
+ self._lr_t = None + self._rho_t = None + self._epsilon_t = None + + def _create_slots(self, var_list): + for v in var_list: + self._zeros_slot(v, "accum", self._name) + self._zeros_slot(v, "accum_update", self._name) + + def _prepare(self): + lr = self._call_if_callable(self._lr) + rho = self._call_if_callable(self._rho) + epsilon = self._call_if_callable(self._epsilon) + + self._lr_t = ops.convert_to_tensor(lr, name="lr") + self._rho_t = ops.convert_to_tensor(rho, name="rho") + self._epsilon_t = ops.convert_to_tensor(epsilon, name="epsilon") + + def _apply_dense(self, grad, var): + accum = self.get_slot(var, "accum") + accum_update = self.get_slot(var, "accum_update") + return gen_training_ops.apply_adadelta( + var, + accum, + accum_update, + math_ops.cast(self._lr_t, var.dtype.base_dtype), + math_ops.cast(self._rho_t, var.dtype.base_dtype), + math_ops.cast(self._epsilon_t, var.dtype.base_dtype), + grad, + use_locking=self._use_locking) + + def _resource_apply_dense(self, grad, var): + accum = self.get_slot(var, "accum") + accum_update = self.get_slot(var, "accum_update") + return gen_training_ops.resource_apply_adadelta( + var.handle, + accum.handle, + accum_update.handle, + math_ops.cast(self._lr_t, grad.dtype.base_dtype), + math_ops.cast(self._rho_t, grad.dtype.base_dtype), + math_ops.cast(self._epsilon_t, grad.dtype.base_dtype), + grad, + use_locking=self._use_locking) + + def _apply_sparse(self, grad, var): + accum = self.get_slot(var, "accum") + accum_update = self.get_slot(var, "accum_update") + return gen_training_ops.sparse_apply_adadelta( + var, + accum, + accum_update, + math_ops.cast(self._lr_t, var.dtype.base_dtype), + math_ops.cast(self._rho_t, var.dtype.base_dtype), + math_ops.cast(self._epsilon_t, var.dtype.base_dtype), + grad.values, + grad.indices, + use_locking=self._use_locking) + + def _resource_apply_sparse(self, grad, var, indices): + accum = self.get_slot(var, "accum") + accum_update = self.get_slot(var, "accum_update") + return 
gen_training_ops.resource_sparse_apply_adadelta( + var.handle, + accum.handle, + accum_update.handle, + math_ops.cast(self._lr_t, grad.dtype), + math_ops.cast(self._rho_t, grad.dtype), + math_ops.cast(self._epsilon_t, grad.dtype), + grad, + indices, + use_locking=self._use_locking) diff --git a/videochat2/lib/python3.10/site-packages/tensorflow/python/training/checkpoint_ops.py b/videochat2/lib/python3.10/site-packages/tensorflow/python/training/checkpoint_ops.py new file mode 100644 index 0000000000000000000000000000000000000000..4813a8cd61b90ddd403b82dc19c7708dd4d17bd8 --- /dev/null +++ b/videochat2/lib/python3.10/site-packages/tensorflow/python/training/checkpoint_ops.py @@ -0,0 +1,482 @@ +# Copyright 2017 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# ============================================================================== +"""Operations for generating and loading vocab remappings.""" +import math + +from tensorflow.python.framework import dtypes +from tensorflow.python.framework import ops +from tensorflow.python.ops import array_ops +from tensorflow.python.ops import gen_checkpoint_ops +from tensorflow.python.ops import init_ops +from tensorflow.python.ops import math_ops + +ops.NotDifferentiable("GenerateVocabRemapping") +ops.NotDifferentiable("LoadAndRemapMatrix") + + +def _load_and_remap_matrix(ckpt_path, + old_tensor_name, + new_row_vocab_offset, + num_rows_to_load, + new_col_vocab_size, + initializer, + old_row_vocab_size=-1, + old_row_vocab_file=None, + new_row_vocab_file=None, + old_col_vocab_file=None, + new_col_vocab_file=None, + num_row_oov_buckets=0, + num_col_oov_buckets=0, + max_rows_in_memory=-1): + """Loads a 2-D (matrix) `Tensor` from checkpoint. + + Generates 1D-remappings for rows and columns using the + `GenerateVocabRemapping` op, and initializes any anticipated values with the + provided initializer. Then, uses the `LoadAndRemapMatrix` op to create a + matrix that loads existing values from the checkpoint, while filling out + "missing" values with the newly initialized values. See + contrib/framework/ops/checkpoint_ops.cc for more information on the wrapped + functionality (LoadAndRemapMatrix). This wrapper can be used to perform only + row remapping or only col remapping. If only row remapping is desired, + {new,old}_col_vocab_file should be `None`, and vice versa for column + remapping. + + NOTE: This only supports div-partitioning the vocabulary on the 1st dimension + (row axis) via `new_row_vocab_offset`. + + Args: + ckpt_path: Path to the TensorFlow checkpoint (version 2, `TensorBundle`) + from which the old matrix `Tensor` will be loaded. + old_tensor_name: Name of the 2-D `Tensor` to load from checkpoint. 
+ new_row_vocab_offset: A 0-indexed integer representing what line to + start reading at in the new row vocabulary. Used for partitioned + variables. + num_rows_to_load: Number of rows to load for the new vocabulary (note: to + support variable partitioning and partial loading, this does not need to + be the same as the number of entries in `new_row_vocab_file`). + new_col_vocab_size: Number of columns to load - should be the same as the + number of entries in `new_col_vocab_file`, since we don't support + partitioning along the column axis. + initializer: Callable initializer function that accepts a 1-D tensor as the + arg to specify the shape of the returned tensor. Used to initialize + missing values. + old_row_vocab_size: The number of entries to consider in the old vocabulary. + With the default value of -1, the entire old row vocabulary file will be + used. Otherwise, only the first `old_row_vocab_size` entries will be + considered for remapping.Must be smaller than the length of + `old_row_vocab_file`. NOTE: we do not provide an equivalent + `old_col_vocab_size` for classes. + old_row_vocab_file: A scalar `Tensor` of type `string` containing the + path to the old row vocabulary file. Can be None, which represents no + remapping on the row axis. + new_row_vocab_file: A scalar `Tensor` of type `string` containing the path + to the new row vocabulary file. Can be None, which represents no remapping + on the row axis - in which case, `new_row_vocab_offset` and + `num_rows_to_load` work under the assumption that the new row vocab is the + same as the old row vocab. + old_col_vocab_file: A scalar `Tensor` of type `string` containing the + path to the old column vocabulary file. Can be None, which represents no + remapping on the column axis. + new_col_vocab_file: A scalar `Tensor` of type `string` containing the path + to the new column vocabulary file. 
Can be None, which represents no + remapping on the column axis - in which case, `new_col_vocab_size` works + under the assumption that the new col vocab is the same as the old col + vocab. + num_row_oov_buckets: `int` specifying the number of out-of-vocabulary rows + to append. Must be >= 0. + num_col_oov_buckets: `int` specifying the number of out-of-vocabulary + columns to append. Must be >= 0. + max_rows_in_memory: `int` specifying the maximum number of rows to load from + the checkpoint at once. If less than or equal to 0, the entire matrix will + be loaded into memory. Setting this arg trades increased disk reads for + lower memory usage. + + Returns: + A Tensor of shape `[num_rows_to_load + num_row_oov_buckets, + new_col_vocab_size + num_col_oov_buckets]`, with values loaded from the + specified tensor in the checkpoint, and any missing or OOV values + initialized with the given `initializer`. + + Raises: + ValueError: If `num_row_oov_buckets` or `num_col_oov_buckets` < 0. + ValueError: If either `old_row_vocab_file` or `new_row_vocab_file` is + provided, while the other is not. Same for `old_col_vocab_file` and + `new_col_vocab_file`. + ValueError: If neither row vocabs or col vocabs are provided. + """ + if num_row_oov_buckets < 0: + raise ValueError("num_row_oov_buckets must be >= 0, but received %d" % + num_row_oov_buckets) + if num_col_oov_buckets < 0: + raise ValueError("num_col_oov_buckets must be >= 0, but received %d" % + num_col_oov_buckets) + + if bool(old_row_vocab_file) != bool(new_row_vocab_file): + raise ValueError( + "old_row_vocab_file and new_row_vocab_file must both be specified or " + "left unspecified. old_row_vocab_file='{}', new_row_vocab_file='{}'". + format(old_row_vocab_file, new_row_vocab_file)) + if bool(old_col_vocab_file) != bool(new_col_vocab_file): + raise ValueError( + "old_col_vocab_file and new_col_vocab_file must both be specified or " + "left unspecified. old_col_vocab_file='{}', new_col_vocab_file='{}'". 
+ format(old_col_vocab_file, new_col_vocab_file)) + + remap_rows = new_row_vocab_file and old_row_vocab_file + remap_cols = new_col_vocab_file and old_col_vocab_file + if not (remap_rows or remap_cols): + raise ValueError( + "Must provide either row or column vocab files. If no remapping is " + "necessary, consider using `tf.contrib.framework.init_from_checkpoint` " + "instead.") + + num_rows_present = num_rows_to_load + if remap_rows: + row_remapping, num_rows_present = ( + gen_checkpoint_ops.generate_vocab_remapping( + new_vocab_file=new_row_vocab_file, + old_vocab_file=old_row_vocab_file, + new_vocab_offset=new_row_vocab_offset, + num_new_vocab=num_rows_to_load, + old_vocab_size=old_row_vocab_size)) + else: + # Even when the rows are not being reordered, we still need to generate a + # remapping to account for initializing partitioned Variables (when + # new_row_vocab_offset is non-zero). + row_remapping = math_ops.range( + new_row_vocab_offset, + new_row_vocab_offset + num_rows_to_load, + dtype=dtypes.int64) + + col_remapping = [] + num_cols_present = new_col_vocab_size + if remap_cols: + col_remapping, num_cols_present = ( + gen_checkpoint_ops.generate_vocab_remapping( + new_vocab_file=new_col_vocab_file, + old_vocab_file=old_col_vocab_file, + new_vocab_offset=0, # Offset is unused for cols (no partitioning). + num_new_vocab=new_col_vocab_size)) + + init_vals = initializer([ + num_rows_to_load * new_col_vocab_size - + num_rows_present * num_cols_present, 1 + ]) + return_tensor = gen_checkpoint_ops.load_and_remap_matrix( + ckpt_path=ckpt_path, + old_tensor_name=old_tensor_name, + row_remapping=row_remapping, + col_remapping=col_remapping, + initializing_values=init_vals, + num_rows=num_rows_to_load, + num_cols=new_col_vocab_size, + max_rows_in_memory=max_rows_in_memory) + + # Add OOV row(s) and column(s). 
+ if num_row_oov_buckets > 0: + init_row_oov_val = initializer([num_row_oov_buckets, new_col_vocab_size]) + init_row_oov_val = ops.convert_to_tensor(init_row_oov_val) + return_tensor = array_ops.concat([return_tensor, init_row_oov_val], 0) + if num_col_oov_buckets > 0: + # We need to add any row OOV to the new column shape. + init_col_oov_val = initializer( + [num_rows_to_load + num_row_oov_buckets, num_col_oov_buckets]) + init_col_oov_val = ops.convert_to_tensor(init_col_oov_val) + return_tensor = array_ops.concat([return_tensor, init_col_oov_val], 1) + + return return_tensor + + +def _load_and_remap_matrix_initializer(ckpt_path, + old_tensor_name, + new_row_vocab_size, + new_col_vocab_size, + old_row_vocab_size=-1, + old_row_vocab_file=None, + new_row_vocab_file=None, + old_col_vocab_file=None, + new_col_vocab_file=None, + num_row_oov_buckets=0, + num_col_oov_buckets=0, + initializer=None, + max_rows_in_memory=-1): + r"""Returns a var initializer for loading and remapping a 2-D (matrix) tensor. + + The returned initializer loads a 2-D (matrix) `Tensor` with name + `old_tensor_name` from the checkpoint at `ckpt_path`. It will reorder the + rows/columns according to the specified vocab files and append additional + out-of-vocabulary rows/columns according to the number of OOV buckets. + + The format of the file at the `{old,new}_{row,col}_vocab_file` path should be + a text file, with each line containing a single entity within the vocabulary. + Let the function `line_of(f, "x")` return the 0-indexed line number of the + entity "x" in file f, and the function `entity_at(f, i)` return the entity at + line i of file f. Then, row i of the new output matrix will be taken from row + `line_of(old_row_vocab_file, entity_at(new_row_vocab_file, i))` of the old + matrix. If any entity in `new_row_vocab_file` is not found in + `old_row_vocab_file`, that row is considered a "missing" row, and its values + will be initialized using the `initializer` arg. 
The same logic also applies + for the columns. + + For example, assuming that: + + * `old_row_vocab_file` contains "mercury\nvenus\nmars" + * `new_row_vocab_file` contains "venus\njupiter\nmercury" + * `old_col_vocab_file` contains "good\nbetter\nbest" + * `new_col_vocab_file` contains "good\nbest\nfantastic" + * `initializer` returns the natural numbers `[1, 2, 3, 4, ...]` + * `w(i, j)` represents the value from row i, column j of the old matrix + + Then the new output matrix will look like: + + `[[w(1, 0), w(1, 2), 1], + [2, 3, 4], + [w(0, 0), w(0, 2), 5]]` + + If we further specify that: + + * `num_row_oov_buckets` == 2 + * `num_col_oov_buckets` == 1 + + Then the new output matrix will look like: + + `[[w(1, 0), w(1, 2), 1, 12], + [2, 3, 4, 13], + [w(0, 0), w(0, 2), 5, 14], + [6, 7, 8, 15], + [9, 10, 11, 16]]` + + If `{old,new}_row_vocab_file` are None, we assume that the old and new row + vocab files are the same, and no row remapping is done. If + `{old,new}_col_vocab_file` are None, we assume that the old and new column + vocab files are the same, and no column remapping is done. + + The returned initializer only supports div-partitioning along the row axis. It + does not support partitioning along the column axis (as this is not common in + practice) or mod-partitioning. + + NOTE: When this is used to warm-start variables, client code should use + `tf.lookup.index_table_from_tensor()` like + contrib/layers/python/layers/feature_column.py does, as opposed to + `tf.feature_to_id()` - in order to ensure the underlying lookup tables are the + same. + + Args: + ckpt_path: Path to the TensorFlow checkpoint (version 2, `TensorBundle`) + from which the old matrix `Tensor` will be loaded. + old_tensor_name: Name of the 2-D `Tensor` to load from checkpoint. + new_row_vocab_size: `int` specifying the number of entries in + `new_row_vocab_file`. 
If no row remapping is needed (no row vocab + provided), this should be equal to the number of rows to load from the old + matrix (which can theoretically be smaller than the number of rows in the + old matrix). + new_col_vocab_size: `int` specifying the number of entries in + `new_col_vocab_file`. If no column remapping is needed (no column vocab + provided), this should be equal to the number of columns in the old + matrix. + old_row_vocab_size: The number of entries to consider in the old vocabulary. + With the default value of -1, the entire old row vocabulary file will be + used. Otherwise, only the first `old_row_vocab_size` entries will be + considered for remapping.Must be smaller than the length of + `old_row_vocab_file`. NOTE: we do not provide an equivalent + `old_col_vocab_size` for classes. + old_row_vocab_file: A scalar `Tensor` of type `string` containing the + path to the old row vocabulary file. Can be None, which represents no + remapping on the row axis. + new_row_vocab_file: A scalar `Tensor` of type `string` containing the path + to the new row vocabulary file. Can be None, which represents no remapping + on the row axis. + old_col_vocab_file: A scalar `Tensor` of type `string` containing the + path to the old column vocabulary file. Can be None, which represents no + remapping on the column axis. + new_col_vocab_file: A scalar `Tensor` of type `string` containing the path + to the new column vocabulary file. Can be None, which represents no + remapping on the column axis. + num_row_oov_buckets: `int` specifying the number of out-of-vocabulary rows + to append. Must be >= 0. + num_col_oov_buckets: `int` specifying the number of out-of-vocabulary + columns to append. Must be >= 0. + initializer: Initializer function to initialize missing values. Accepts a + 1-D tensor as the arg to specify the shape of the returned tensor. If + `None`, defaults to using `zeros_initializer()`. 
+ max_rows_in_memory: `int` specifying the maximum number of rows to load from + the checkpoint at once. If less than or equal to 0, the entire matrix will + be loaded into memory. Setting this arg trades increased disk reads for + lower memory usage. + + Returns: + A variable initializer function that should be used to initialize a + (potentially partitioned) `Variable` whose complete shape is + `[new_row_vocab_size + num_row_oov_buckets, new_col_vocab_size + + num_col_oov_buckets]`. + + Raises: + TypeError: If `initializer` is specified but not callable. + """ + if initializer is None: + # TODO(b/25671353): Consider using sqrt(6/(fan_in + fan_out)) instead, from + # Glorot and Bengio, 2010. + initializer = init_ops.zeros_initializer() + + if not callable(initializer): + raise TypeError( + "initializer must be callable, instead of being {} of type {}.".format( + initializer, type(initializer))) + + def _initializer(shape, dtype=dtypes.float32, partition_info=None): + """Variable initializer. + + Args: + shape: Shape of `Tensor` to return. Should include OOV on both axes. + dtype: Must be float32. + partition_info: variable_scope._PartitionInfo. + + Returns: + `Tensor` of shape `shape`. + + Raises: + TypeError: If `dtype` is anything other than float32. + ValueError: For shape mismatch upon invocation. + """ + # Sanity checks. + if dtype != dtypes.float32: + raise TypeError( + "Currently, only float32 is supported. 
Received dtype: {}".format( + dtype)) + if len(shape) != 2: + raise ValueError("Expected 2-dim shape, but received: {}".format(shape)) + if shape[0] <= 0: + raise ValueError( + "Expected 1st dim of shape to be > 0, but received shape: {}".format( + shape)) + if shape[1] != (new_col_vocab_size + num_col_oov_buckets): + raise ValueError( + "Expected 2nd dim of shape to be new_col_vocab_size ({}) + " + "num_col_oov_buckets ({}) = {}, but received shape: {}".format( + new_col_vocab_size, num_col_oov_buckets, + new_col_vocab_size + num_col_oov_buckets, shape)) + + offset = 0 + if partition_info is not None: + offset = partition_info.single_offset(shape) + + if offset + shape[0] > new_row_vocab_size + num_row_oov_buckets: + raise ValueError( + "Trying to initialize {} additional rows after {} rows have already " + "been initialized, which would exceed expected total row count of " + "new_row_vocab_size ({}) + num_row_oov_buckets ({}) = {}.".format( + shape[0], offset, new_row_vocab_size, num_row_oov_buckets, + new_row_vocab_size + num_row_oov_buckets)) + + row_oov_buckets_to_use = min(shape[0], + max(0, offset + shape[0] - new_row_vocab_size)) + num_rows_to_load = shape[0] - row_oov_buckets_to_use + + # We may be operating on an OOV-only partition, in which case we newly + # initialize all rows of this partition. 
    # This partition may consist purely of OOV rows (it starts past the end of
    # the new vocab), in which case every row is freshly initialized and there
    # is nothing to remap from the checkpoint.
    if offset > new_row_vocab_size:
      if shape[0] != row_oov_buckets_to_use:
        raise ValueError(
            "Partitioned variable offset is greater than new vocab size and "
            "not operating on OOV-only partition.")
      return initializer(shape)

    return _load_and_remap_matrix(
        ckpt_path=ckpt_path,
        old_tensor_name=old_tensor_name,
        new_row_vocab_offset=offset,
        num_rows_to_load=num_rows_to_load,
        new_col_vocab_size=new_col_vocab_size,
        initializer=initializer,
        old_row_vocab_size=old_row_vocab_size,
        old_row_vocab_file=old_row_vocab_file,
        new_row_vocab_file=new_row_vocab_file,
        old_col_vocab_file=old_col_vocab_file,
        new_col_vocab_file=new_col_vocab_file,
        num_row_oov_buckets=row_oov_buckets_to_use,
        num_col_oov_buckets=num_col_oov_buckets,
        max_rows_in_memory=max_rows_in_memory)

  return _initializer


def _load_embedding_initializer(ckpt_path,
                                embedding_tensor_name,
                                new_vocab_size,
                                embedding_dim,
                                old_vocab_file,
                                new_vocab_file,
                                old_vocab_size=-1,
                                num_oov_buckets=0,
                                initializer=None,
                                max_rows_in_memory=-1):
  """Returns a variable initializer for loading pre-trained embeddings.

  Wrapper around `load_and_remap_matrix_initializer()` specialized for loading
  embedding weights and remapping according to the provided vocab files. See
  docs for `load_and_remap_matrix_initializer()` for more details.

  NOTE: Only for use with div-partitioned variables / vocabularies.

  Args:
    ckpt_path: Path to the TensorFlow checkpoint (version 2, `TensorBundle`)
      from which the old matrix `Tensor` will be loaded.
    embedding_tensor_name: Name of the 2-D `Tensor` to load from checkpoint.
    new_vocab_size: Number of entries in the new vocab.
    embedding_dim: `int` specifying the dimension of the embedding vectors from
      the checkpoint. Must match the number of columns in the old embedding
      matrix.
    old_vocab_file: A scalar `Tensor` of type `string` containing the
      path to the old vocabulary file.
    new_vocab_file: A scalar `Tensor` of type `string` containing the
      path to the new vocabulary file.
    old_vocab_size: The number of entries to consider in the old vocabulary.
      With the default value of -1, the entire old row vocabulary file will be
      used. Otherwise, only the first `old_vocab_size` entries will be
      considered for remapping. Must be smaller than the length of
      `old_row_vocab_file`.
    num_oov_buckets: `int` specifying the number of out-of-vocabulary
      buckets to use. Must be >= 0.
    initializer: Initializer function that accepts a 1-D tensor as the arg to
      specify the shape of the returned tensor. If `None`, defaults to using
      `truncated_normal_initializer()`.
    max_rows_in_memory: `int` specifying the maximum number of rows to load from
      the checkpoint at once. If less than or equal to 0, the entire matrix will
      be loaded into memory. Setting this arg trades increased disk reads for
      lower memory usage.

  Returns:
    A variable initializer function.
  """
  if initializer is None:
    # TODO(b/25671353): This should be kept in sync with the stddev used by
    # feature_column.py's _EmbeddingColumn.
    initializer = init_ops.truncated_normal_initializer(
        stddev=1.0 / math.sqrt(embedding_dim))

  # Embeddings only ever remap rows (vocab entries); columns are the embedding
  # dimensions themselves, so no column vocab files or column OOV buckets.
  return _load_and_remap_matrix_initializer(
      ckpt_path=ckpt_path,
      old_tensor_name=embedding_tensor_name,
      new_row_vocab_size=new_vocab_size,
      new_col_vocab_size=embedding_dim,
      old_row_vocab_size=old_vocab_size,
      old_row_vocab_file=old_vocab_file,
      new_row_vocab_file=new_vocab_file,
      old_col_vocab_file=None,
      new_col_vocab_file=None,
      num_row_oov_buckets=num_oov_buckets,
      num_col_oov_buckets=0,
      initializer=initializer,
      max_rows_in_memory=max_rows_in_memory)
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Coordinator to help multiple threads stop when requested."""
import contextlib
import sys
import threading
import time

from tensorflow.python.framework import errors
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.util import compat
from tensorflow.python.util.tf_export import tf_export


@tf_export("train.Coordinator")
class Coordinator:
  """A coordinator for threads.

  This class implements a simple mechanism to coordinate the termination of a
  set of threads.

  #### Usage:

  ```python
  # Create a coordinator.
  coord = Coordinator()
  # Start a number of threads, passing the coordinator to each of them.
  ...start thread 1...(coord, ...)
  ...start thread N...(coord, ...)
  # Wait for all the threads to terminate.
  coord.join(threads)
  ```

  Any of the threads can call `coord.request_stop()` to ask for all the threads
  to stop. To cooperate with the requests, each thread must check for
  `coord.should_stop()` on a regular basis; it returns `True` as soon as
  `coord.request_stop()` has been called:

  ```python
  while not coord.should_stop():
    ...do some work...
  ```

  #### Exception handling:

  A thread can report an exception to the coordinator as part of the
  `request_stop()` call; the exception is then re-raised from the
  `coord.join()` call. To simplify thread code, the `stop_on_exception()`
  context manager automatically requests a stop when an exception is raised:

  ```python
  with coord.stop_on_exception():
    while not coord.should_stop():
      ...do some work...
  ```

  #### Grace period for stopping:

  After a thread has called `coord.request_stop()` the other threads have a
  fixed time to stop, called the 'stop grace period' (default 2 minutes). If
  any of the threads is still alive after the grace period expires,
  `coord.join()` raises a `RuntimeError` reporting the laggards:

  ```python
  try:
    ...
    coord = Coordinator()
    ...start threads, passing the coordinator to each of them...
    # Wait for all the threads to terminate, give them 10s grace period.
    coord.join(threads, stop_grace_period_secs=10)
  except RuntimeError:
    ...one of the threads took more than 10s to stop after request_stop()
    ...was called.
  except Exception:
    ...exception that was passed to coord.request_stop()
  ```
  """

  def __init__(self, clean_stop_exception_types=None):
    """Create a new Coordinator.

    Args:
      clean_stop_exception_types: Optional tuple of Exception types that should
        cause a clean stop of the coordinator. If an exception of one of these
        types is reported to `request_stop(ex)` the coordinator will behave as
        if `request_stop(None)` was called. Defaults to
        `(tf.errors.OutOfRangeError,)` which is used by input queues to signal
        the end of input. When feeding training data from a Python iterator it
        is common to add `StopIteration` to this list.
    """
    if clean_stop_exception_types is None:
      clean_stop_exception_types = (errors.OutOfRangeError,)
    self._clean_stop_exception_types = tuple(clean_stop_exception_types)
    # Protects all attributes.
    self._lock = threading.Lock()
    # Event set when threads must stop.
    self._stop_event = threading.Event()
    # Python exc_info to report.
    # If not None, it should hold the returned value of sys.exc_info(), which is
    # a tuple containing exception (type, value, traceback).
    self._exc_info_to_raise = None
    # True if we have called join() already.
    self._joined = False
    # Set of threads registered for joining when join() is called. These
    # threads will be joined in addition to the threads passed to the join()
    # call. It's ok if threads are both registered and passed to the join()
    # call.
    self._registered_threads = set()

  def _filter_exception(self, ex):
    """Check if the exception indicated in 'ex' should be ignored.

    This method examines `ex` to check if it is an exception that should be
    reported to the users. If yes, it returns `ex` as is, otherwise it returns
    None.

    The code returns None for exception types listed in
    `_clean_stop_exception_types`.

    Args:
      ex: None, an `Exception`, or a Python `exc_info` tuple as returned by
        `sys.exc_info()`.

    Returns:
      ex or None.
    """
    # An exc_info tuple carries the exception instance in slot 1.
    if isinstance(ex, tuple):
      ex2 = ex[1]
    else:
      ex2 = ex
    if isinstance(ex2, self._clean_stop_exception_types):
      # Ignore the exception.
      ex = None
    return ex

  def request_stop(self, ex=None):
    """Request that the threads stop.

    After this is called, calls to `should_stop()` will return `True`.

    Note: If an exception is being passed in, it must be in the context of
    handling the exception (i.e. `try: ... except Exception as ex: ...`) and
    not a newly created one.

    Args:
      ex: Optional `Exception`, or Python `exc_info` tuple as returned by
        `sys.exc_info()`. If this is the first call to `request_stop()` the
        corresponding exception is recorded and re-raised from `join()`.
    """
    with self._lock:
      ex = self._filter_exception(ex)
      # If we have already joined the coordinator the exception will not have a
      # chance to be reported, so just raise it normally. This can happen if
      # you continue to use a session after having stopped and joined the
      # coordinator threads.
      if self._joined:
        if isinstance(ex, tuple):
          _, ex_instance, _ = ex
          raise ex_instance
        elif ex is not None:
          # NOTE(touts): This is bogus if request_stop() is not called
          # from the exception handler that raised ex.
          _, ex_instance, _ = sys.exc_info()
          raise ex_instance
      if not self._stop_event.is_set():
        # Only the first reported exception is recorded; later ones are
        # silently dropped because join() can re-raise only one.
        if ex and self._exc_info_to_raise is None:
          if isinstance(ex, tuple):
            logging.info("Error reported to Coordinator: %s",
                         compat.as_str_any(ex[1]),
                         exc_info=ex)
            self._exc_info_to_raise = ex
          else:
            logging.info("Error reported to Coordinator: %s, %s",
                         type(ex),
                         compat.as_str_any(ex))
            self._exc_info_to_raise = sys.exc_info()
          # self._exc_info_to_raise should contain a tuple containing exception
          # (type, value, traceback)
          if (len(self._exc_info_to_raise) != 3 or
              not self._exc_info_to_raise[0] or
              not self._exc_info_to_raise[1]):
            # Raise, catch and record the exception here so that error happens
            # where expected.
            try:
              raise ValueError(
                  "ex must be a tuple or sys.exc_info must return the current "
                  "exception: %s"
                  % self._exc_info_to_raise)
            except ValueError:
              # Record this error so it kills the coordinator properly.
              # NOTE(touts): As above, this is bogus if request_stop() is not
              # called from the exception handler that raised ex.
              self._exc_info_to_raise = sys.exc_info()

        self._stop_event.set()

  def clear_stop(self):
    """Clears the stop flag.

    After this is called, calls to `should_stop()` will return `False`.
    """
    with self._lock:
      self._joined = False
      self._exc_info_to_raise = None
      if self._stop_event.is_set():
        self._stop_event.clear()

  def should_stop(self):
    """Check if stop was requested.

    Returns:
      True if a stop was requested.
    """
    return self._stop_event.is_set()

  @contextlib.contextmanager
  def stop_on_exception(self):
    """Context manager to request stop when an Exception is raised.

    Code that uses a coordinator must catch exceptions and pass
    them to the `request_stop()` method to stop the other threads
    managed by the coordinator.

    This context handler simplifies the exception handling.
    Use it as follows:

    ```python
    with coord.stop_on_exception():
      # Any exception raised in the body of the with
      # clause is reported to the coordinator before terminating
      # the execution of the body.
      ...body...
    ```

    This is completely equivalent to the slightly longer code:

    ```python
    try:
      ...body...
    except:
      coord.request_stop(sys.exc_info())
    ```

    Yields:
      nothing.
    """
    try:
      yield
    except:  # pylint: disable=bare-except
      # Intentionally bare: every exception (including KeyboardInterrupt)
      # must be forwarded to the coordinator.
      self.request_stop(ex=sys.exc_info())

  def wait_for_stop(self, timeout=None):
    """Wait till the Coordinator is told to stop.

    Args:
      timeout: Float. Sleep for up to that many seconds waiting for
        should_stop() to become True.

    Returns:
      True if the Coordinator is told to stop, False if the timeout expired.
    """
    return self._stop_event.wait(timeout)

  def register_thread(self, thread):
    """Register a thread to join.

    Args:
      thread: A Python thread to join.
    """
    with self._lock:
      self._registered_threads.add(thread)

  def join(self, threads=None, stop_grace_period_secs=120,
           ignore_live_threads=False):
    """Wait for threads to terminate.

    This call blocks until a set of threads have terminated. The set of threads
    is the union of the threads passed in the `threads` argument and the list
    of threads that registered with the coordinator by calling
    `Coordinator.register_thread()`.

    After the threads stop, if an `exc_info` was passed to `request_stop`, that
    exception is re-raised.

    Grace period handling: When `request_stop()` is called, threads are given
    'stop_grace_period_secs' seconds to terminate. If any of them is still
    alive after that period expires, a `RuntimeError` is raised. Note that if
    an `exc_info` was passed to `request_stop()` then it is raised instead of
    that `RuntimeError`.

    Args:
      threads: List of `threading.Threads`. The started threads to join in
        addition to the registered threads.
      stop_grace_period_secs: Number of seconds given to threads to stop after
        `request_stop()` has been called.
      ignore_live_threads: If `False`, raises an error if any of the threads are
        still alive after `stop_grace_period_secs`.

    Raises:
      RuntimeError: If any thread is still alive after `request_stop()`
        is called and the grace period expires.
    """
    # Threads registered after this call will not be joined.
    with self._lock:
      if threads is None:
        threads = self._registered_threads
      else:
        threads = self._registered_threads.union(set(threads))
      # Copy the set into a list to avoid race conditions where a new thread
      # is added while we are waiting.
      threads = list(threads)

    # Wait for all threads to stop or for request_stop() to be called.
    while any(t.is_alive() for t in threads) and not self.wait_for_stop(1.0):
      pass

    # If any thread is still alive, wait for the grace period to expire.
    # By the time this check is executed, threads may still be shutting down,
    # so we add a sleep of increasing duration to give them a chance to shut
    # down without losing too many cycles.
    # The sleep duration is limited to the remaining grace duration.
    stop_wait_secs = 0.001
    while any(t.is_alive() for t in threads) and stop_grace_period_secs >= 0.0:
      time.sleep(stop_wait_secs)
      stop_grace_period_secs -= stop_wait_secs
      stop_wait_secs = 2 * stop_wait_secs
      # Keep the waiting period within sane bounds.
      # The minimum value is to avoid decreasing stop_wait_secs to a value
      # that could cause stop_grace_period_secs to remain unchanged.
      stop_wait_secs = max(min(stop_wait_secs, stop_grace_period_secs), 0.001)

    # List the threads still alive after the grace period.
    stragglers = [t.name for t in threads if t.is_alive()]

    # Terminate with an exception if appropriate.
    with self._lock:
      self._joined = True
      self._registered_threads = set()
      if self._exc_info_to_raise:
        _, ex_instance, _ = self._exc_info_to_raise
        raise ex_instance
      elif stragglers:
        if ignore_live_threads:
          logging.info("Coordinator stopped with threads still running: %s",
                       " ".join(stragglers))
        else:
          raise RuntimeError(
              "Coordinator stopped with threads still running: %s" %
              " ".join(stragglers))

  @property
  def joined(self):
    # True once join() has completed; request_stop() then raises directly.
    return self._joined

  def raise_requested_exception(self):
    """If an exception has been passed to `request_stop`, this raises it."""
    with self._lock:
      if self._exc_info_to_raise:
        _, ex_instance, _ = self._exc_info_to_raise
        raise ex_instance


# Threads for the standard services.
@tf_export(v1=["train.LooperThread"])
class LooperThread(threading.Thread):
  """A thread that runs code repeatedly, optionally on a timer.

  This thread class is intended to be used with a `Coordinator`. It repeatedly
  runs code specified either as `target` and `args` or by the `run_loop()`
  method.

  Before each run the thread checks if the coordinator has requested stop. In
  that case the looper thread terminates immediately.

  If the code being run raises an exception, that exception is reported to the
  coordinator and the thread terminates. The coordinator will then request all
  the other threads it coordinates to stop.

  You typically pass looper threads to the supervisor `Join()` method.
  """

  def __init__(self, coord, timer_interval_secs, target=None, args=None,
               kwargs=None):
    """Create a LooperThread.

    Args:
      coord: A Coordinator.
      timer_interval_secs: Time boundaries at which to call Run(), or None
        if it should be called back to back.
      target: Optional callable object that will be executed in the thread.
      args: Optional arguments to pass to `target` when calling it.
      kwargs: Optional keyword arguments to pass to `target` when calling it.

    Raises:
      ValueError: If one of the arguments is invalid.
    """
    if not isinstance(coord, Coordinator):
      raise ValueError("'coord' argument must be a Coordinator: %s" % coord)
    super(LooperThread, self).__init__()
    # Daemon so a forgotten looper never blocks interpreter shutdown.
    self.daemon = True
    self._coord = coord
    self._timer_interval_secs = timer_interval_secs
    self._target = target
    if self._target:
      self._args = args or ()
      self._kwargs = kwargs or {}
    elif args or kwargs:
      raise ValueError("'args' and 'kwargs' argument require that you also "
                       "pass 'target'")
    self._coord.register_thread(self)

  @staticmethod
  def loop(coord, timer_interval_secs, target, args=None, kwargs=None):
    """Start a LooperThread that calls a function periodically.

    If `timer_interval_secs` is None the thread calls `target(args)`
    repeatedly. Otherwise `target(args)` is called every `timer_interval_secs`
    seconds. The thread terminates when a stop of the coordinator is
    requested.

    Args:
      coord: A Coordinator.
      timer_interval_secs: Number. Time boundaries at which to call `target`.
      target: A callable object.
      args: Optional arguments to pass to `target` when calling it.
      kwargs: Optional keyword arguments to pass to `target` when calling it.

    Returns:
      The started thread.
    """
    looper = LooperThread(coord, timer_interval_secs, target=target, args=args,
                          kwargs=kwargs)
    looper.start()
    return looper

  def run(self):
    # Any exception in the loop body is reported to the coordinator, which
    # then asks all coordinated threads to stop.
    with self._coord.stop_on_exception():
      self.start_loop()
      if self._timer_interval_secs is None:
        # Call back-to-back.
        while not self._coord.should_stop():
          self.run_loop()
      else:
        # Next time at which to call run_loop(), starts as 'now'.
        next_timer_time = time.time()
        while not self._coord.wait_for_stop(next_timer_time - time.time()):
          next_timer_time += self._timer_interval_secs
          self.run_loop()
      self.stop_loop()

  def start_loop(self):
    """Called when the thread starts."""
    pass

  def stop_loop(self):
    """Called when the thread stops."""
    pass

  def run_loop(self):
    """Called at 'timer_interval_secs' boundaries."""
    if self._target:
      self._target(*self._args, **self._kwargs)
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================

"""Momentum for TensorFlow."""
from tensorflow.python.framework import ops
from tensorflow.python.ops import gen_training_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.training import optimizer
from tensorflow.python.util.tf_export import tf_export


@tf_export(v1=["train.MomentumOptimizer"])
class MomentumOptimizer(optimizer.Optimizer):
  """Optimizer that implements the Momentum algorithm.

  With `use_nesterov = False`, each step computes:

  ```
  accumulation = momentum * accumulation + gradient
  variable -= learning_rate * accumulation
  ```

  The dense version of this algorithm updates and applies `accumulation` for
  every variable element regardless of the gradient's value. The sparse
  version (used when the gradient is an `IndexedSlices`, typically produced by
  `tf.gather` or an embedding lookup) only touches the variable slices and
  `accumulation` terms that participated in the forward pass.

  @compatibility(TF2)
  tf.compat.v1.train.MomentumOptimizer is compatible with eager mode and
  `tf.function`. When eager execution is enabled, `learning_rate` and
  `momentum` can each be a zero-argument callable returning the actual value
  to use, which is useful for varying these values across invocations.

  To switch to native TF2 style, use
  [`tf.keras.optimizers.SGD`]
  (https://www.tensorflow.org/api_docs/python/tf/keras/optimizers/SGD)
  with its `momentum` argument:

  ```python
  # TF1:
  optimizer = tf.compat.v1.train.MomentumOptimizer(
      learning_rate=learning_rate, momentum=momentum,
      use_nesterov=use_nesterov)
  # TF2:
  optimizer = tf.keras.optimizers.SGD(
      learning_rate=learning_rate, momentum=momentum, nesterov=use_nesterov)
  ```

  Argument mapping: `learning_rate` -> `learning_rate` (note: in TF2 a tensor
  computed from the global step is evaluated once, not per step), `momentum`
  -> `momentum`, `use_nesterov` -> `nesterov`; `use_locking` has no TF2
  equivalent.
  @end_compatibility
  """

  def __init__(self, learning_rate, momentum,
               use_locking=False, name="Momentum", use_nesterov=False):
    """Construct a new Momentum optimizer.

    Args:
      learning_rate: A `Tensor` or a floating point value. The learning rate.
      momentum: A `Tensor` or a floating point value. The momentum.
      use_locking: If `True` use locks for update operations.
      name: Optional name prefix for the operations created when applying
        gradients. Defaults to "Momentum".
      use_nesterov: If `True` use Nesterov Momentum (Sutskever et al., 2013).
        Gradients are always computed at the current value of the variable(s),
        which therefore track the values called `theta_t + mu*v_t` in the
        paper. This approximates the original formula (valid for high momentum
        values) by estimating the new gradient as the current average gradient
        plus momentum times the change in the average gradient.

    References:
      On the importance of initialization and momentum in deep learning:
        [Sutskever et al., 2013]
        (http://proceedings.mlr.press/v28/sutskever13.html)
        ([pdf](http://proceedings.mlr.press/v28/sutskever13.pdf))
    """
    super().__init__(use_locking, name)
    self._learning_rate = learning_rate
    self._momentum = momentum
    self._use_nesterov = use_nesterov

  def _create_slots(self, var_list):
    # One zero-initialized "momentum" accumulator slot per variable.
    for var in var_list:
      self._zeros_slot(var, "momentum", self._name)

  def _prepare(self):
    # Hyperparameters may be zero-arg callables (eager-friendly); resolve them
    # here, once per apply, before converting to tensors.
    lr = self._learning_rate
    self._learning_rate_tensor = ops.convert_to_tensor(
        lr() if callable(lr) else lr, name="learning_rate")
    mom = self._momentum
    self._momentum_tensor = ops.convert_to_tensor(
        mom() if callable(mom) else mom, name="momentum")

  def _apply_dense(self, grad, var):
    """Dense ref-variable update via the fused ApplyMomentum kernel."""
    accum = self.get_slot(var, "momentum")
    lr = math_ops.cast(self._learning_rate_tensor, var.dtype.base_dtype)
    mom = math_ops.cast(self._momentum_tensor, var.dtype.base_dtype)
    return gen_training_ops.apply_momentum(
        var, accum, lr, grad, mom,
        use_locking=self._use_locking,
        use_nesterov=self._use_nesterov).op

  def _resource_apply_dense(self, grad, var):
    """Dense resource-variable update; casts follow the gradient dtype."""
    accum = self.get_slot(var, "momentum")
    lr = math_ops.cast(self._learning_rate_tensor, grad.dtype.base_dtype)
    mom = math_ops.cast(self._momentum_tensor, grad.dtype.base_dtype)
    return gen_training_ops.resource_apply_momentum(
        var.handle, accum.handle, lr, grad, mom,
        use_locking=self._use_locking,
        use_nesterov=self._use_nesterov)

  def _apply_sparse(self, grad, var):
    """Sparse ref-variable update; only the indexed slices are touched."""
    accum = self.get_slot(var, "momentum")
    lr = math_ops.cast(self._learning_rate_tensor, var.dtype.base_dtype)
    mom = math_ops.cast(self._momentum_tensor, var.dtype.base_dtype)
    return gen_training_ops.sparse_apply_momentum(
        var, accum, lr, grad.values, grad.indices, mom,
        use_locking=self._use_locking,
        use_nesterov=self._use_nesterov).op

  def _resource_apply_sparse(self, grad, var, indices):
    """Sparse resource-variable update for the given indices."""
    accum = self.get_slot(var, "momentum")
    lr = math_ops.cast(self._learning_rate_tensor, grad.dtype)
    mom = math_ops.cast(self._momentum_tensor, grad.dtype)
    return gen_training_ops.resource_sparse_apply_momentum(
        var.handle, accum.handle, lr, grad, indices, mom,
        use_locking=self._use_locking,
        use_nesterov=self._use_nesterov)
# pylint: disable=g-bad-file-header
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== +"""A wrapper of Session API which runs hooks.""" + +import abc +import os + +from tensorflow.core.protobuf import config_pb2 +from tensorflow.python.checkpoint import checkpoint as trackable_util +from tensorflow.python.checkpoint import graph_view +from tensorflow.python.distribute import distribute_coordinator_context +from tensorflow.python.framework import errors +from tensorflow.python.framework import ops +from tensorflow.python.ops import array_ops +from tensorflow.python.ops import control_flow_ops +from tensorflow.python.ops import lookup_ops +from tensorflow.python.ops import resources +from tensorflow.python.ops import variables +from tensorflow.python.platform import tf_logging as logging +from tensorflow.python.summary import summary +from tensorflow.python.training import basic_session_run_hooks +from tensorflow.python.training import coordinator +from tensorflow.python.training import queue_runner +from tensorflow.python.training import saver as training_saver +from tensorflow.python.training import session_manager as sm +from tensorflow.python.training import session_run_hook +from tensorflow.python.util import function_utils +from tensorflow.python.util.tf_export import tf_export + +# The list of exceptions that we should recover from. Exceptions not in this +# list may terminate the job. +_PREEMPTION_ERRORS = (errors.AbortedError, errors.UnavailableError) + +# Value that indicates no value was provided. 
+USE_DEFAULT = object() + + +@tf_export(v1=['train.Scaffold']) +class Scaffold: + """Structure to create or gather pieces commonly needed to train a model. + + When you build a model for training you usually need ops to initialize + variables, a `Saver` to checkpoint them, an op to collect summaries for + the visualizer, and so on. + + Various libraries built on top of the core TensorFlow library take care of + creating some or all of these pieces and storing them in well known + collections in the graph. The `Scaffold` class helps pick these pieces from + the graph collections, creating and adding them to the collections if needed. + + If you call the scaffold constructor without any arguments, it will pick + pieces from the collections, creating default ones if needed when + `scaffold.finalize()` is called. You can pass arguments to the constructor to + provide your own pieces. Pieces that you pass to the constructor are not + added to the graph collections. + + The following pieces are directly accessible as attributes of the `Scaffold` + object: + + * `saver`: A `tf.compat.v1.train.Saver` object taking care of saving the + variables. + Picked from and stored into the `SAVERS` collection in the graph by default. + * `init_op`: An op to run to initialize the variables. Picked from and + stored into the `INIT_OP` collection in the graph by default. + * `ready_op`: An op to verify that the variables are initialized. Picked + from and stored into the `READY_OP` collection in the graph by default. + * `ready_for_local_init_op`: An op to verify that global state has been + initialized and it is alright to run `local_init_op`. Picked from and + stored into the `READY_FOR_LOCAL_INIT_OP` collection in the graph by + default. This is needed when the initialization of local variables depends + on the values of global variables. + * `local_init_op`: An op to initialize the local variables. Picked + from and stored into the `LOCAL_INIT_OP` collection in the graph by default. 
+ * `summary_op`: An op to run and merge the summaries in the graph. Picked + from and stored into the `SUMMARY_OP` collection in the graph by default. + + You can also pass the following additional pieces to the constructor: + + * `init_feed_dict`: A session feed dictionary that should be used when + running the init op. + * `init_fn`: A callable to run after the init op to perform additional + initializations. The callable will be called as + `init_fn(scaffold, session)`. + + """ + + def __init__(self, + init_op=None, + init_feed_dict=None, + init_fn=None, + ready_op=None, + ready_for_local_init_op=None, + local_init_op=None, + summary_op=None, + saver=None, + copy_from_scaffold=None, + local_init_feed_dict=None): + """Create a scaffold. + + Args: + init_op: Optional op for initializing variables. + init_feed_dict: Optional session feed dictionary to use when running the + init_op. + init_fn: Optional function to use to initialize the model after running + the init_op. Will be called as `init_fn(scaffold, session)`. + ready_op: Optional op to verify that the variables are initialized. Must + return an empty 1D string tensor when the variables are initialized, or + a non-empty 1D string tensor listing the names of the non-initialized + variables. + ready_for_local_init_op: Optional op to verify that the global variables + are initialized and `local_init_op` can be run. Must return an empty 1D + string tensor when the global variables are initialized, or a non-empty + 1D string tensor listing the names of the non-initialized global + variables. + local_init_op: Optional op to initialize local variables. + summary_op: Optional op to gather all summaries. Must return a scalar + string tensor containing a serialized `Summary` proto. + saver: Optional `tf.compat.v1.train.Saver` object to use to save and + restore variables. May also be a `tf.train.Checkpoint` object, in which + case object-based checkpoints are saved. 
This will also load some + object-based checkpoints saved from elsewhere, but that loading may be + fragile since it uses fixed keys rather than performing a full + graph-based match. For example if a variable has two paths from the + `Checkpoint` object because two `Model` objects share the `Layer` object + that owns it, removing one `Model` may change the keys and break + checkpoint loading through this API, whereas a graph-based match would + match the variable through the other `Model`. + copy_from_scaffold: Optional scaffold object to copy fields from. Its + fields will be overwritten by the provided fields in this function. + local_init_feed_dict: Optional session feed dictionary to use when running + the local_init_op. + """ + if copy_from_scaffold is not None: + if not isinstance(copy_from_scaffold, Scaffold): + raise TypeError('copy_from_scaffold is not a Scaffold instance.') + # We need _coalesce since Tensor is not converted to bool automatically, + # so the common idiom of (a or b) does not work. + coalesce = lambda a, b: a if a is not None else b + init_op = coalesce(init_op, copy_from_scaffold.init_op) + init_feed_dict = coalesce(init_feed_dict, + copy_from_scaffold.init_feed_dict) + # Use the original init_fn provided by the user to init the new Scaffold. + init_fn = coalesce(init_fn, copy_from_scaffold._user_init_fn) # pylint: disable=protected-access + ready_op = coalesce(ready_op, copy_from_scaffold.ready_op) + ready_for_local_init_op = coalesce( + ready_for_local_init_op, copy_from_scaffold.ready_for_local_init_op) + local_init_op = coalesce(local_init_op, copy_from_scaffold.local_init_op) + local_init_feed_dict = coalesce(local_init_feed_dict, + copy_from_scaffold.local_init_feed_dict) + summary_op = coalesce(summary_op, copy_from_scaffold.summary_op) + saver = coalesce(saver, copy_from_scaffold.saver) + + # NOTE(touts): modifying the init function to be passed the scaffold is a + # hack to make it easy to find the saver. Is there a better way? 
+ self._user_init_fn = init_fn + if init_fn: + self._init_fn = lambda sess: init_fn(self, sess) + else: + self._init_fn = None + + self._init_op = init_op + self._init_feed_dict = init_feed_dict + self._ready_op = ready_op + self._ready_for_local_init_op = ready_for_local_init_op + self._local_init_op = local_init_op + self._local_init_feed_dict = local_init_feed_dict + self._summary_op = summary_op + self._saver = saver + + def finalize(self): + """Creates operations if needed and finalizes the graph.""" + if self._init_op is None: + + def default_init_op(): + return control_flow_ops.group( + variables.global_variables_initializer(), + resources.initialize_resources(resources.shared_resources()), + ops.get_collection('saved_model_initializers')) + + self._init_op = Scaffold.get_or_default('init_op', ops.GraphKeys.INIT_OP, + default_init_op) + if self._ready_op is None: + + def default_ready_op(): + return array_ops.concat([ + variables.report_uninitialized_variables(), + resources.report_uninitialized_resources() + ], 0) + + self._ready_op = Scaffold.get_or_default('ready_op', + ops.GraphKeys.READY_OP, + default_ready_op) + if self._ready_for_local_init_op is None: + + def default_ready_for_local_init_op(): + return array_ops.concat([ + variables.report_uninitialized_variables( + variables.global_variables()), + resources.report_uninitialized_resources( + resources.shared_resources()) + ], 0) + + self._ready_for_local_init_op = Scaffold.get_or_default( + 'ready_for_local_init_op', ops.GraphKeys.READY_FOR_LOCAL_INIT_OP, + default_ready_for_local_init_op) + if self._local_init_op is None: + self._local_init_op = Scaffold.get_or_default( + 'local_init_op', ops.GraphKeys.LOCAL_INIT_OP, + Scaffold.default_local_init_op) + if self._summary_op is None: + self._summary_op = Scaffold.get_or_default('summary_op', + ops.GraphKeys.SUMMARY_OP, + summary.merge_all) + # pylint: disable=g-long-lambda + if self._saver is None: + self._saver = training_saver._get_saver_or_default() 
# pylint: disable=protected-access + # pylint: enable=g-long-lambda + if isinstance(self._saver, trackable_util.Checkpoint): + self._saver = training_saver.Saver( + var_list=graph_view.ObjectGraphView( + self._saver).frozen_saveable_objects(), + sharded=True) + else: + self._saver.build() + + ops.get_default_graph().finalize() + logging.info('Graph was finalized.') + return self + + @property + def init_fn(self): + return self._init_fn + + @property + def init_op(self): + return self._init_op + + @property + def ready_op(self): + return self._ready_op + + @property + def ready_for_local_init_op(self): + return self._ready_for_local_init_op + + @property + def local_init_op(self): + return self._local_init_op + + @property + def local_init_feed_dict(self): + return self._local_init_feed_dict + + @property + def summary_op(self): + return self._summary_op + + @property + def saver(self): + return self._saver + + @property + def init_feed_dict(self): + return self._init_feed_dict + + @staticmethod + def get_or_default(arg_name, collection_key, default_constructor): + """Get from cache or create a default operation.""" + elements = ops.get_collection(collection_key) + if elements: + if len(elements) > 1: + raise RuntimeError( + 'More than one item in the collection "%s". ' + 'Please indicate which one to use by passing it to ' + 'the tf.Scaffold constructor as: ' + 'tf.Scaffold(%s=item to use)', collection_key, arg_name) + return elements[0] + op = default_constructor() + if op is not None: + ops.add_to_collection(collection_key, op) + return op + + @staticmethod + def default_local_init_op(): + """Returns an op that groups the default local init ops. + + This op is used during session initialization when a Scaffold is + initialized without specifying the local_init_op arg. It includes + `tf.compat.v1.local_variables_initializer`, + `tf.compat.v1.tables_initializer`, and also + initializes local session resources. + + Returns: + The default Scaffold local init op. 
+ """ + return control_flow_ops.group( + variables.local_variables_initializer(), + lookup_ops.tables_initializer(), + resources.initialize_resources(resources.local_resources())) + + +def _create_monitored_session_with_worker_context( + worker_context, # pylint: disable=missing-docstring + scaffold, + checkpoint_dir=None, + hooks=None, + chief_only_hooks=None, + save_checkpoint_secs=None, + save_summaries_steps=None, + save_summaries_secs=None, + config=None, + stop_grace_period_secs=120, + log_step_count_steps=100, + max_wait_secs=7200, + save_checkpoint_steps=None, + summary_dir=None, + save_graph_def=True): + all_hooks = [] + if hooks: + all_hooks.extend(hooks) + if chief_only_hooks and worker_context.is_chief: + all_hooks.extend(chief_only_hooks) + + # We need to call save or summary ops on all workers since these ops may + # contain collective ops, only running save ops on some workers would make + # collective ops hang. Therefore on those workers that don't need to actually + # write checkpoints or summaries, we let them write to a temp directory. + # pylint: disable=protected-access + if type( + worker_context._strategy).__name__ in ('CollectiveAllReduceStrategy', + 'CollectiveAllReduceStrategyV1', + 'MultiWorkerMirroredStrategy'): + if worker_context.task_type: + tmpdir = 'tmp_%s_%d' % (worker_context.task_type, worker_context.task_id) + else: + tmpdir = 'tmp' + + if save_checkpoint_secs: + logging.warning('Collective ops may deadlock with ' + '`save_checkpoints_secs` please use ' + '`save_checkpoint_steps` instead. 
Clearing ' + '`save_checkpoint_secs` and setting ' + '`save_checkpoint_steps` to 1000 now.') + save_checkpoint_secs = None + save_checkpoint_steps = 1000 + if save_summaries_secs: + logging.warning('Collective ops may run out of sync with' + '`save_summaries_secs`, please use ' + '`save_summaries_steps` instead.') + else: + tmpdir = None + + summary_dir = summary_dir or checkpoint_dir + if summary_dir and log_step_count_steps and log_step_count_steps > 0: + if worker_context.should_save_summary: + all_hooks.append( + basic_session_run_hooks.StepCounterHook( + output_dir=summary_dir, every_n_steps=log_step_count_steps)) + elif tmpdir: + all_hooks.append( + basic_session_run_hooks.StepCounterHook( + output_dir=os.path.join(summary_dir, tmpdir), + every_n_steps=log_step_count_steps)) + + if (((save_summaries_steps and save_summaries_steps > 0) or + (save_summaries_secs and save_summaries_secs > 0)) and summary_dir): + if worker_context.should_save_summary: + all_hooks.append( + basic_session_run_hooks.SummarySaverHook( + scaffold=scaffold, + save_steps=save_summaries_steps, + save_secs=save_summaries_secs, + output_dir=summary_dir)) + elif tmpdir: + all_hooks.append( + basic_session_run_hooks.SummarySaverHook( + scaffold=scaffold, + save_steps=save_summaries_steps, + save_secs=save_summaries_secs, + output_dir=os.path.join(summary_dir, tmpdir))) + + if (((save_checkpoint_secs and save_checkpoint_secs > 0) or + (save_checkpoint_steps and save_checkpoint_steps > 0)) and + checkpoint_dir): + if worker_context.should_checkpoint: + all_hooks.append( + basic_session_run_hooks.CheckpointSaverHook( + checkpoint_dir, + save_steps=save_checkpoint_steps, + save_secs=save_checkpoint_secs, + scaffold=scaffold, + save_graph_def=save_graph_def)) + elif tmpdir: + all_hooks.append( + basic_session_run_hooks.CheckpointSaverHook( + os.path.join(checkpoint_dir, tmpdir), + save_steps=save_checkpoint_steps, + save_secs=save_checkpoint_secs, + scaffold=scaffold, + 
save_graph_def=save_graph_def)) + + logging.info('all_hooks %r', all_hooks) + session_creator = worker_context.session_creator( + scaffold, + config=config, + checkpoint_dir=checkpoint_dir, + max_wait_secs=max_wait_secs) + return MonitoredSession( + session_creator=session_creator, + hooks=all_hooks, + stop_grace_period_secs=stop_grace_period_secs) + + +@tf_export(v1=['train.MonitoredTrainingSession']) +def MonitoredTrainingSession( + master='', # pylint: disable=invalid-name + is_chief=True, + checkpoint_dir=None, + scaffold=None, + hooks=None, + chief_only_hooks=None, + save_checkpoint_secs=USE_DEFAULT, + save_summaries_steps=USE_DEFAULT, + save_summaries_secs=USE_DEFAULT, + config=None, + stop_grace_period_secs=120, + log_step_count_steps=100, + max_wait_secs=7200, + save_checkpoint_steps=USE_DEFAULT, + summary_dir=None, + save_graph_def=True): + """Creates a `MonitoredSession` for training. + + For a chief, this utility sets proper session initializer/restorer. It also + creates hooks related to checkpoint and summary saving. For workers, this + utility sets proper session creator which waits for the chief to + initialize/restore. Please check `tf.compat.v1.train.MonitoredSession` for + more + information. + + @compatibility(TF2) + This API is not compatible with eager execution and `tf.function`. To migrate + to TF2, rewrite the code to be compatible with eager execution. Check the + [migration + guide](https://www.tensorflow.org/guide/migrate#1_replace_v1sessionrun_calls) + on replacing `Session.run` calls. In Keras, session hooks can be replaced by + Callbacks e.g. [logging hook notebook]( + https://github.com/tensorflow/docs/blob/master/site/en/guide/migrate/logging_stop_hook.ipynb) + For more details please read [Better + performance with tf.function](https://www.tensorflow.org/guide/function). + @end_compatibility + + Args: + master: `String` the TensorFlow master to use. 
+ is_chief: If `True`, it will take care of initialization and recovery the + underlying TensorFlow session. If `False`, it will wait on a chief to + initialize or recover the TensorFlow session. + checkpoint_dir: A string. Optional path to a directory where to restore + variables. + scaffold: A `Scaffold` used for gathering or building supportive ops. If not + specified, a default one is created. It's used to finalize the graph. + hooks: Optional list of `SessionRunHook` objects. + chief_only_hooks: list of `SessionRunHook` objects. Activate these hooks if + `is_chief==True`, ignore otherwise. + save_checkpoint_secs: The frequency, in seconds, that a checkpoint is saved + using a default checkpoint saver. If both `save_checkpoint_steps` and + `save_checkpoint_secs` are set to `None`, then the default checkpoint + saver isn't used. If both are provided, then only `save_checkpoint_secs` + is used. Default 600. + save_summaries_steps: The frequency, in number of global steps, that the + summaries are written to disk using a default summary saver. If both + `save_summaries_steps` and `save_summaries_secs` are set to `None`, then + the default summary saver isn't used. Default 100. + save_summaries_secs: The frequency, in secs, that the summaries are written + to disk using a default summary saver. If both `save_summaries_steps` and + `save_summaries_secs` are set to `None`, then the default summary saver + isn't used. Default not enabled. + config: an instance of `tf.compat.v1.ConfigProto` proto used to configure + the session. It's the `config` argument of constructor of + `tf.compat.v1.Session`. + stop_grace_period_secs: Number of seconds given to threads to stop after + `close()` has been called. + log_step_count_steps: The frequency, in number of global steps, that the + global step/sec is logged. + max_wait_secs: Maximum time workers should wait for the session to become + available. 
This should be kept relatively short to help detect incorrect + code, but sometimes may need to be increased if the chief takes a while to + start up. + save_checkpoint_steps: The frequency, in number of global steps, that a + checkpoint is saved using a default checkpoint saver. If both + `save_checkpoint_steps` and `save_checkpoint_secs` are set to `None`, then + the default checkpoint saver isn't used. If both are provided, then only + `save_checkpoint_secs` is used. Default not enabled. + summary_dir: A string. Optional path to a directory where to save + summaries. If None, checkpoint_dir is used instead. + save_graph_def: Whether to save the GraphDef and MetaGraphDef to + `checkpoint_dir`. The GraphDef is saved after the session is created as + `graph.pbtxt`. MetaGraphDefs are saved out for every checkpoint as + `model.ckpt-*.meta`. + + Returns: + A `MonitoredSession` object. + """ + if save_summaries_steps == USE_DEFAULT and save_summaries_secs == USE_DEFAULT: + save_summaries_steps = 100 + save_summaries_secs = None + elif save_summaries_secs == USE_DEFAULT: + save_summaries_secs = None + elif save_summaries_steps == USE_DEFAULT: + save_summaries_steps = None + + if (save_checkpoint_steps == USE_DEFAULT and + save_checkpoint_secs == USE_DEFAULT): + save_checkpoint_steps = None + save_checkpoint_secs = 600 + elif save_checkpoint_secs == USE_DEFAULT: + save_checkpoint_secs = None + elif save_checkpoint_steps == USE_DEFAULT: + save_checkpoint_steps = None + + scaffold = scaffold or Scaffold() + worker_context = distribute_coordinator_context.get_current_worker_context() + + if worker_context: + return _create_monitored_session_with_worker_context( + worker_context, + scaffold, + checkpoint_dir=checkpoint_dir, + hooks=hooks, + chief_only_hooks=chief_only_hooks, + save_checkpoint_secs=save_checkpoint_secs, + save_summaries_steps=save_summaries_steps, + save_summaries_secs=save_summaries_secs, + config=config, + stop_grace_period_secs=stop_grace_period_secs, + 
log_step_count_steps=log_step_count_steps, + max_wait_secs=max_wait_secs, + save_checkpoint_steps=save_checkpoint_steps, + summary_dir=summary_dir, + save_graph_def=save_graph_def) + + if not is_chief: + session_creator = WorkerSessionCreator( + scaffold=scaffold, + master=master, + config=config, + max_wait_secs=max_wait_secs) + return MonitoredSession( + session_creator=session_creator, + hooks=hooks or [], + stop_grace_period_secs=stop_grace_period_secs) + + all_hooks = [] + if chief_only_hooks: + all_hooks.extend(chief_only_hooks) + session_creator = ChiefSessionCreator( + scaffold=scaffold, + checkpoint_dir=checkpoint_dir, + master=master, + config=config) + + summary_dir = summary_dir or checkpoint_dir + if summary_dir: + if log_step_count_steps and log_step_count_steps > 0: + all_hooks.append( + basic_session_run_hooks.StepCounterHook( + output_dir=summary_dir, every_n_steps=log_step_count_steps)) + + if (save_summaries_steps and + save_summaries_steps > 0) or (save_summaries_secs and + save_summaries_secs > 0): + all_hooks.append( + basic_session_run_hooks.SummarySaverHook( + scaffold=scaffold, + save_steps=save_summaries_steps, + save_secs=save_summaries_secs, + output_dir=summary_dir)) + + if checkpoint_dir: + if (save_checkpoint_secs and + save_checkpoint_secs > 0) or (save_checkpoint_steps and + save_checkpoint_steps > 0): + all_hooks.append( + basic_session_run_hooks.CheckpointSaverHook( + checkpoint_dir, + save_steps=save_checkpoint_steps, + save_secs=save_checkpoint_secs, + scaffold=scaffold, + save_graph_def=save_graph_def)) + + if hooks: + all_hooks.extend(hooks) + return MonitoredSession( + session_creator=session_creator, + hooks=all_hooks, + stop_grace_period_secs=stop_grace_period_secs) + + +@tf_export(v1=['train.SessionCreator']) +class SessionCreator(metaclass=abc.ABCMeta): + """A factory for tf.Session.""" + + @abc.abstractmethod + def create_session(self): + raise NotImplementedError( + 'create_session is not implemented for 
{}.'.format(self)) + + +@tf_export(v1=['train.ChiefSessionCreator']) +class ChiefSessionCreator(SessionCreator): + """Creates a tf.compat.v1.Session for a chief.""" + + def __init__(self, + scaffold=None, + master='', + config=None, + checkpoint_dir=None, + checkpoint_filename_with_path=None): + """Initializes a chief session creator. + + Args: + scaffold: A `Scaffold` used for gathering or building supportive ops. If + not specified a default one is created. It's used to finalize the graph. + master: `String` representation of the TensorFlow master to use. + config: `ConfigProto` proto used to configure the session. + checkpoint_dir: A string. Optional path to a directory where to restore + variables. + checkpoint_filename_with_path: Full file name path to the checkpoint file. + """ + self._checkpoint_dir = checkpoint_dir + self._checkpoint_filename_with_path = checkpoint_filename_with_path + self._scaffold = scaffold or Scaffold() + self._session_manager = None + self._master = master + self._config = config + + def _get_session_manager(self): + """Gets or creates a SessionManager.""" + if self._session_manager: + return self._session_manager + + self._session_manager = sm.SessionManager( + local_init_op=self._scaffold.local_init_op, + local_init_feed_dict=self._scaffold.local_init_feed_dict, + ready_op=self._scaffold.ready_op, + ready_for_local_init_op=self._scaffold.ready_for_local_init_op, + graph=ops.get_default_graph()) + return self._session_manager + + def create_session(self): + self._scaffold.finalize() + return self._get_session_manager().prepare_session( + self._master, + saver=self._scaffold.saver, + checkpoint_dir=self._checkpoint_dir, + checkpoint_filename_with_path=self._checkpoint_filename_with_path, + config=self._config, + init_op=self._scaffold.init_op, + init_feed_dict=self._scaffold.init_feed_dict, + init_fn=self._scaffold.init_fn) + + +@tf_export(v1=['train.WorkerSessionCreator']) +class WorkerSessionCreator(SessionCreator): + """Creates a 
tf.compat.v1.Session for a worker.""" + + def __init__(self, + scaffold=None, + master='', + config=None, + max_wait_secs=30 * 60): + """Initializes a worker session creator. + + Args: + scaffold: A `Scaffold` used for gathering or building supportive ops. If + not specified a default one is created. It's used to finalize the graph. + master: `String` representation of the TensorFlow master to use. + config: `ConfigProto` proto used to configure the session. + max_wait_secs: Maximum time to wait for the session to become available. + """ + self._scaffold = scaffold or Scaffold() + self._session_manager = None + self._master = master + self._config = config + self._max_wait_secs = max_wait_secs + + def _get_session_manager(self): + """Gets or creates a SessionManager.""" + if self._session_manager: + return self._session_manager + + self._session_manager = sm.SessionManager( + local_init_op=self._scaffold.local_init_op, + local_init_feed_dict=self._scaffold.local_init_feed_dict, + ready_op=self._scaffold.ready_op, + ready_for_local_init_op=self._scaffold.ready_for_local_init_op, + graph=ops.get_default_graph()) + return self._session_manager + + def create_session(self): + self._scaffold.finalize() + return self._get_session_manager().wait_for_session( + self._master, config=self._config, max_wait_secs=self._max_wait_secs) + + +class _MonitoredSession: + """See `MonitoredSession` or `SingularMonitoredSession`.""" + + def __init__(self, + session_creator, + hooks, + should_recover, + stop_grace_period_secs=120): + """Sets up a Monitored or Hooked Session. + + Args: + session_creator: A factory object to create session. Typically a + `ChiefSessionCreator` or a `WorkerSessionCreator`. + hooks: An iterable of `SessionRunHook' objects. + should_recover: A bool. Indicates whether to recover from `AbortedError` + and `UnavailableError` or not. + stop_grace_period_secs: Number of seconds given to threads to stop after + `close()` has been called. 
+ """ + self._graph_was_finalized = ops.get_default_graph().finalized + self._hooks = hooks or [] + for h in self._hooks: + h.begin() + + worker_context = distribute_coordinator_context.get_current_worker_context() + if not session_creator and worker_context: + session_creator = worker_context.session_creator() + + # Create the session. + self._coordinated_creator = self._CoordinatedSessionCreator( + session_creator=session_creator or ChiefSessionCreator(), + hooks=self._hooks, + stop_grace_period_secs=stop_grace_period_secs) + if should_recover: + self._sess = _RecoverableSession(self._coordinated_creator) + else: + self._sess = self._coordinated_creator.create_session() + + @property + def graph(self): + """The graph that was launched in this session.""" + if self._tf_sess() is None: + return None + return self._tf_sess().graph + + def run(self, fetches, feed_dict=None, options=None, run_metadata=None): + """Run ops in the monitored session. + + This method is completely compatible with the `tf.Session.run()` method. + + Args: + fetches: Same as `tf.Session.run()`. + feed_dict: Same as `tf.Session.run()`. + options: Same as `tf.Session.run()`. + run_metadata: Same as `tf.Session.run()`. + + Returns: + Same as `tf.Session.run()`. + """ + return self._sess.run( + fetches, + feed_dict=feed_dict, + options=options, + run_metadata=run_metadata) + + def run_step_fn(self, step_fn): + """Run ops using a step function. + + Args: + step_fn: A function or a method with a single argument of type + `StepContext`. The function may use methods of the argument to perform + computations with access to a raw session. The returned value of the + `step_fn` will be returned from `run_step_fn`, unless a stop is + requested. In that case, the next `should_stop` call will return True. 
+ Example usage: + ```python + with tf.Graph().as_default(): + c = tf.compat.v1.placeholder(dtypes.float32) + v = tf.add(c, 4.0) + w = tf.add(c, 0.5) + def step_fn(step_context): + a = step_context.session.run(fetches=v, feed_dict={c: 0.5}) + if a <= 4.5: + step_context.request_stop() + return step_context.run_with_hooks(fetches=w, + feed_dict={c: 0.1}) + + with tf.MonitoredSession() as session: + while not session.should_stop(): + a = session.run_step_fn(step_fn) + ``` + Hooks interact with the `run_with_hooks()` call inside the + `step_fn` as they do with a `MonitoredSession.run` call. + + Returns: + Returns the returned value of `step_fn`. + + Raises: + StopIteration: if `step_fn` has called `request_stop()`. It may be + caught by `with tf.MonitoredSession()` to close the session. + ValueError: if `step_fn` doesn't have a single argument called + `step_context`. It may also optionally have `self` for cases when it + belongs to an object. + """ + step_fn_arguments = function_utils.fn_args(step_fn) + if step_fn_arguments != ('step_context',) and step_fn_arguments != ( + 'self', + 'step_context', + ): + raise ValueError( + '`step_fn` may either have one `step_context` argument, or' + ' `self` and `step_context` arguments if it\'s an instance' + ' method. Got {} instead.'.format(step_fn_arguments)) + + # `self._sess` is either `_RecoverableSession` or a `_CoordinatedSession`. + # Setting `run_with_hooks` to `None` will cause `run_with_hooks` to be + # `_CoordinatedSession.run` downstream in either case. This allows + # `_PREEMPTION_ERRORS` to propage from within `step_fn` to + # `_RecoverableSession.run_step_fn`. + return self._sess.run_step_fn(step_fn, self._tf_sess(), run_with_hooks=None) + + class StepContext: + """Control flow instrument for the `step_fn` from `run_step_fn()`. + + Users of `step_fn` may perform `run()` calls without running hooks + by accessing the `session`. A `run()` call with hooks may be performed + using `run_with_hooks()`. 
Computation flow can be interrupted using + `request_stop()`. + """ + + def __init__(self, session, run_with_hooks_fn): + """Initializes the `step_context` argument for a `step_fn` invocation. + + Args: + session: An instance of `tf.compat.v1.Session`. + run_with_hooks_fn: A function for running fetches and hooks. + """ + self._session = session + self._run_with_hooks_fn = run_with_hooks_fn + + @property + def session(self): + return self._session + + def run_with_hooks(self, *args, **kwargs): + """Same as `MonitoredSession.run`. Accepts the same arguments.""" + return self._run_with_hooks_fn(*args, **kwargs) + + def request_stop(self): + """Exit the training loop by causing `should_stop()` to return `True`. + + Causes `step_fn` to exit by raising an exception. + + Raises: + StopIteration + """ + raise StopIteration('step_fn has requested the iterations to stop.') + + def should_stop(self): + return self._sess is None or self._sess.should_stop() + + def close(self): + self._close_internal() + + def __enter__(self): + return self + + def __exit__(self, exception_type, exception_value, traceback): + if exception_type in [errors.OutOfRangeError, StopIteration]: + exception_type = None + self._close_internal(exception_type) + # __exit__ should return True to suppress an exception. + return exception_type is None + + class _CoordinatedSessionCreator(SessionCreator): + """Factory for _CoordinatedSession.""" + + def __init__(self, session_creator, hooks, stop_grace_period_secs): + self._session_creator = session_creator + self._hooks = hooks + self.coord = None + self.tf_sess = None + self._stop_grace_period_secs = stop_grace_period_secs + + def create_session(self): + """Creates a coordinated session.""" + # Keep the tf_sess for unit testing. + self.tf_sess = self._session_creator.create_session() + # We don't want coordinator to suppress any exception. 
+ self.coord = coordinator.Coordinator(clean_stop_exception_types=[]) + if ops.get_collection(ops.GraphKeys.QUEUE_RUNNERS): + queue_runner.start_queue_runners(sess=self.tf_sess, coord=self.coord) + # Inform the hooks that a new session has been created. + for hook in self._hooks: + hook.after_create_session(self.tf_sess, self.coord) + return _CoordinatedSession( + _HookedSession(self.tf_sess, self._hooks), self.coord, + self._stop_grace_period_secs) + + def _close_internal(self, exception_type=None): + try: + if not exception_type: + for h in self._hooks: + h.end(self._coordinated_creator.tf_sess) + finally: + try: + if self._sess is None: + raise RuntimeError('Session is already closed.') + self._sess.close() + finally: + self._sess = None + self._coordinated_creator.tf_sess = None + self._coordinated_creator.coord = None + if not self._graph_was_finalized: + ops.get_default_graph()._unsafe_unfinalize() # pylint: disable=protected-access + + def _is_closed(self): + """Return True if the monitored session is closed. + + For tests only. + + Returns: + A boolean. + """ + return self._coordinated_creator.tf_sess is None + + def _tf_sess(self): + """Return underlying tf.compat.v1.Session object. + + Warning: accessing the returned object in user code is likely to cause races + or "flaky tests". + + Returns: + A tf.compat.v1.Session object. + """ + return self._coordinated_creator.tf_sess + + +@tf_export(v1=['train.MonitoredSession']) +class MonitoredSession(_MonitoredSession): + """Session-like object that handles initialization, recovery and hooks. + + Example usage: + + ```python + saver_hook = CheckpointSaverHook(...) + summary_hook = SummarySaverHook(...) 
+ with MonitoredSession(session_creator=ChiefSessionCreator(...), + hooks=[saver_hook, summary_hook]) as sess: + while not sess.should_stop(): + sess.run(train_op) + ``` + + Initialization: At creation time the monitored session does following things + in given order: + + * calls `hook.begin()` for each given hook + * finalizes the graph via `scaffold.finalize()` + * create session + * initializes the model via initialization ops provided by `Scaffold` + * restores variables if a checkpoint exists + * launches queue runners + * calls `hook.after_create_session()` + + Run: When `run()` is called, the monitored session does following things: + + * calls `hook.before_run()` + * calls TensorFlow `session.run()` with merged fetches and feed_dict + * calls `hook.after_run()` + * returns result of `session.run()` asked by user + * if `AbortedError` or `UnavailableError` occurs, it recovers or + reinitializes the session before executing the run() call again + + + Exit: At the `close()`, the monitored session does following things in order: + + * calls `hook.end()` + * closes the queue runners and the session + * suppresses `OutOfRange` error which indicates that all inputs have been + processed if the monitored_session is used as a context + + How to set `tf.compat.v1.Session` arguments: + + * In most cases you can set session arguments as follows: + + ```python + MonitoredSession( + session_creator=ChiefSessionCreator(master=..., config=...)) + ``` + + * In distributed setting for a non-chief worker, you can use following: + + ```python + MonitoredSession( + session_creator=WorkerSessionCreator(master=..., config=...)) + ``` + + See `MonitoredTrainingSession` for an example usage based on chief or worker. + + Note: This is not a `tf.compat.v1.Session`. For example, it cannot do + following: + + * it cannot be set as default session. + * it cannot be sent to saver.save. + * it cannot be sent to tf.train.start_queue_runners. 
+ + @compatibility(TF2) + This API is not compatible with eager execution and `tf.function`. To migrate + to TF2, rewrite the code to be compatible with eager execution. Check the + [migration + guide](https://www.tensorflow.org/guide/migrate#1_replace_v1sessionrun_calls) + on replacing `Session.run` calls. In Keras, session hooks can be replaced by + Callbacks e.g. [logging hook notebook]( + https://github.com/tensorflow/docs/blob/master/site/en/guide/migrate/logging_stop_hook.ipynb) + For more details please read [Better + performance with tf.function](https://www.tensorflow.org/guide/function). + @end_compatibility + + Args: + session_creator: A factory object to create session. Typically a + `ChiefSessionCreator` which is the default one. + hooks: An iterable of `SessionRunHook' objects. + + Returns: + A MonitoredSession object. + """ + + def __init__(self, + session_creator=None, + hooks=None, + stop_grace_period_secs=120): + super(MonitoredSession, self).__init__( + session_creator, + hooks, + should_recover=True, + stop_grace_period_secs=stop_grace_period_secs) + + +@tf_export(v1=['train.SingularMonitoredSession']) +class SingularMonitoredSession(_MonitoredSession): + """Session-like object that handles initialization, restoring, and hooks. + + Please note that this utility is not recommended for distributed settings. + For distributed settings, please use `tf.compat.v1.train.MonitoredSession`. + The + differences between `MonitoredSession` and `SingularMonitoredSession` are: + + * `MonitoredSession` handles `AbortedError` and `UnavailableError` for + distributed settings, but `SingularMonitoredSession` does not. + * `MonitoredSession` can be created in `chief` or `worker` modes. + `SingularMonitoredSession` is always created as `chief`. + * You can access the raw `tf.compat.v1.Session` object used by + `SingularMonitoredSession`, whereas in MonitoredSession the raw session is + private. This can be used: + - To `run` without hooks. + - To save and restore. 
+ * All other functionality is identical. + + Example usage: + ```python + saver_hook = CheckpointSaverHook(...) + summary_hook = SummarySaverHook(...) + with SingularMonitoredSession(hooks=[saver_hook, summary_hook]) as sess: + while not sess.should_stop(): + sess.run(train_op) + ``` + + Initialization: At creation time the hooked session does following things + in given order: + + * calls `hook.begin()` for each given hook + * finalizes the graph via `scaffold.finalize()` + * create session + * initializes the model via initialization ops provided by `Scaffold` + * restores variables if a checkpoint exists + * launches queue runners + + Run: When `run()` is called, the hooked session does following things: + + * calls `hook.before_run()` + * calls TensorFlow `session.run()` with merged fetches and feed_dict + * calls `hook.after_run()` + * returns result of `session.run()` asked by user + + Exit: At the `close()`, the hooked session does following things in order: + + * calls `hook.end()` + * closes the queue runners and the session + * suppresses `OutOfRange` error which indicates that all inputs have been + processed if the `SingularMonitoredSession` is used as a context. + + @compatibility(TF2) + This API is not compatible with eager execution and `tf.function`. To migrate + to TF2, rewrite the code to be compatible with eager execution. Check the + [migration + guide](https://www.tensorflow.org/guide/migrate#1_replace_v1sessionrun_calls) + on replacing `Session.run` calls. In Keras, session hooks can be replaced by + Callbacks e.g. [logging hook notebook]( + https://github.com/tensorflow/docs/blob/master/site/en/guide/migrate/logging_stop_hook.ipynb) + For more details please read [Better + performance with tf.function](https://www.tensorflow.org/guide/function). 
+ @end_compatibility + """ + + def __init__(self, + hooks=None, + scaffold=None, + master='', + config=None, + checkpoint_dir=None, + stop_grace_period_secs=120, + checkpoint_filename_with_path=None): + """Creates a SingularMonitoredSession. + + Args: + hooks: An iterable of `SessionRunHook' objects. + scaffold: A `Scaffold` used for gathering or building supportive ops. If + not specified a default one is created. It's used to finalize the graph. + master: `String` representation of the TensorFlow master to use. + config: `ConfigProto` proto used to configure the session. + checkpoint_dir: A string. Optional path to a directory where to restore + variables. + stop_grace_period_secs: Number of seconds given to threads to stop after + `close()` has been called. + checkpoint_filename_with_path: A string. Optional path to a checkpoint + file from which to restore variables. + """ + session_creator = ChiefSessionCreator( + scaffold=scaffold, + master=master, + config=config, + checkpoint_dir=checkpoint_dir, + checkpoint_filename_with_path=checkpoint_filename_with_path) + super(SingularMonitoredSession, self).__init__( + session_creator, + hooks, + should_recover=False, + stop_grace_period_secs=stop_grace_period_secs) + + def raw_session(self): + """Returns underlying `TensorFlow.Session` object.""" + return self._tf_sess() + + +class _WrappedSession: + """Wrapper around a `tf.compat.v1.Session`. + + This wrapper is used as a base class for various session wrappers + that provide additional functionality such as monitoring, coordination, + and recovery. + + In addition to the methods exported by `SessionInterface` the wrapper + provides a method to check for stop and never raises exceptions from + calls to `close()`. + """ + + def __init__(self, sess): + """Creates a `_WrappedSession`. + + Args: + sess: A `tf.compat.v1.Session` or `_WrappedSession` object. The wrapped + session. 
+ """ + self._sess = sess + self._wrapped_is_stoppable = isinstance(self._sess, _WrappedSession) + + @property + def graph(self): + return self._sess.graph + + @property + def sess_str(self): + return self._sess.sess_str + + def should_stop(self): + """Return true if this session should not be used anymore. + + Always return True if the session was closed. + + Returns: + True if the session should stop, False otherwise. + """ + if self._check_stop(): + return True + if self._sess: + return self._wrapped_is_stoppable and self._sess.should_stop() + return True + + def _check_stop(self): + """Hook for subclasses to provide their own stop condition. + + Returns: + True if the session should stop, False otherwise. + """ + return False + + def close(self): + if self._sess: + try: + self._sess.close() + except _PREEMPTION_ERRORS as e: + logging.error( + 'An error occurred when attempting to close the ' + 'session. This may be due to a preemption in a ' + 'connected worker or parameter server. Error: %s', e) + finally: + self._sess = None + + def run(self, *args, **kwargs): + return self._sess.run(*args, **kwargs) + + def run_step_fn(self, step_fn, raw_session, run_with_hooks): + # `_RecoverableSession` sets `run_with_hooks` to `_CoordinatedSession.run`. + # It is `None` when called from `_CoordinatedSession`. In that case + # `self.run` is `_CoordinatedSession.run`. + run_with_hooks = run_with_hooks or self.run + return step_fn(_MonitoredSession.StepContext(raw_session, run_with_hooks)) + + +class _RecoverableSession(_WrappedSession): + """A wrapped session that recreates a session upon certain kinds of errors. + + The constructor is passed a SessionCreator object, not a session. + + Calls to `run()` are delegated to the wrapped session. If a call raises the + exception `tf.errors.AbortedError` or `tf.errors.UnavailableError`, the + wrapped session is closed, and a new one is created by calling the factory + again. 
+ """ + + def __init__(self, sess_creator): + """Create a new `_RecoverableSession`. + + The value returned by calling `sess_creator.create_session()` will be the + session wrapped by this recoverable session. + + Args: + sess_creator: A 'SessionCreator' to be wrapped by recoverable. + """ + self._sess_creator = sess_creator + _WrappedSession.__init__(self, self._create_session()) + + def _create_session(self): + while True: + try: + return self._sess_creator.create_session() + except _PREEMPTION_ERRORS as e: + logging.info( + 'An error was raised while a session was being created. ' + 'This may be due to a preemption of a connected worker ' + 'or parameter server. A new session will be created. ' + 'This error may also occur due to a gRPC failure caused ' + 'by high memory or network bandwidth usage in the ' + 'parameter servers. If this error occurs repeatedly, try ' + 'increasing the number of parameter servers assigned to ' + 'the job. Error: %s', e) + + def _check_stop(self): + try: + if self._sess: + return self._sess._check_stop() # pylint: disable=protected-access + else: + return True + except _PREEMPTION_ERRORS as e: + logging.info( + 'An error was raised while considering whether the ' + 'session is complete. This may be due to a preemption in ' + 'a connected worker or parameter server. The current ' + 'session will be closed and a new session will be ' + 'created. This error may also occur due to a gRPC failure ' + 'caused by high memory or network bandwidth usage in the ' + 'parameter servers. If this error occurs repeatedly, try ' + 'increasing the number of parameter servers assigned to ' + 'the job. Error: %s', e) + self.close() + self._sess = self._create_session() + # Since we have just recreated the session, the overall computation should + # not stop: + return False + except Exception: # pylint: disable=broad-except + # `should_stop` should return True instead of raising an exception. 
+ return True + + def run(self, fetches, feed_dict=None, options=None, run_metadata=None): + while True: + try: + if not self._sess: + self._sess = self._create_session() + return self._sess.run( + fetches, + feed_dict=feed_dict, + options=options, + run_metadata=run_metadata) + except _PREEMPTION_ERRORS as e: + logging.info( + 'An error was raised. This may be due to a preemption in ' + 'a connected worker or parameter server. The current ' + 'session will be closed and a new session will be ' + 'created. This error may also occur due to a gRPC failure ' + 'caused by high memory or network bandwidth usage in the ' + 'parameter servers. If this error occurs repeatedly, try ' + 'increasing the number of parameter servers assigned to ' + 'the job. Error: %s', e) + self.close() + self._sess = None + + def run_step_fn(self, step_fn, raw_session, run_with_hooks): + while True: + try: + if not self._sess: + self._sess = self._create_session() + + run_with_hooks = self._sess.run + return self._sess.run_step_fn(step_fn, raw_session, run_with_hooks) + except _PREEMPTION_ERRORS as e: + logging.info( + 'An error was raised. This may be due to a preemption in ' + 'a connected worker or parameter server. The current ' + 'session will be closed and a new session will be ' + 'created. This error may also occur due to a gRPC failure ' + 'caused by high memory or network bandwidth usage in the ' + 'parameter servers. If this error occurs repeatedly, try ' + 'increasing the number of parameter servers assigned to ' + 'the job. Error: %s', e) + self.close() + self._sess = None + + +class _CoordinatedSession(_WrappedSession): + """A wrapped session that works with a `tf.Coordinator`. + + Calls to `run()` are delegated to the wrapped session. If a call + raises an exception, the exception is reported to the coordinator. + + In addition, after each call to `run()` this session ask the coordinator if + the session should stop. 
In that case it will join all the threads
  registered with the coordinator before returning.

  If the coordinator was requested to stop with an exception, that exception
  will be re-raised from the call to `run()`.
  """

  def __init__(self, sess, coord, stop_grace_period_secs=120):
    """Create a new `_CoordinatedSession`.

    Args:
      sess: A `tf.compat.v1.Session` object. The wrapped session.
      coord: A `tf.train.Coordinator` object.
      stop_grace_period_secs: Number of seconds given to threads to stop after
        `close()` has been called.
    """
    _WrappedSession.__init__(self, sess)
    self._coord = coord
    self._stop_grace_period_secs = stop_grace_period_secs

  def _check_stop(self):
    """Returns True if the coordinator was asked to stop.

    Raises:
      Exception: any exception previously reported to the coordinator is
        re-raised here.
    """
    # If the coordinator was asked to stop due to an exception, then it needs
    # to be propagated to this stack.
    self._coord.raise_requested_exception()
    # At this point, no exceptions are recorded in the coordinator.
    return self._coord.should_stop()

  def close(self):
    """Requests a stop, joins coordinated threads, then closes the session."""
    self._coord.request_stop()
    try:
      # Give threads `stop_grace_period_secs` seconds to terminate; any
      # threads still alive after that are abandoned rather than raising.
      self._coord.join(
          stop_grace_period_secs=self._stop_grace_period_secs,
          ignore_live_threads=True)
    finally:
      try:
        _WrappedSession.close(self)
      except Exception:  # pylint: disable=broad-except
        # We intentionally suppress exceptions from the close() here since
        # useful exceptions are already reported by join().
        pass

  def run(self, *args, **kwargs):
    """Runs the wrapped session, preferring coordinator-recorded root causes."""
    try:
      return self._sess.run(*args, **kwargs)
    except _PREEMPTION_ERRORS:
      raise
    except Exception as original_exception:  # pylint: disable=broad-except
      # A non-preemption error could have been caused by a preemption error
      # in the coordinator. If this is the case, raise that exception instead,
      # since it's the root cause. Otherwise, stick to the `original_exception`.
+ try: + self._coord.raise_requested_exception() + except _PREEMPTION_ERRORS: + raise + except Exception: # pylint: disable=broad-except + raise original_exception from None + else: + raise + + +class _HookedSession(_WrappedSession): + """A _WrappedSession that calls hooks during calls to run(). + + The list of hooks to call is passed in the constructor. Before each call + to `run()` the session calls the `before_run()` method of the hooks, which + can return additional ops or tensors to run. These are added to the arguments + of the call to `run()`. + + When the `run()` call finishes, the session calls the `after_run()` methods of + the hooks, passing the values returned by the `run()` call corresponding to + the ops and tensors that each hook requested. + + If any call to the hooks, requests stop via run_context the session will be + marked as needing to stop and its `should_stop()` method will now return + `True`. + """ + + def __init__(self, sess, hooks): + """Initializes a _HookedSession object. + + Args: + sess: A `tf.compat.v1.Session` or a `_WrappedSession` object. + hooks: An iterable of `SessionRunHook' objects. + """ + + _WrappedSession.__init__(self, sess) + self._hooks = hooks + self._should_stop = False + + def _check_stop(self): + """See base class.""" + return self._should_stop + + def run(self, fetches, feed_dict=None, options=None, run_metadata=None): + """See base class.""" + if self.should_stop(): + raise RuntimeError('Run called even after should_stop requested.') + + actual_fetches = {'caller': fetches} + + run_context = session_run_hook.SessionRunContext( + original_args=session_run_hook.SessionRunArgs(fetches, feed_dict), + session=self._sess) + + options = options or config_pb2.RunOptions() + feed_dict = self._call_hook_before_run(run_context, actual_fetches, + feed_dict, options) + + # Do session run. 
+ run_metadata = run_metadata or config_pb2.RunMetadata() + outputs = _WrappedSession.run( + self, + fetches=actual_fetches, + feed_dict=feed_dict, + options=options, + run_metadata=run_metadata) + + for hook in self._hooks: + hook.after_run( + run_context, + session_run_hook.SessionRunValues( + results=outputs[hook] if hook in outputs else None, + options=options, + run_metadata=run_metadata)) + self._should_stop = self._should_stop or run_context.stop_requested + + return outputs['caller'] + + def _call_hook_before_run(self, run_context, fetch_dict, user_feed_dict, + options): + """Calls hooks.before_run and handles requests from hooks.""" + hook_feeds = {} + for hook in self._hooks: + request = hook.before_run(run_context) + if request is not None: + if request.fetches is not None: + fetch_dict[hook] = request.fetches + if request.feed_dict: + self._raise_if_feeds_intersects(hook_feeds, request.feed_dict, + 'Same tensor is fed by two hooks.') + hook_feeds.update(request.feed_dict) + if request.options: + self._merge_run_options(options, request.options) + + if not hook_feeds: + return user_feed_dict + + if not user_feed_dict: + return hook_feeds + + self._raise_if_feeds_intersects( + user_feed_dict, hook_feeds, + 'Same tensor is fed by a SessionRunHook and user.') + hook_feeds.update(user_feed_dict) + return hook_feeds + + def _raise_if_feeds_intersects(self, feeds1, feeds2, message): + intersection = set(feeds1.keys()) & set(feeds2.keys()) + if intersection: + raise RuntimeError(message + ' Conflict(s): ' + str(list(intersection))) + + def _merge_run_options(self, options, incoming_options): + """Merge two instances of RunOptions into the first one. + + During the merger, the numerical fields including trace_level, + timeout_in_ms, inter_op_thread_pool are set to the larger one of the two. + The boolean value is set to the logical OR of the two. + debug_tensor_watch_opts of the original options is extended with that from + the incoming one. 
+ + Args: + options: The options to merge into. + incoming_options: The options to be merged into the first argument. + """ + options.trace_level = max(options.trace_level, incoming_options.trace_level) + options.timeout_in_ms = max(options.timeout_in_ms, + incoming_options.timeout_in_ms) + options.inter_op_thread_pool = max(options.inter_op_thread_pool, + incoming_options.inter_op_thread_pool) + options.output_partition_graphs = max( + options.output_partition_graphs, + incoming_options.output_partition_graphs) + options.debug_options.debug_tensor_watch_opts.extend( + incoming_options.debug_options.debug_tensor_watch_opts) + options.debug_options.reset_disk_byte_usage = ( + options.debug_options.reset_disk_byte_usage or + incoming_options.debug_options.reset_disk_byte_usage) + options.report_tensor_allocations_upon_oom = ( + options.report_tensor_allocations_upon_oom or + incoming_options.report_tensor_allocations_upon_oom) diff --git a/videochat2/lib/python3.10/site-packages/tensorflow/python/training/py_checkpoint_reader.py b/videochat2/lib/python3.10/site-packages/tensorflow/python/training/py_checkpoint_reader.py new file mode 100644 index 0000000000000000000000000000000000000000..32d7f4ec5d1c9f53ec53c42a6e4e3f802f991a65 --- /dev/null +++ b/videochat2/lib/python3.10/site-packages/tensorflow/python/training/py_checkpoint_reader.py @@ -0,0 +1,96 @@ +# Copyright 2015 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# ============================================================================== +"""Extending CheckpointReader for TensorFlow.""" +from tensorflow.python.framework import dtypes +from tensorflow.python.framework import errors_impl +from tensorflow.python.util import compat +from tensorflow.python.util._pywrap_checkpoint_reader import CheckpointReader +from tensorflow.python.util.tf_export import tf_export + + +def error_translator(e): + """Translate the tensor_slice_reader.cc errors.""" + # TODO(b/143319754): Remove the RuntimeError casting logic once we resolve the + # issue with throwing python exceptions from C++. + error_message = str(e) + if 'not found in checkpoint' in error_message or ( + 'Failed to find any ' + 'matching files for') in error_message: + raise errors_impl.NotFoundError(None, None, error_message) + elif 'Sliced checkpoints are not supported' in error_message or ( + 'Data type ' + 'not ' + 'supported') in error_message: + raise errors_impl.UnimplementedError(None, None, error_message) + elif 'Failed to get matching files on' in error_message: + raise errors_impl.InvalidArgumentError(None, None, error_message) + elif 'Unable to open table file' in error_message: + raise errors_impl.DataLossError(None, None, error_message) + elif 'Failed to find the saved tensor slices' in error_message or ( + 'not convertible to numpy dtype' in error_message): + raise errors_impl.InternalError(None, None, error_message) + else: + raise errors_impl.OpError(None, None, error_message, errors_impl.UNKNOWN) + + +def get_variable_to_dtype_map(self): + return { + name: dtypes.DType(type_enum) + for name, type_enum in self._GetVariableToDataTypeMap().items() # pylint: disable=protected-access + } + +CheckpointReader.get_variable_to_dtype_map = get_variable_to_dtype_map + + +def has_tensor(self, tensor_str): + return self._HasTensor(compat.as_bytes(tensor_str)) # pylint: disable=protected-access + +CheckpointReader.has_tensor = has_tensor + + +def get_tensor(self, 
tensor_str): + """Get the tensor from the Checkpoint object.""" + try: + return CheckpointReader.CheckpointReader_GetTensor( + self, compat.as_bytes(tensor_str)) + # TODO(b/143319754): Remove the RuntimeError casting logic once we resolve the + # issue with throwing python exceptions from C++. + except RuntimeError as e: + error_translator(e) + + +CheckpointReader.get_tensor = get_tensor + + +# Disable invalid name to keep backwards compatibility with that function. +# It was previously exported from py_checkpoint_reader.i which did not conform +# to pylint checks. +# pylint: disable=invalid-name +@tf_export(v1=['train.NewCheckpointReader']) +def NewCheckpointReader(filepattern): + """A function that returns a CheckPointReader. + + Args: + filepattern: The filename. + + Returns: + A CheckpointReader object. + """ + try: + return CheckpointReader(compat.as_bytes(filepattern)) + # TODO(b/143319754): Remove the RuntimeError casting logic once we resolve the + # issue with throwing python exceptions from C++. + except RuntimeError as e: + error_translator(e) diff --git a/videochat2/lib/python3.10/site-packages/tensorflow/python/training/rmsprop.py b/videochat2/lib/python3.10/site-packages/tensorflow/python/training/rmsprop.py new file mode 100644 index 0000000000000000000000000000000000000000..157f389c26b91a88ad1a4c1399e9609b09e951ff --- /dev/null +++ b/videochat2/lib/python3.10/site-packages/tensorflow/python/training/rmsprop.py @@ -0,0 +1,323 @@ +# Copyright 2015 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== +"""One-line documentation for rmsprop module. + +rmsprop algorithm [tieleman2012rmsprop] + +A detailed description of rmsprop. + +- maintain a moving (discounted) average of the square of gradients +- divide gradient by the root of this average + +mean_square = decay * mean_square{t-1} + (1-decay) * gradient ** 2 +mom = momentum * mom{t-1} + learning_rate * g_t / sqrt(mean_square + epsilon) +delta = - mom + +This implementation of RMSProp uses plain momentum, not Nesterov momentum. + +The centered version additionally maintains a moving (discounted) average of the +gradients, and uses that average to estimate the variance: + +mean_grad = decay * mean_grad{t-1} + (1-decay) * gradient +mean_square = decay * mean_square{t-1} + (1-decay) * gradient ** 2 +mom = momentum * mom{t-1} + learning_rate * g_t / + sqrt(mean_square - mean_grad**2 + epsilon) +delta = - mom +""" + +from tensorflow.python.framework import ops +from tensorflow.python.ops import array_ops +from tensorflow.python.ops import gen_training_ops +from tensorflow.python.ops import init_ops +from tensorflow.python.ops import math_ops +from tensorflow.python.training import optimizer +from tensorflow.python.util.tf_export import tf_export + + +@tf_export(v1=["train.RMSPropOptimizer"]) +class RMSPropOptimizer(optimizer.Optimizer): + """Optimizer that implements the RMSProp algorithm (Tielemans et al. + + 2012). + + References: + Coursera slide 29: + Hinton, 2012 + ([pdf](http://www.cs.toronto.edu/~tijmen/csc321/slides/lecture_slides_lec6.pdf)) + + @compatibility(TF2) + tf.compat.v1.train.RMSPropOptimizer is compatible with eager mode and + `tf.function`. + When eager execution is enabled, `learning_rate`, `decay`, `momentum`, + and `epsilon` can each be a callable that + takes no arguments and returns the actual value to use. 
This can be useful + for changing these values across different invocations of optimizer + functions. + + To switch to native TF2 style, use [`tf.keras.optimizers.RMSprop`] + (https://www.tensorflow.org/api_docs/python/tf/keras/optimizers/RMSprop) + instead. Please notice that due to the implementation differences, + `tf.keras.optimizers.RMSprop` and + `tf.compat.v1.train.RMSPropOptimizer` may have slight differences in + floating point numerics even though the formula used for the variable + updates still matches. + + #### Structural mapping to native TF2 + + Before: + + ```python + optimizer = tf.compat.v1.train.RMSPropOptimizer( + learning_rate=learning_rate, + decay=decay, + momentum=momentum, + epsilon=epsilon) + ``` + + After: + + ```python + optimizer = tf.keras.optimizers.RMSprop( + learning_rate=learning_rate, + rho=decay, + momentum=momentum, + epsilon=epsilon) + ``` + + #### How to map arguments + | TF1 Arg Name | TF2 Arg Name | Note | + | ------------------ | ------------- | ------------------------------- | + | `learning_rate` | `learning_rate`| Be careful of setting | + : : : learning_rate tensor value computed from the global step. : + : : : In TF1 this was usually meant to imply a dynamic learning rate and : + : : : would recompute in each step. In TF2 (eager + function) it will : + : : : treat it as a scalar value that only gets computed once instead of : + : : : a symbolic placeholder to be computed each time. : + | `decay` | `rho` | - | + | `momentum` | `momentum` | - | + | `epsilon` | `epsilon` | Default value is 1e-10 in TF1, | + : : : but 1e-07 in TF2. : + | `use_locking` | - | Not applicable in TF2. 
| + + #### Before & after usage example + Before: + + ```python + x = tf.Variable([1,2,3], dtype=tf.float32) + grad = tf.constant([0.1, 0.2, 0.3]) + optimizer = tf.compat.v1.train.RMSPropOptimizer(learning_rate=0.001) + optimizer.apply_gradients(zip([grad], [x])) + ``` + + After: + + ```python + x = tf.Variable([1,2,3], dtype=tf.float32) + grad = tf.constant([0.1, 0.2, 0.3]) + optimizer = tf.keras.optimizers.RMSprop(learning_rate=0.001) + optimizer.apply_gradients(zip([grad], [x])) + ``` + + @end_compatibility + """ + + def __init__(self, + learning_rate, + decay=0.9, + momentum=0.0, + epsilon=1e-10, + use_locking=False, + centered=False, + name="RMSProp"): + """Construct a new RMSProp optimizer. + + Note that in the dense implementation of this algorithm, variables and their + corresponding accumulators (momentum, gradient moving average, square + gradient moving average) will be updated even if the gradient is zero + (i.e. accumulators will decay, momentum will be applied). The sparse + implementation (used when the gradient is an `IndexedSlices` object, + typically because of `tf.gather` or an embedding lookup in the forward pass) + will not update variable slices or their accumulators unless those slices + were used in the forward pass (nor is there an "eventual" correction to + account for these omitted updates). This leads to more efficient updates for + large embedding lookup tables (where most of the slices are not accessed in + a particular graph execution), but differs from the published algorithm. + + Args: + learning_rate: A Tensor or a floating point value. The learning rate. + decay: Discounting factor for the history/coming gradient + momentum: A scalar tensor. + epsilon: Small value to avoid zero denominator. + use_locking: If True use locks for update operation. + centered: If True, gradients are normalized by the estimated variance of + the gradient; if False, by the uncentered second moment. 
Setting this to + True may help with training, but is slightly more expensive in terms of + computation and memory. Defaults to False. + name: Optional name prefix for the operations created when applying + gradients. Defaults to "RMSProp". + + """ + super(RMSPropOptimizer, self).__init__(use_locking, name) + self._learning_rate = learning_rate + self._decay = decay + self._momentum = momentum + self._epsilon = epsilon + self._centered = centered + + # Tensors for learning rate and momentum. Created in _prepare. + self._learning_rate_tensor = None + self._decay_tensor = None + self._momentum_tensor = None + self._epsilon_tensor = None + + def _create_slots(self, var_list): + for v in var_list: + if v.get_shape().is_fully_defined(): + init_rms = init_ops.ones_initializer(dtype=v.dtype.base_dtype) + else: + init_rms = array_ops.ones_like(v) + self._get_or_make_slot_with_initializer(v, init_rms, v.get_shape(), + v.dtype.base_dtype, "rms", + self._name) + if self._centered: + self._zeros_slot(v, "mg", self._name) + self._zeros_slot(v, "momentum", self._name) + + def _prepare(self): + lr = self._call_if_callable(self._learning_rate) + decay = self._call_if_callable(self._decay) + momentum = self._call_if_callable(self._momentum) + epsilon = self._call_if_callable(self._epsilon) + + self._learning_rate_tensor = ops.convert_to_tensor(lr, name="learning_rate") + self._decay_tensor = ops.convert_to_tensor(decay, name="decay") + self._momentum_tensor = ops.convert_to_tensor(momentum, name="momentum") + self._epsilon_tensor = ops.convert_to_tensor(epsilon, name="epsilon") + + def _apply_dense(self, grad, var): + rms = self.get_slot(var, "rms") + mom = self.get_slot(var, "momentum") + if self._centered: + mg = self.get_slot(var, "mg") + return gen_training_ops.apply_centered_rms_prop( + var, + mg, + rms, + mom, + math_ops.cast(self._learning_rate_tensor, var.dtype.base_dtype), + math_ops.cast(self._decay_tensor, var.dtype.base_dtype), + math_ops.cast(self._momentum_tensor, 
var.dtype.base_dtype), + math_ops.cast(self._epsilon_tensor, var.dtype.base_dtype), + grad, + use_locking=self._use_locking).op + else: + return gen_training_ops.apply_rms_prop( + var, + rms, + mom, + math_ops.cast(self._learning_rate_tensor, var.dtype.base_dtype), + math_ops.cast(self._decay_tensor, var.dtype.base_dtype), + math_ops.cast(self._momentum_tensor, var.dtype.base_dtype), + math_ops.cast(self._epsilon_tensor, var.dtype.base_dtype), + grad, + use_locking=self._use_locking).op + + def _resource_apply_dense(self, grad, var): + rms = self.get_slot(var, "rms") + mom = self.get_slot(var, "momentum") + if self._centered: + mg = self.get_slot(var, "mg") + return gen_training_ops.resource_apply_centered_rms_prop( + var.handle, + mg.handle, + rms.handle, + mom.handle, + math_ops.cast(self._learning_rate_tensor, grad.dtype.base_dtype), + math_ops.cast(self._decay_tensor, grad.dtype.base_dtype), + math_ops.cast(self._momentum_tensor, grad.dtype.base_dtype), + math_ops.cast(self._epsilon_tensor, grad.dtype.base_dtype), + grad, + use_locking=self._use_locking) + else: + return gen_training_ops.resource_apply_rms_prop( + var.handle, + rms.handle, + mom.handle, + math_ops.cast(self._learning_rate_tensor, grad.dtype.base_dtype), + math_ops.cast(self._decay_tensor, grad.dtype.base_dtype), + math_ops.cast(self._momentum_tensor, grad.dtype.base_dtype), + math_ops.cast(self._epsilon_tensor, grad.dtype.base_dtype), + grad, + use_locking=self._use_locking) + + def _apply_sparse(self, grad, var): + rms = self.get_slot(var, "rms") + mom = self.get_slot(var, "momentum") + if self._centered: + mg = self.get_slot(var, "mg") + return gen_training_ops.sparse_apply_centered_rms_prop( + var, + mg, + rms, + mom, + math_ops.cast(self._learning_rate_tensor, var.dtype.base_dtype), + math_ops.cast(self._decay_tensor, var.dtype.base_dtype), + math_ops.cast(self._momentum_tensor, var.dtype.base_dtype), + math_ops.cast(self._epsilon_tensor, var.dtype.base_dtype), + grad.values, + 
grad.indices, + use_locking=self._use_locking) + else: + return gen_training_ops.sparse_apply_rms_prop( + var, + rms, + mom, + math_ops.cast(self._learning_rate_tensor, var.dtype.base_dtype), + math_ops.cast(self._decay_tensor, var.dtype.base_dtype), + math_ops.cast(self._momentum_tensor, var.dtype.base_dtype), + math_ops.cast(self._epsilon_tensor, var.dtype.base_dtype), + grad.values, + grad.indices, + use_locking=self._use_locking) + + def _resource_apply_sparse(self, grad, var, indices): + rms = self.get_slot(var, "rms") + mom = self.get_slot(var, "momentum") + if self._centered: + mg = self.get_slot(var, "mg") + return gen_training_ops.resource_sparse_apply_centered_rms_prop( + var.handle, + mg.handle, + rms.handle, + mom.handle, + math_ops.cast(self._learning_rate_tensor, grad.dtype), + math_ops.cast(self._decay_tensor, grad.dtype), + math_ops.cast(self._momentum_tensor, grad.dtype), + math_ops.cast(self._epsilon_tensor, grad.dtype), + grad, + indices, + use_locking=self._use_locking) + else: + return gen_training_ops.resource_sparse_apply_rms_prop( + var.handle, + rms.handle, + mom.handle, + math_ops.cast(self._learning_rate_tensor, grad.dtype), + math_ops.cast(self._decay_tensor, grad.dtype), + math_ops.cast(self._momentum_tensor, grad.dtype), + math_ops.cast(self._epsilon_tensor, grad.dtype), + grad, + indices, + use_locking=self._use_locking) diff --git a/videochat2/lib/python3.10/site-packages/tensorflow/python/training/saver_test_utils.py b/videochat2/lib/python3.10/site-packages/tensorflow/python/training/saver_test_utils.py new file mode 100644 index 0000000000000000000000000000000000000000..bae7f5721dc7403e97d34ca5994d8959c3bb5c99 --- /dev/null +++ b/videochat2/lib/python3.10/site-packages/tensorflow/python/training/saver_test_utils.py @@ -0,0 +1,87 @@ +# Copyright 2015 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================= +"""Utility classes for testing checkpointing.""" + +from tensorflow.python.eager import context +from tensorflow.python.framework import dtypes +from tensorflow.python.framework import ops as ops_lib +from tensorflow.python.ops import gen_lookup_ops +from tensorflow.python.training import saver as saver_module + + +class CheckpointedOp: + """Op with a custom checkpointing implementation. + + Defined as part of the test because the MutableHashTable Python code is + currently in contrib. + """ + + # pylint: disable=protected-access + def __init__(self, name, table_ref=None): + if table_ref is None: + self.table_ref = gen_lookup_ops.mutable_hash_table_v2( + key_dtype=dtypes.string, value_dtype=dtypes.float32, name=name) + else: + self.table_ref = table_ref + self._name = name + if not context.executing_eagerly(): + self._saveable = CheckpointedOp.CustomSaveable(self, name) + ops_lib.add_to_collection(ops_lib.GraphKeys.SAVEABLE_OBJECTS, + self._saveable) + + @property + def name(self): + return self._name + + @property + def saveable(self): + if context.executing_eagerly(): + return CheckpointedOp.CustomSaveable(self, self.name) + else: + return self._saveable + + def insert(self, keys, values): + return gen_lookup_ops.lookup_table_insert_v2(self.table_ref, keys, values) + + def lookup(self, keys, default): + return gen_lookup_ops.lookup_table_find_v2(self.table_ref, keys, default) + + def keys(self): + return self._export()[0] + + def values(self): + return self._export()[1] + + 
def _export(self): + return gen_lookup_ops.lookup_table_export_v2(self.table_ref, dtypes.string, + dtypes.float32) + + class CustomSaveable(saver_module.BaseSaverBuilder.SaveableObject): + """A custom saveable for CheckpointedOp.""" + + def __init__(self, table, name): + tensors = table._export() + specs = [ + saver_module.BaseSaverBuilder.SaveSpec(tensors[0], "", + name + "-keys"), + saver_module.BaseSaverBuilder.SaveSpec(tensors[1], "", + name + "-values") + ] + super(CheckpointedOp.CustomSaveable, self).__init__(table, specs, name) + + def restore(self, restore_tensors, shapes): + return gen_lookup_ops.lookup_table_import_v2( + self.op.table_ref, restore_tensors[0], restore_tensors[1]) + # pylint: enable=protected-access diff --git a/videochat2/lib/python3.10/site-packages/tensorflow/python/training/summary_io.py b/videochat2/lib/python3.10/site-packages/tensorflow/python/training/summary_io.py new file mode 100644 index 0000000000000000000000000000000000000000..5e75751f667bbe45016d39e8141ec32c7c4ec1a5 --- /dev/null +++ b/videochat2/lib/python3.10/site-packages/tensorflow/python/training/summary_io.py @@ -0,0 +1,77 @@ +# Copyright 2016 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# ============================================================================== +"""Reads Summaries from and writes Summaries to event files.""" + +# pylint: disable=unused-import +from tensorflow.python.summary.summary_iterator import summary_iterator +from tensorflow.python.summary.writer.writer import FileWriter as _FileWriter +from tensorflow.python.summary.writer.writer_cache import FileWriterCache as SummaryWriterCache +# pylint: enable=unused-import +from tensorflow.python.util.deprecation import deprecated + + +class SummaryWriter(_FileWriter): + + @deprecated("2016-11-30", + "Please switch to tf.summary.FileWriter. The interface and " + "behavior is the same; this is just a rename.") + def __init__(self, + logdir, + graph=None, + max_queue=10, + flush_secs=120, + graph_def=None): + """Creates a `SummaryWriter` and an event file. + + This class is deprecated, and should be replaced with tf.summary.FileWriter. + + On construction the summary writer creates a new event file in `logdir`. + This event file will contain `Event` protocol buffers constructed when you + call one of the following functions: `add_summary()`, `add_session_log()`, + `add_event()`, or `add_graph()`. + + If you pass a `Graph` to the constructor it is added to + the event file. (This is equivalent to calling `add_graph()` later). + + TensorBoard will pick the graph from the file and display it graphically so + you can interactively explore the graph you built. You will usually pass + the graph from the session in which you launched it: + + ```python + ...create a graph... + # Launch the graph in a session. + sess = tf.compat.v1.Session() + # Create a summary writer, add the 'graph' to the event file. + writer = tf.compat.v1.summary.FileWriter(, sess.graph) + ``` + + The other arguments to the constructor control the asynchronous writes to + the event file: + + * `flush_secs`: How often, in seconds, to flush the added summaries + and events to disk. 
+ * `max_queue`: Maximum number of summaries or events pending to be + written to disk before one of the 'add' calls block. + + Args: + logdir: A string. Directory where event file will be written. + graph: A `Graph` object, such as `sess.graph`. + max_queue: Integer. Size of the queue for pending events and summaries. + flush_secs: Number. How often, in seconds, to flush the + pending events and summaries to disk. + graph_def: DEPRECATED: Use the `graph` argument instead. + """ + super(SummaryWriter, self).__init__(logdir, graph, max_queue, flush_secs, + graph_def) diff --git a/videochat2/lib/python3.10/site-packages/tensorflow/python/training/warm_starting_util.py b/videochat2/lib/python3.10/site-packages/tensorflow/python/training/warm_starting_util.py new file mode 100644 index 0000000000000000000000000000000000000000..01196b78e6097e6abd5ab3a909575baf6926a3ac --- /dev/null +++ b/videochat2/lib/python3.10/site-packages/tensorflow/python/training/warm_starting_util.py @@ -0,0 +1,561 @@ +# Copyright 2017 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# ============================================================================== +"""Utilities to warm-start TF.Learn Estimators.""" + +import collections + +from tensorflow.python.framework import errors +from tensorflow.python.framework import ops +from tensorflow.python.ops import state_ops +from tensorflow.python.ops import variable_scope +from tensorflow.python.ops import variables as variables_lib +from tensorflow.python.platform import tf_logging as logging +from tensorflow.python.training import checkpoint_ops +from tensorflow.python.training import checkpoint_utils +from tensorflow.python.training import saver as saver_lib +from tensorflow.python.training.saving import saveable_object_util +from tensorflow.python.util.tf_export import tf_export + + +@tf_export(v1=["train.VocabInfo"]) +class VocabInfo( + collections.namedtuple("VocabInfo", [ + "new_vocab", + "new_vocab_size", + "num_oov_buckets", + "old_vocab", + "old_vocab_size", + "backup_initializer", + "axis", + ])): + """Vocabulary information for warm-starting. + + See `tf.estimator.WarmStartSettings` for examples of using + VocabInfo to warm-start. + + Args: + new_vocab: [Required] A path to the new vocabulary file (used with the model + to be trained). + new_vocab_size: [Required] An integer indicating how many entries of the new + vocabulary will used in training. + num_oov_buckets: [Required] An integer indicating how many OOV buckets are + associated with the vocabulary. + old_vocab: [Required] A path to the old vocabulary file (used with the + checkpoint to be warm-started from). + old_vocab_size: [Optional] An integer indicating how many entries of the old + vocabulary were used in the creation of the checkpoint. If not provided, + the entire old vocabulary will be used. + backup_initializer: [Optional] A variable initializer used for variables + corresponding to new vocabulary entries and OOV. If not provided, these + entries will be zero-initialized. 
+ axis: [Optional] Denotes what axis the vocabulary corresponds to. The + default, 0, corresponds to the most common use case (embeddings or + linear weights for binary classification / regression). An axis of 1 + could be used for warm-starting output layers with class vocabularies. + + Returns: + A `VocabInfo` which represents the vocabulary information for warm-starting. + + Raises: + ValueError: `axis` is neither 0 or 1. + + Example Usage: +```python + embeddings_vocab_info = tf.VocabInfo( + new_vocab='embeddings_vocab', + new_vocab_size=100, + num_oov_buckets=1, + old_vocab='pretrained_embeddings_vocab', + old_vocab_size=10000, + backup_initializer=tf.compat.v1.truncated_normal_initializer( + mean=0.0, stddev=(1 / math.sqrt(embedding_dim))), + axis=0) + + softmax_output_layer_kernel_vocab_info = tf.VocabInfo( + new_vocab='class_vocab', + new_vocab_size=5, + num_oov_buckets=0, # No OOV for classes. + old_vocab='old_class_vocab', + old_vocab_size=8, + backup_initializer=tf.compat.v1.glorot_uniform_initializer(), + axis=1) + + softmax_output_layer_bias_vocab_info = tf.VocabInfo( + new_vocab='class_vocab', + new_vocab_size=5, + num_oov_buckets=0, # No OOV for classes. + old_vocab='old_class_vocab', + old_vocab_size=8, + backup_initializer=tf.compat.v1.zeros_initializer(), + axis=0) + + #Currently, only axis=0 and axis=1 are supported. + ``` + """ + + def __new__(cls, + new_vocab, + new_vocab_size, + num_oov_buckets, + old_vocab, + old_vocab_size=-1, + backup_initializer=None, + axis=0): + if axis != 0 and axis != 1: + raise ValueError("The only supported values for the axis argument are 0 " + "and 1. Provided axis: {}".format(axis)) + + return super(VocabInfo, cls).__new__( + cls, + new_vocab, + new_vocab_size, + num_oov_buckets, + old_vocab, + old_vocab_size, + backup_initializer, + axis, + ) + + +def _infer_var_name(var): + """Returns name of the `var`. + + Args: + var: A list. 
The list can contain either of the following: + (i) A single `Variable` + (ii) A single `ResourceVariable` + (iii) Multiple `Variable` objects which must be slices of the same larger + variable. + (iv) A single `PartitionedVariable` + + Returns: + Name of the `var` + """ + name_to_var_dict = saveable_object_util.op_list_to_dict(var) + if len(name_to_var_dict) > 1: + raise TypeError("`var` = %s passed as arg violates the constraints. " + "name_to_var_dict = %s" % (var, name_to_var_dict)) + return list(name_to_var_dict.keys())[0] + + +def _get_var_info(var, prev_tensor_name=None): + """Helper method for standarizing Variable and naming. + + Args: + var: Current graph's variable that needs to be warm-started (initialized). + Can be either of the following: (i) `Variable` (ii) `ResourceVariable` + (iii) list of `Variable`: The list must contain slices of the same larger + variable. (iv) `PartitionedVariable` + prev_tensor_name: Name of the tensor to lookup in provided `prev_ckpt`. If + None, we lookup tensor with same name as given `var`. + + Returns: + A tuple of the Tensor name and var. + """ + if checkpoint_utils._is_variable(var): # pylint: disable=protected-access + current_var_name = _infer_var_name([var]) + elif (isinstance(var, list) and + all(checkpoint_utils._is_variable(v) for v in var)): # pylint: disable=protected-access + current_var_name = _infer_var_name(var) + elif isinstance(var, variables_lib.PartitionedVariable): + current_var_name = _infer_var_name([var]) + var = var._get_variable_list() # pylint: disable=protected-access + else: + raise TypeError( + "var MUST be one of the following: a Variable, list of Variable or " + "PartitionedVariable, but is {}".format(type(var))) + if not prev_tensor_name: + # Assume tensor name remains the same. + prev_tensor_name = current_var_name + + return prev_tensor_name, var + + +# pylint: disable=protected-access +# Accesses protected members of tf.Variable to reset the variable's internal +# state. 
+def _warm_start_var_with_vocab(var, + current_vocab_path, + current_vocab_size, + prev_ckpt, + prev_vocab_path, + previous_vocab_size=-1, + current_oov_buckets=0, + prev_tensor_name=None, + initializer=None, + axis=0): + """Warm-starts given variable from `prev_tensor_name` tensor in `prev_ckpt`. + + Use this method when the `var` is backed by vocabulary. This method stitches + the given `var` such that values corresponding to individual features in the + vocabulary remain consistent irrespective of changing order of the features + between old and new vocabularies. + + Args: + var: Current graph's variable that needs to be warm-started (initialized). + Can be either of the following: + (i) `Variable` + (ii) `ResourceVariable` + (iii) list of `Variable`: The list must contain slices of the same larger + variable. + (iv) `PartitionedVariable` + current_vocab_path: Path to the vocab file used for the given `var`. + current_vocab_size: An `int` specifying the number of entries in the current + vocab. + prev_ckpt: A string specifying the directory with checkpoint file(s) or path + to checkpoint. The given checkpoint must have tensor with name + `prev_tensor_name` (if not None) or tensor with name same as given `var`. + prev_vocab_path: Path to the vocab file used for the tensor in `prev_ckpt`. + previous_vocab_size: If provided, will constrain previous vocab to the first + `previous_vocab_size` entries. -1 means use the entire previous vocab. + current_oov_buckets: An `int` specifying the number of out-of-vocabulary + buckets used for given `var`. + prev_tensor_name: Name of the tensor to lookup in provided `prev_ckpt`. If + None, we lookup tensor with same name as given `var`. + initializer: Variable initializer to be used for missing entries. If None, + missing entries will be zero-initialized. + axis: Axis of the variable that the provided vocabulary corresponds to. + + Raises: + ValueError: If required args are not provided. 
+ """ + if not (current_vocab_path and current_vocab_size and prev_ckpt and + prev_vocab_path): + raise ValueError("Invalid args: Must provide all of [current_vocab_path, " + "current_vocab_size, prev_ckpt, prev_vocab_path}.") + if checkpoint_utils._is_variable(var): + var = [var] + elif (isinstance(var, list) and + all(checkpoint_utils._is_variable(v) for v in var)): + var = var + elif isinstance(var, variables_lib.PartitionedVariable): + var = var._get_variable_list() + else: + raise TypeError( + "var MUST be one of the following: a Variable, list of Variable or " + "PartitionedVariable, but is {}".format(type(var))) + + if not prev_tensor_name: + # Assume tensor name remains the same. + prev_tensor_name = _infer_var_name(var) + + total_v_first_axis = sum(v.get_shape().as_list()[0] for v in var) + for v in var: + v_shape = v.get_shape().as_list() + slice_info = v._get_save_slice_info() + partition_info = None + if slice_info: + partition_info = variable_scope._PartitionInfo( + full_shape=slice_info.full_shape, var_offset=slice_info.var_offset) + + if axis == 0: + new_row_vocab_size = current_vocab_size + new_col_vocab_size = v_shape[1] + old_row_vocab_size = previous_vocab_size + old_row_vocab_file = prev_vocab_path + new_row_vocab_file = current_vocab_path + old_col_vocab_file = None + new_col_vocab_file = None + num_row_oov_buckets = current_oov_buckets + num_col_oov_buckets = 0 + elif axis == 1: + # Note that we must compute this value across all partitions, whereas + # in the axis = 0 case, we can simply use v_shape[1] because we don't + # allow partitioning across axis = 1. 
+ new_row_vocab_size = total_v_first_axis + new_col_vocab_size = current_vocab_size + old_row_vocab_size = -1 + old_row_vocab_file = None + new_row_vocab_file = None + old_col_vocab_file = prev_vocab_path + new_col_vocab_file = current_vocab_path + num_row_oov_buckets = 0 + num_col_oov_buckets = current_oov_buckets + else: + raise ValueError("The only supported values for the axis argument are 0 " + "and 1. Provided axis: {}".format(axis)) + + init = checkpoint_ops._load_and_remap_matrix_initializer( + ckpt_path=checkpoint_utils._get_checkpoint_filename(prev_ckpt), + old_tensor_name=prev_tensor_name, + new_row_vocab_size=new_row_vocab_size, + new_col_vocab_size=new_col_vocab_size, + old_row_vocab_size=old_row_vocab_size, + old_row_vocab_file=old_row_vocab_file, + new_row_vocab_file=new_row_vocab_file, + old_col_vocab_file=old_col_vocab_file, + new_col_vocab_file=new_col_vocab_file, + num_row_oov_buckets=num_row_oov_buckets, + num_col_oov_buckets=num_col_oov_buckets, + initializer=initializer) + new_init_val = ops.convert_to_tensor( + init(shape=v_shape, partition_info=partition_info)) + v._initializer_op = state_ops.assign(v, new_init_val) + + +# pylint: enable=protected-access + + +def _get_grouped_variables(vars_to_warm_start): + """Collects and groups (possibly partitioned) variables into a dictionary. + + The variables can be provided explicitly through vars_to_warm_start, or they + are retrieved from collections (see below). + + Args: + vars_to_warm_start: One of the following: + + - A regular expression (string) that captures which variables to + warm-start (see tf.compat.v1.get_collection). This expression will + only consider variables in the TRAINABLE_VARIABLES collection. + - A list of strings, each representing a full variable name to warm-start. + These will consider variables in GLOBAL_VARIABLES collection. + - A list of Variables to warm-start. + - `None`, in which case all variables in TRAINABLE_VARIABLES will be used. 
+ Returns: + A dictionary mapping variable names (strings) to lists of Variables. + Raises: + ValueError: If vars_to_warm_start is not a string, `None`, a list of + `Variables`, or a list of strings. + """ + # TODO(b/143899805): Remove unicode checks when deprecating Python2. + if isinstance(vars_to_warm_start, str) or vars_to_warm_start is None: + # Both vars_to_warm_start = '.*' and vars_to_warm_start = None will match + # everything (in TRAINABLE_VARIABLES) here. + logging.info("Warm-starting variables only in TRAINABLE_VARIABLES.") + list_of_vars = ops.get_collection( + ops.GraphKeys.TRAINABLE_VARIABLES, scope=vars_to_warm_start) + elif isinstance(vars_to_warm_start, list): + if all(isinstance(v, str) for v in vars_to_warm_start): + list_of_vars = [] + for v in vars_to_warm_start: + list_of_vars += ops.get_collection( + ops.GraphKeys.GLOBAL_VARIABLES, scope=v) + elif all(checkpoint_utils._is_variable(v) for v in vars_to_warm_start): # pylint: disable=protected-access + list_of_vars = vars_to_warm_start + else: + raise ValueError("If `vars_to_warm_start` is a list, it must be all " + "`Variable` or all `str`. Given types are {}".format( + [type(v) for v in vars_to_warm_start])) + else: + raise ValueError("`vars_to_warm_start must be a `list` or `str`. Given " + "type is {}".format(type(vars_to_warm_start))) + # We have to deal with partitioned variables, since get_collection flattens + # out the list. + grouped_variables = {} + for v in list_of_vars: + t = [v] if not isinstance(v, list) else v + var_name = _infer_var_name(t) + grouped_variables.setdefault(var_name, []).append(v) + + return grouped_variables + + +def _get_object_checkpoint_renames(path, variable_names): + """Returns a dictionary mapping variable names to checkpoint keys. + + The warm-starting utility expects variable names to match with the variable + names in the checkpoint. For object-based checkpoints, the variable names + and names in the checkpoint are different. 
Thus, for object-based checkpoints, + this function is used to obtain the map from variable names to checkpoint + keys. + + Args: + path: path to checkpoint directory or file. + variable_names: list of variable names to load from the checkpoint. + + Returns: + If the checkpoint is object-based, this function returns a map from variable + names to their corresponding checkpoint keys. + If the checkpoint is name-based, this returns an empty dict. + + Raises: + ValueError: If the object-based checkpoint is missing variables. + """ + fname = checkpoint_utils._get_checkpoint_filename(path) # pylint: disable=protected-access + try: + names_to_keys = saver_lib.object_graph_key_mapping(fname) + except errors.NotFoundError: + # If an error is raised from `object_graph_key_mapping`, then the + # checkpoint is name-based. There are no renames, so return an empty dict. + return {} + + missing_names = set(variable_names) - set(names_to_keys.keys()) + if missing_names: + raise ValueError( + "Attempting to warm-start from an object-based checkpoint, but found " + "that the checkpoint did not contain values for all variables. The " + "following variables were missing: {}" + .format(missing_names)) + return {name: names_to_keys[name] for name in variable_names} + + +@tf_export(v1=["train.warm_start"]) +def warm_start(ckpt_to_initialize_from, + vars_to_warm_start=".*", + var_name_to_vocab_info=None, + var_name_to_prev_var_name=None): + """Warm-starts a model using the given settings. + + If you are using a tf.estimator.Estimator, this will automatically be called + during training. + + Args: + ckpt_to_initialize_from: [Required] A string specifying the directory with + checkpoint file(s) or path to checkpoint from which to warm-start the + model parameters. + vars_to_warm_start: [Optional] One of the following: + + - A regular expression (string) that captures which variables to + warm-start (see tf.compat.v1.get_collection). 
This expression will only + consider variables in the TRAINABLE_VARIABLES collection -- if you need + to warm-start non_TRAINABLE vars (such as optimizer accumulators or + batch norm statistics), please use the below option. + - A list of strings, each a regex scope provided to + tf.compat.v1.get_collection with GLOBAL_VARIABLES (please see + tf.compat.v1.get_collection). For backwards compatibility reasons, + this is separate from the single-string argument type. + - A list of Variables to warm-start. If you do not have access to the + `Variable` objects at the call site, please use the above option. + - `None`, in which case only TRAINABLE variables specified in + `var_name_to_vocab_info` will be warm-started. + + Defaults to `'.*'`, which warm-starts all variables in the + TRAINABLE_VARIABLES collection. Note that this excludes variables such + as accumulators and moving statistics from batch norm. + var_name_to_vocab_info: [Optional] Dict of variable names (strings) to + `tf.estimator.VocabInfo`. The variable names should be "full" variables, + not the names of the partitions. If not explicitly provided, the variable + is assumed to have no (changes to) vocabulary. + var_name_to_prev_var_name: [Optional] Dict of variable names (strings) to + name of the previously-trained variable in `ckpt_to_initialize_from`. If + not explicitly provided, the name of the variable is assumed to be same + between previous checkpoint and current model. Note that this has no + effect on the set of variables that is warm-started, and only controls + name mapping (use `vars_to_warm_start` for controlling what variables to + warm-start). + + Raises: + ValueError: If the WarmStartSettings contains prev_var_name or VocabInfo + configuration for variable names that are not used. This is to ensure + a stronger check for variable configuration than relying on users to + examine the logs. 
+ """ + logging.info("Warm-starting from: {}".format(ckpt_to_initialize_from)) + grouped_variables = _get_grouped_variables(vars_to_warm_start) + + if var_name_to_vocab_info is None: + var_name_to_vocab_info = {} + + if not var_name_to_prev_var_name: + # Detect whether the checkpoint is object-based, in which case the + # var_name_to_prev_var_name dictionary should map variable names to + # checkpoint keys. If the user has specified var_name_to_prev_var_name, we + # do not override it. + var_name_to_prev_var_name = _get_object_checkpoint_renames( + ckpt_to_initialize_from, grouped_variables.keys()) + + warmstarted_count = 0 + + # Keep track of which var_names in var_name_to_prev_var_name and + # var_name_to_vocab_info have been used. Err on the safer side by throwing an + # exception if any are unused by the end of the loop. It is easy to misname + # a variable during this configuration, in which case without this check, we + # would fail to warm-start silently. + prev_var_name_used = set() + vocab_info_used = set() + + # Group the vocabless vars into one call to init_from_checkpoint. 
+ vocabless_vars = {} + for var_name, variable in grouped_variables.items(): + prev_var_name = var_name_to_prev_var_name.get(var_name) + if prev_var_name: + prev_var_name_used.add(var_name) + vocab_info = var_name_to_vocab_info.get(var_name) + if vocab_info: + vocab_info_used.add(var_name) + warmstarted_count += 1 + logging.debug( + "Warm-starting variable: {}; current_vocab: {} current_vocab_size: {}" + " prev_vocab: {} prev_vocab_size: {} current_oov: {} prev_tensor: {}" + " initializer: {}".format( + var_name, vocab_info.new_vocab, vocab_info.new_vocab_size, + vocab_info.old_vocab, (vocab_info.old_vocab_size if + vocab_info.old_vocab_size > 0 else "All"), + vocab_info.num_oov_buckets, prev_var_name or "Unchanged", + vocab_info.backup_initializer or "zero-initialized")) + _warm_start_var_with_vocab( + variable, + current_vocab_path=vocab_info.new_vocab, + current_vocab_size=vocab_info.new_vocab_size, + prev_ckpt=ckpt_to_initialize_from, + prev_vocab_path=vocab_info.old_vocab, + previous_vocab_size=vocab_info.old_vocab_size, + current_oov_buckets=vocab_info.num_oov_buckets, + prev_tensor_name=prev_var_name, + initializer=vocab_info.backup_initializer, + axis=vocab_info.axis) + else: + # For the special value of vars_to_warm_start = None, + # we only warm-start variables with explicitly specified vocabularies. + if vars_to_warm_start: + warmstarted_count += 1 + logging.debug("Warm-starting variable: {}; prev_var_name: {}".format( + var_name, prev_var_name or "Unchanged")) + # Because we use a default empty list in grouped_variables, single + # unpartitioned variables will be lists here, which we rectify in order + # for init_from_checkpoint logic to work correctly. 
+ if len(variable) == 1: + variable = variable[0] + prev_tensor_name, var = _get_var_info(variable, prev_var_name) + if prev_tensor_name in vocabless_vars: + # The API for checkpoint_utils.init_from_checkpoint accepts a mapping + # from checkpoint tensor names to model variable names, so it does not + # support warm-starting two variables from the same tensor. Our work- + # around is to run init_from_checkpoint multiple times, each time we + # encounter a new variable that should be initialized by a previously- + # used tensor. + logging.debug("Requested prev_var_name {} initialize both {} and {}; " + "calling init_from_checkpoint.".format( + prev_tensor_name, + vocabless_vars[prev_tensor_name], + var)) + checkpoint_utils.init_from_checkpoint(ckpt_to_initialize_from, + vocabless_vars) + vocabless_vars.clear() + vocabless_vars[prev_tensor_name] = var + + if vocabless_vars: + checkpoint_utils.init_from_checkpoint(ckpt_to_initialize_from, + vocabless_vars) + prev_var_name_not_used = set( + var_name_to_prev_var_name.keys()) - prev_var_name_used + vocab_info_not_used = set(var_name_to_vocab_info.keys()) - vocab_info_used + + logging.info("Warm-started %d variables.", warmstarted_count) + + if prev_var_name_not_used: + raise ValueError( + "You provided the following variables in " + "var_name_to_prev_var_name that were not used: " + "{0}. Perhaps you misspelled them? Here is the list of viable " + "variable names: {1}".format(prev_var_name_not_used, + grouped_variables.keys())) + if vocab_info_not_used: + raise ValueError( + "You provided the following variables in " + "var_name_to_vocab_info that were not used: {0}. " + " Perhaps you misspelled them? Here is the list of viable variable " + "names: {1}".format(vocab_info_not_used, grouped_variables.keys()))